| /* |
| * CDDL HEADER START |
| * |
| * The contents of this file are subject to the terms of the |
| * Common Development and Distribution License (the "License"). |
| * You may not use this file except in compliance with the License. |
| * |
| * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
| * or http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| * |
| * When distributing Covered Code, include this CDDL HEADER in each |
| * file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
| * If applicable, add the following below this CDDL HEADER, with the |
| * fields enclosed by brackets "[]" replaced with your own identifying |
| * information: Portions Copyright [yyyy] [name of copyright owner] |
| * |
| * CDDL HEADER END |
| */ |
| /* |
| * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. |
| * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved. |
| */ |
| |
| /* |
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
| */ |
| #include <sys/nxge/nxge_impl.h> |
| #include <sys/nxge/nxge_hio.h> |
| #include <sys/nxge/nxge_rxdma.h> |
| #include <sys/pcie.h> |
| |
| uint32_t nxge_use_partition = 0; /* debug partition flag */ |
| uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ |
| uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ |
| /* |
| * PSARC/2007/453 MSI-X interrupt limit override |
| */ |
| uint32_t nxge_msi_enable = 2; |
| |
| /* |
| * Software workaround for a Neptune (PCI-E) |
| * hardware interrupt bug which the hardware |
| * may generate spurious interrupts after the |
| * device interrupt handler was removed. If this flag |
| * is enabled, the driver will reset the |
| * hardware when devices are being detached. |
| */ |
| uint32_t nxge_peu_reset_enable = 0; |
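
/*
 * For example, the workaround can be enabled by adding the
 * following line to /etc/system (illustrative; takes effect at the
 * next boot):
 *
 *	set nxge:nxge_peu_reset_enable = 1
 */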
| |
| /* |
| * Software workaround for the hardware |
| * checksum bugs that affect packet transmission |
| * and receive: |
| * |
| * Usage of nxge_cksum_offload: |
| * |
| * (1) nxge_cksum_offload = 0 (default): |
| * - transmits packets: |
| * TCP: uses the hardware checksum feature. |
| * UDP: driver will compute the software checksum |
| * based on the partial checksum computed |
| * by the IP layer. |
| * - receives packets |
| * TCP: marks packets checksum flags based on hardware result. |
| * UDP: will not mark checksum flags. |
| * |
| * (2) nxge_cksum_offload = 1: |
| * - transmit packets: |
| * TCP/UDP: uses the hardware checksum feature. |
| * - receives packets |
| * TCP/UDP: marks packet checksum flags based on hardware result. |
| * |
| * (3) nxge_cksum_offload = 2: |
| * - The driver will not register its checksum capability. |
| * Checksum for both TCP and UDP will be computed |
| * by the stack. |
| * - The software LSO is not allowed in this case. |
| * |
| * (4) nxge_cksum_offload > 2: |
| * - Will be treated as it is set to 2 |
| * (stack will compute the checksum). |
| * |
| * (5) If the hardware bug is fixed, this workaround |
| * needs to be updated accordingly to reflect |
| * the new hardware revision. |
| */ |
| uint32_t nxge_cksum_offload = 0; |
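
/*
 * For example, to let the stack compute all checksums (mode 2), the
 * following /etc/system entry could be used (illustrative only):
 *
 *	set nxge:nxge_cksum_offload = 2
 */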
| |
| /* |
| * Globals: tunable parameters (/etc/system or adb) |
| * |
| */ |
| uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; |
| uint32_t nxge_rbr_spare_size = 0; |
| uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; |
| uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET; |
| uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; |
| boolean_t nxge_no_msg = B_TRUE; /* control message display */ |
| uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ |
| uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; |
| uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; |
| uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; |
| uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; |
| nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; |
| |
| /* MAX LSO size */ |
| #define NXGE_LSO_MAXLEN 65535 |
| uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; |
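
/*
 * Like the other globals above, these may be tuned from /etc/system,
 * for example (values shown are illustrative, not recommendations):
 *
 *	set nxge:nxge_rbr_size = 4096
 *	set nxge:nxge_lso_max = 65535
 */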
| |
| |
| /* |
| * Add tunable to reduce the amount of time spent in the |
| * ISR doing Rx Processing. |
| */ |
| uint32_t nxge_max_rx_pkts = 1024; |
| |
| /* |
| * Tunables to manage the receive buffer blocks. |
| * |
| * nxge_rx_threshold_hi: copy all buffers. |
| * nxge_rx_bcopy_size_type: receive buffer block size type. |
| * nxge_rx_threshold_lo: copy only up to tunable block size type. |
| */ |
| nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; |
| nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; |
| nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; |
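
/*
 * A rough sketch of how the receive path is assumed to apply these
 * thresholds (the variable nbufs_in_use is hypothetical; see the
 * RXDMA code for the authoritative logic):
 *
 *	if (nbufs_in_use >= nxge_rx_threshold_hi)
 *		bcopy every packet;
 *	else if (nbufs_in_use >= nxge_rx_threshold_lo)
 *		bcopy packets up to the nxge_rx_buf_size_type size;
 *	else
 *		loan the receive buffer up to the stack;
 */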
| |
| /* Use kmem_alloc() to allocate data buffers. */ |
| #if defined(__sparc) |
| uint32_t nxge_use_kmem_alloc = 1; |
| #elif defined(__i386) |
| uint32_t nxge_use_kmem_alloc = 0; |
| #else |
| uint32_t nxge_use_kmem_alloc = 1; |
| #endif |
| |
| rtrace_t npi_rtracebuf; |
| |
| /* |
| * The hardware sometimes fails to allow enough time for the link partner |
| * to send an acknowledgement for packets that the hardware sent to it. The |
| * hardware resends the packets earlier than it should be in those instances. |
| * This behavior caused some switches to acknowledge the wrong packets |
| * and it triggered the fatal error. |
| * This software workaround is to set the replay timer to a value |
| * suggested by the hardware team. |
| * |
| * PCI config space replay timer register: |
| * The following replay timeout value is 0xc |
| * for bit 14:18. |
| */ |
| #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8 |
| #define PCI_REPLAY_TIMEOUT_SHIFT 14 |
| |
| uint32_t nxge_set_replay_timer = 1; |
| uint32_t nxge_replay_timeout = 0xc; |
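
/*
 * A minimal sketch of the update nxge_set_pci_replay_timeout() is
 * expected to perform (assumes a valid PCI config access handle
 * "cfg_hdl"; the 5-bit field mask is illustrative):
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(cfg_hdl, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(cfg_hdl, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */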
| |
| /* |
| * The transmit serialization sometimes causes |
| * longer sleep before calling the driver transmit |
| * function as it sleeps longer than it should. |
| * The performace group suggests that a time wait tunable |
| * can be used to set the maximum wait time when needed |
| * and the default is set to 1 tick. |
| */ |
| uint32_t nxge_tx_serial_maxsleep = 1; |
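
/*
 * For example (illustrative), to allow a longer maximum wait:
 *
 *	set nxge:nxge_tx_serial_maxsleep = 2
 */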
| |
| #if defined(sun4v) |
| /* |
| * Hypervisor N2/NIU services information. |
| */ |
| /* |
| * The following is the default API supported: |
| * major 1 and minor 1. |
| * |
| * Please update the MAX_NIU_MAJORS, |
| * MAX_NIU_MINORS, and minor number supported |
| * when the newer Hypervior API interfaces |
| * are added. Also, please update nxge_hsvc_register() |
| * if needed. |
| */ |
| static hsvc_info_t niu_hsvc = { |
| HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, |
| NIU_MINOR_VER, "nxge" |
| }; |
| |
| static int nxge_hsvc_register(p_nxge_t); |
| #endif |
| |
| /* |
| * Function Prototypes |
| */ |
| static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); |
| static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); |
| static void nxge_unattach(p_nxge_t); |
| static int nxge_quiesce(dev_info_t *); |
| |
| #if NXGE_PROPERTY |
| static void nxge_remove_hard_properties(p_nxge_t); |
| #endif |
| |
| /* |
| * These two functions are required by nxge_hio.c |
| */ |
| extern int nxge_m_mmac_remove(void *arg, int slot); |
| extern void nxge_grp_cleanup(p_nxge_t nxge); |
| |
| static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); |
| |
| static nxge_status_t nxge_setup_mutexes(p_nxge_t); |
| static void nxge_destroy_mutexes(p_nxge_t); |
| |
| static nxge_status_t nxge_map_regs(p_nxge_t nxgep); |
| static void nxge_unmap_regs(p_nxge_t nxgep); |
| #ifdef NXGE_DEBUG |
| static void nxge_test_map_regs(p_nxge_t nxgep); |
| #endif |
| |
| static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); |
| static void nxge_remove_intrs(p_nxge_t nxgep); |
| |
| static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); |
| static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); |
| static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); |
| static void nxge_intrs_enable(p_nxge_t nxgep); |
| static void nxge_intrs_disable(p_nxge_t nxgep); |
| |
| static void nxge_suspend(p_nxge_t); |
| static nxge_status_t nxge_resume(p_nxge_t); |
| |
| static nxge_status_t nxge_setup_dev(p_nxge_t); |
| static void nxge_destroy_dev(p_nxge_t); |
| |
| static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); |
| static void nxge_free_mem_pool(p_nxge_t); |
| |
| nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); |
| static void nxge_free_rx_mem_pool(p_nxge_t); |
| |
| nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); |
| static void nxge_free_tx_mem_pool(p_nxge_t); |
| |
| static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, |
| struct ddi_dma_attr *, |
| size_t, ddi_device_acc_attr_t *, uint_t, |
| p_nxge_dma_common_t); |
| |
| static void nxge_dma_mem_free(p_nxge_dma_common_t); |
| static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); |
| |
| static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, |
| p_nxge_dma_common_t *, size_t, size_t, uint32_t *); |
| static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); |
| |
| static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, |
| p_nxge_dma_common_t *, size_t); |
| static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); |
| |
| extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, |
| p_nxge_dma_common_t *, size_t, size_t, uint32_t *); |
| static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); |
| |
| extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, |
| p_nxge_dma_common_t *, |
| size_t); |
| static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); |
| |
| static int nxge_init_common_dev(p_nxge_t); |
| static void nxge_uninit_common_dev(p_nxge_t); |
| extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, |
| char *, caddr_t); |
| #if defined(sun4v) |
| extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep); |
| extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm); |
| #endif |
| |
| /* |
| * The next declarations are for the GLDv3 interface. |
| */ |
| static int nxge_m_start(void *); |
| static void nxge_m_stop(void *); |
| static int nxge_m_multicst(void *, boolean_t, const uint8_t *); |
| static int nxge_m_promisc(void *, boolean_t); |
| static void nxge_m_ioctl(void *, queue_t *, mblk_t *); |
| nxge_status_t nxge_mac_register(p_nxge_t); |
| static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, |
| int slot, int rdctbl, boolean_t usetbl); |
| void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, |
| boolean_t factory); |
| |
| static void nxge_m_getfactaddr(void *, uint_t, uint8_t *); |
| static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); |
| static int nxge_m_setprop(void *, const char *, mac_prop_id_t, |
| uint_t, const void *); |
| static int nxge_m_getprop(void *, const char *, mac_prop_id_t, |
| uint_t, void *); |
| static void nxge_m_propinfo(void *, const char *, mac_prop_id_t, |
| mac_prop_info_handle_t); |
| static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t); |
| static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, |
| const void *); |
| static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *); |
| static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int, |
| mac_ring_info_t *, mac_ring_handle_t); |
| static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t, |
| mac_ring_type_t); |
| static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t, |
| mac_ring_type_t); |
| |
| static void nxge_niu_peu_reset(p_nxge_t nxgep); |
| static void nxge_set_pci_replay_timeout(nxge_t *); |
| |
| char *nxge_priv_props[] = { |
| "_adv_10gfdx_cap", |
| "_adv_pause_cap", |
| "_function_number", |
| "_fw_version", |
| "_port_mode", |
| "_hot_swap_phy", |
| "_rxdma_intr_time", |
| "_rxdma_intr_pkts", |
| "_class_opt_ipv4_tcp", |
| "_class_opt_ipv4_udp", |
| "_class_opt_ipv4_ah", |
| "_class_opt_ipv4_sctp", |
| "_class_opt_ipv6_tcp", |
| "_class_opt_ipv6_udp", |
| "_class_opt_ipv6_ah", |
| "_class_opt_ipv6_sctp", |
| "_soft_lso_enable", |
| NULL |
| }; |
| |
| #define NXGE_NEPTUNE_MAGIC 0x4E584745UL |
| #define MAX_DUMP_SZ 256 |
| |
| #define NXGE_M_CALLBACK_FLAGS \ |
| (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) |
| |
| mac_callbacks_t nxge_m_callbacks = { |
| NXGE_M_CALLBACK_FLAGS, |
| nxge_m_stat, |
| nxge_m_start, |
| nxge_m_stop, |
| nxge_m_promisc, |
| nxge_m_multicst, |
| NULL, |
| NULL, |
| NULL, |
| nxge_m_ioctl, |
| nxge_m_getcapab, |
| NULL, |
| NULL, |
| nxge_m_setprop, |
| nxge_m_getprop, |
| nxge_m_propinfo |
| }; |
| |
| void |
| nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); |
| |
| /* PSARC/2007/453 MSI-X interrupt limit override. */ |
| #define NXGE_MSIX_REQUEST_10G 8 |
| #define NXGE_MSIX_REQUEST_1G 2 |
| static int nxge_create_msi_property(p_nxge_t); |
| /* |
| * For applications that care about the |
| * latency, it was requested by PAE and the |
| * customers that the driver has tunables that |
| * allow the user to tune it to a higher number |
| * interrupts to spread the interrupts among |
| * multiple channels. The DDI framework limits |
| * the maximum number of MSI-X resources to allocate |
| * to 8 (ddi_msix_alloc_limit). If more than 8 |
| * is set, ddi_msix_alloc_limit must be set accordingly. |
| * The default number of MSI interrupts are set to |
| * 8 for 10G and 2 for 1G link. |
| */ |
| #define NXGE_MSIX_MAX_ALLOWED 32 |
| uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G; |
| uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G; |
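
/*
 * For example, to request 16 MSI-X interrupts per 10G port, both the
 * DDI limit and the driver tunable would need to be raised in
 * /etc/system (illustrative):
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */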
| |
| /* |
| * These global variables control the message |
| * output. |
| */ |
| out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; |
| uint64_t nxge_debug_level; |
| |
| /* |
| * This list contains the instance structures for the Neptune |
| * devices present in the system. The lock exists to guarantee |
| * mutually exclusive access to the list. |
| */ |
| void *nxge_list = NULL; |
| void *nxge_hw_list = NULL; |
| nxge_os_mutex_t nxge_common_lock; |
| nxge_os_mutex_t nxgedebuglock; |
| |
| extern uint64_t npi_debug_level; |
| |
| extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); |
| extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); |
| extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); |
| extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); |
| extern void nxge_fm_init(p_nxge_t, |
| ddi_device_acc_attr_t *, |
| ddi_dma_attr_t *); |
| extern void nxge_fm_fini(p_nxge_t); |
| extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); |
| |
| /* |
| * Count used to maintain the number of buffers being used |
| * by Neptune instances and loaned up to the upper layers. |
| */ |
| uint32_t nxge_mblks_pending = 0; |
| |
| /* |
| * Device register access attributes for PIO. |
| */ |
| static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { |
| DDI_DEVICE_ATTR_V1, |
| DDI_STRUCTURE_LE_ACC, |
| DDI_STRICTORDER_ACC, |
| DDI_DEFAULT_ACC |
| }; |
| |
| /* |
| * Device descriptor access attributes for DMA. |
| */ |
| static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { |
| DDI_DEVICE_ATTR_V0, |
| DDI_STRUCTURE_LE_ACC, |
| DDI_STRICTORDER_ACC |
| }; |
| |
| /* |
| * Device buffer access attributes for DMA. |
| */ |
| static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { |
| DDI_DEVICE_ATTR_V0, |
| DDI_STRUCTURE_BE_ACC, |
| DDI_STRICTORDER_ACC |
| }; |
| |
| ddi_dma_attr_t nxge_desc_dma_attr = { |
| DMA_ATTR_V0, /* version number. */ |
| 0, /* low address */ |
| 0xffffffffffffffff, /* high address */ |
| 0xffffffffffffffff, /* address counter max */ |
| #ifndef NIU_PA_WORKAROUND |
| 0x100000, /* alignment */ |
| #else |
| 0x2000, |
| #endif |
| 0xfc00fc, /* dlim_burstsizes */ |
| 0x1, /* minimum transfer size */ |
| 0xffffffffffffffff, /* maximum transfer size */ |
| 0xffffffffffffffff, /* maximum segment size */ |
| 1, /* scatter/gather list length */ |
| (unsigned int) 1, /* granularity */ |
| 0 /* attribute flags */ |
| }; |
| |
| ddi_dma_attr_t nxge_tx_dma_attr = { |
| DMA_ATTR_V0, /* version number. */ |
| 0, /* low address */ |
| 0xffffffffffffffff, /* high address */ |
| 0xffffffffffffffff, /* address counter max */ |
| #if defined(_BIG_ENDIAN) |
| 0x2000, /* alignment */ |
| #else |
| 0x1000, /* alignment */ |
| #endif |
| 0xfc00fc, /* dlim_burstsizes */ |
| 0x1, /* minimum transfer size */ |
| 0xffffffffffffffff, /* maximum transfer size */ |
| 0xffffffffffffffff, /* maximum segment size */ |
| 5, /* scatter/gather list length */ |
| (unsigned int) 1, /* granularity */ |
| 0 /* attribute flags */ |
| }; |
| |
| ddi_dma_attr_t nxge_rx_dma_attr = { |
| DMA_ATTR_V0, /* version number. */ |
| 0, /* low address */ |
| 0xffffffffffffffff, /* high address */ |
| 0xffffffffffffffff, /* address counter max */ |
| 0x2000, /* alignment */ |
| 0xfc00fc, /* dlim_burstsizes */ |
| 0x1, /* minimum transfer size */ |
| 0xffffffffffffffff, /* maximum transfer size */ |
| 0xffffffffffffffff, /* maximum segment size */ |
| 1, /* scatter/gather list length */ |
| (unsigned int) 1, /* granularity */ |
| DDI_DMA_RELAXED_ORDERING /* attribute flags */ |
| }; |
| |
| ddi_dma_lim_t nxge_dma_limits = { |
| (uint_t)0, /* dlim_addr_lo */ |
| (uint_t)0xffffffff, /* dlim_addr_hi */ |
| (uint_t)0xffffffff, /* dlim_cntr_max */ |
| (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ |
| 0x1, /* dlim_minxfer */ |
| 1024 /* dlim_speed */ |
| }; |
| |
| dma_method_t nxge_force_dma = DVMA; |
| |
| /* |
| * dma chunk sizes. |
| * |
| * Try to allocate the largest possible size |
| * so that fewer number of dma chunks would be managed |
| */ |
| #ifdef NIU_PA_WORKAROUND |
| size_t alloc_sizes [] = {0x2000}; |
| #else |
| size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, |
| 0x10000, 0x20000, 0x40000, 0x80000, |
| 0x100000, 0x200000, 0x400000, 0x800000, |
| 0x1000000, 0x2000000, 0x4000000}; |
| #endif |
| |
| /* |
| * Translate "dev_t" to a pointer to the associated "dev_info_t". |
| */ |
| |
| extern void nxge_get_environs(nxge_t *); |
| |
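/*
 * nxge_attach - Device attach entry point. Handles DDI_ATTACH, as
 * well as DDI_RESUME and DDI_PM_RESUME for a previously attached
 * instance.
 */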
| static int |
| nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) |
| { |
| p_nxge_t nxgep = NULL; |
| int instance; |
| int status = DDI_SUCCESS; |
| uint8_t portn; |
| nxge_mmac_t *mmac_info; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); |
| |
| /* |
| * Get the device instance since we'll need to setup |
| * or retrieve a soft state for this instance. |
| */ |
| instance = ddi_get_instance(dip); |
| |
| switch (cmd) { |
| case DDI_ATTACH: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); |
| break; |
| |
| case DDI_RESUME: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); |
| nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); |
| if (nxgep == NULL) { |
| status = DDI_FAILURE; |
| break; |
| } |
| if (nxgep->dip != dip) { |
| status = DDI_FAILURE; |
| break; |
| } |
| if (nxgep->suspended == DDI_PM_SUSPEND) { |
| status = ddi_dev_is_needed(nxgep->dip, 0, 1); |
| } else { |
| status = nxge_resume(nxgep); |
| } |
| goto nxge_attach_exit; |
| |
| case DDI_PM_RESUME: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); |
| nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); |
| if (nxgep == NULL) { |
| status = DDI_FAILURE; |
| break; |
| } |
| if (nxgep->dip != dip) { |
| status = DDI_FAILURE; |
| break; |
| } |
| status = nxge_resume(nxgep); |
| goto nxge_attach_exit; |
| |
| default: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); |
| status = DDI_FAILURE; |
| goto nxge_attach_exit; |
| } |
| |
| |
| if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { |
| status = DDI_FAILURE; |
| goto nxge_attach_exit; |
| } |
| |
| nxgep = ddi_get_soft_state(nxge_list, instance); |
| if (nxgep == NULL) { |
| status = NXGE_ERROR; |
| goto nxge_attach_fail2; |
| } |
| |
| nxgep->nxge_magic = NXGE_MAGIC; |
| |
| nxgep->drv_state = 0; |
| nxgep->dip = dip; |
| nxgep->instance = instance; |
| nxgep->p_dip = ddi_get_parent(dip); |
| nxgep->nxge_debug_level = nxge_debug_level; |
| npi_debug_level = nxge_debug_level; |
| |
| /* Are we a guest running in a Hybrid I/O environment? */ |
| nxge_get_environs(nxgep); |
| |
| status = nxge_map_regs(nxgep); |
| |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); |
| goto nxge_attach_fail3; |
| } |
| |
| nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr); |
| |
| /* Create & initialize the per-Neptune data structure */ |
| /* (even if we're a guest). */ |
| status = nxge_init_common_dev(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "nxge_init_common_dev failed")); |
| goto nxge_attach_fail4; |
| } |
| |
| /* |
| * Software workaround: set the replay timer. |
| */ |
| if (nxgep->niu_type != N2_NIU) { |
| nxge_set_pci_replay_timeout(nxgep); |
| } |
| |
| #if defined(sun4v) |
| /* This is required by nxge_hio_init(), which follows. */ |
| if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) |
| goto nxge_attach_fail4; |
| #endif |
| |
| if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "nxge_hio_init failed")); |
| goto nxge_attach_fail4; |
| } |
| |
| if (nxgep->niu_type == NEPTUNE_2_10GF) { |
| if (nxgep->function_num > 1) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" |
| " function %d. Only functions 0 and 1 are " |
| "supported for this card.", nxgep->function_num)); |
| status = NXGE_ERROR; |
| goto nxge_attach_fail4; |
| } |
| } |
| |
| if (isLDOMguest(nxgep)) { |
| /* |
| * Use the function number here. |
| */ |
| nxgep->mac.portnum = nxgep->function_num; |
| nxgep->mac.porttype = PORT_TYPE_LOGICAL; |
| |
| /* XXX We'll set the MAC address counts to 1 for now. */ |
| mmac_info = &nxgep->nxge_mmac_info; |
| mmac_info->num_mmac = 1; |
| mmac_info->naddrfree = 1; |
| } else { |
| portn = NXGE_GET_PORT_NUM(nxgep->function_num); |
| nxgep->mac.portnum = portn; |
| if ((portn == 0) || (portn == 1)) |
| nxgep->mac.porttype = PORT_TYPE_XMAC; |
| else |
| nxgep->mac.porttype = PORT_TYPE_BMAC; |
| /* |
| * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) |
| * internally, the rest 2 ports use BMAC (1G "Big" MAC). |
| * The two types of MACs have different characterizations. |
| */ |
| mmac_info = &nxgep->nxge_mmac_info; |
| if (nxgep->function_num < 2) { |
| mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; |
| mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; |
| } else { |
| mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; |
| mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; |
| } |
| } |
| /* |
| * Setup the Ndd parameters for the this instance. |
| */ |
| nxge_init_param(nxgep); |
| |
| /* |
| * Setup Register Tracing Buffer. |
| */ |
| npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); |
| |
| /* init stats ptr */ |
| nxge_init_statsp(nxgep); |
| |
| /* |
| * Copy the vpd info from eeprom to a local data |
| * structure, and then check its validity. |
| */ |
| if (!isLDOMguest(nxgep)) { |
| int *regp; |
| uint_t reglen; |
| int rv; |
| |
| nxge_vpd_info_get(nxgep); |
| |
| /* Find the NIU config handle. */ |
| rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, |
| ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, |
| "reg", ®p, ®len); |
| |
| if (rv != DDI_PROP_SUCCESS) { |
| goto nxge_attach_fail5; |
| } |
| /* |
| * The address_hi, that is the first int, in the reg |
| * property consists of config handle, but need to remove |
| * the bits 28-31 which are OBP specific info. |
| */ |
| nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; |
| ddi_prop_free(regp); |
| } |
| |
| /* |
| * Set the defaults for the MTU size. |
| */ |
| nxge_hw_id_init(nxgep); |
| |
| if (isLDOMguest(nxgep)) { |
| uchar_t *prop_val; |
| uint_t prop_len; |
| uint32_t max_frame_size; |
| |
| extern void nxge_get_logical_props(p_nxge_t); |
| |
| nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; |
| nxgep->mac.portmode = PORT_LOGICAL; |
| (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, |
| "phy-type", "virtual transceiver"); |
| |
| nxgep->nports = 1; |
| nxgep->board_ver = 0; /* XXX What? */ |
| |
| /* |
| * local-mac-address property gives us info on which |
| * specific MAC address the Hybrid resource is associated |
| * with. |
| */ |
| if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, |
| "local-mac-address", &prop_val, |
| &prop_len) != DDI_PROP_SUCCESS) { |
| goto nxge_attach_fail5; |
| } |
| if (prop_len != ETHERADDRL) { |
| ddi_prop_free(prop_val); |
| goto nxge_attach_fail5; |
| } |
| ether_copy(prop_val, nxgep->hio_mac_addr); |
| ddi_prop_free(prop_val); |
| nxge_get_logical_props(nxgep); |
| |
| /* |
| * Enable Jumbo property based on the "max-frame-size" |
| * property value. |
| */ |
| max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, |
| nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, |
| "max-frame-size", NXGE_MTU_DEFAULT_MAX); |
| if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && |
| (max_frame_size <= TX_JUMBO_MTU)) { |
| nxgep->mac.is_jumbo = B_TRUE; |
| nxgep->mac.maxframesize = (uint16_t)max_frame_size; |
| nxgep->mac.default_mtu = nxgep->mac.maxframesize - |
| NXGE_EHEADER_VLAN_CRC; |
| } |
| } else { |
| status = nxge_xcvr_find(nxgep); |
| |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " |
| " Couldn't determine card type" |
| " .... exit ")); |
| goto nxge_attach_fail5; |
| } |
| |
| status = nxge_get_config_properties(nxgep); |
| |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "get_hw create failed")); |
| goto nxge_attach_fail; |
| } |
| } |
| |
| /* |
| * Setup the Kstats for the driver. |
| */ |
| nxge_setup_kstats(nxgep); |
| |
| if (!isLDOMguest(nxgep)) |
| nxge_setup_param(nxgep); |
| |
| status = nxge_setup_system_dma_pages(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); |
| goto nxge_attach_fail; |
| } |
| |
| |
| if (!isLDOMguest(nxgep)) |
| nxge_hw_init_niu_common(nxgep); |
| |
| status = nxge_setup_mutexes(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); |
| goto nxge_attach_fail; |
| } |
| |
| #if defined(sun4v) |
| if (isLDOMguest(nxgep)) { |
| /* Find our VR & channel sets. */ |
| status = nxge_hio_vr_add(nxgep); |
| if (status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "nxge_hio_vr_add failed")); |
| (void) hsvc_unregister(&nxgep->niu_hsvc); |
| nxgep->niu_hsvc_available = B_FALSE; |
| goto nxge_attach_fail; |
| } |
| goto nxge_attach_exit; |
| } |
| #endif |
| |
| status = nxge_setup_dev(nxgep); |
| if (status != DDI_SUCCESS) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); |
| goto nxge_attach_fail; |
| } |
| |
| status = nxge_add_intrs(nxgep); |
| if (status != DDI_SUCCESS) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); |
| goto nxge_attach_fail; |
| } |
| |
| /* If a guest, register with vio_net instead. */ |
| if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "unable to register to mac layer (%d)", status)); |
| goto nxge_attach_fail; |
| } |
| |
| mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "registered to mac (instance %d)", instance)); |
| |
| /* nxge_link_monitor calls xcvr.check_link recursively */ |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); |
| |
| goto nxge_attach_exit; |
| |
| nxge_attach_fail: |
| nxge_unattach(nxgep); |
| goto nxge_attach_fail1; |
| |
| nxge_attach_fail5: |
| /* |
| * Tear down the ndd parameters setup. |
| */ |
| nxge_destroy_param(nxgep); |
| |
| /* |
| * Tear down the kstat setup. |
| */ |
| nxge_destroy_kstats(nxgep); |
| |
| nxge_attach_fail4: |
| if (nxgep->nxge_hw_p) { |
| nxge_uninit_common_dev(nxgep); |
| nxgep->nxge_hw_p = NULL; |
| } |
| |
| nxge_attach_fail3: |
| /* |
| * Unmap the register setup. |
| */ |
| nxge_unmap_regs(nxgep); |
| |
| nxge_fm_fini(nxgep); |
| |
| nxge_attach_fail2: |
	ddi_soft_state_free(nxge_list, instance);
| |
| nxge_attach_fail1: |
| if (status != NXGE_OK) |
| status = (NXGE_ERROR | NXGE_DDI_FAILED); |
| nxgep = NULL; |
| |
| nxge_attach_exit: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", |
| status)); |
| |
| return (status); |
| } |
| |
| static int |
| nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) |
| { |
| int status = DDI_SUCCESS; |
| int instance; |
| p_nxge_t nxgep = NULL; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); |
| instance = ddi_get_instance(dip); |
| nxgep = ddi_get_soft_state(nxge_list, instance); |
| if (nxgep == NULL) { |
| status = DDI_FAILURE; |
| goto nxge_detach_exit; |
| } |
| |
| switch (cmd) { |
| case DDI_DETACH: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); |
| break; |
| |
| case DDI_PM_SUSPEND: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); |
| nxgep->suspended = DDI_PM_SUSPEND; |
| nxge_suspend(nxgep); |
| break; |
| |
| case DDI_SUSPEND: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); |
| if (nxgep->suspended != DDI_PM_SUSPEND) { |
| nxgep->suspended = DDI_SUSPEND; |
| nxge_suspend(nxgep); |
| } |
| break; |
| |
| default: |
| status = DDI_FAILURE; |
| } |
| |
| if (cmd != DDI_DETACH) |
| goto nxge_detach_exit; |
| |
| /* |
| * Stop the xcvr polling. |
| */ |
| nxgep->suspended = cmd; |
| |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); |
| |
| if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "<== nxge_detach status = 0x%08X", status)); |
| return (DDI_FAILURE); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_detach (mac_unregister) status = 0x%08X", status)); |
| |
| nxge_unattach(nxgep); |
| nxgep = NULL; |
| |
| nxge_detach_exit: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", |
| status)); |
| |
| return (status); |
| } |
| |
| static void |
| nxge_unattach(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); |
| |
| if (nxgep == NULL || nxgep->dev_regs == NULL) { |
| return; |
| } |
| |
| nxgep->nxge_magic = 0; |
| |
| if (nxgep->nxge_timerid) { |
| nxge_stop_timer(nxgep, nxgep->nxge_timerid); |
| nxgep->nxge_timerid = 0; |
| } |
| |
| /* |
| * If this flag is set, it will affect the Neptune |
| * only. |
| */ |
| if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) { |
| nxge_niu_peu_reset(nxgep); |
| } |
| |
| #if defined(sun4v) |
| if (isLDOMguest(nxgep)) { |
| (void) nxge_hio_vr_release(nxgep); |
| } |
| #endif |
| |
| if (nxgep->nxge_hw_p) { |
| nxge_uninit_common_dev(nxgep); |
| nxgep->nxge_hw_p = NULL; |
| } |
| |
| #if defined(sun4v) |
| if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { |
| (void) hsvc_unregister(&nxgep->niu_hsvc); |
| nxgep->niu_hsvc_available = B_FALSE; |
| } |
| #endif |
| /* |
| * Stop any further interrupts. |
| */ |
| nxge_remove_intrs(nxgep); |
| |
| /* |
| * Stop the device and free resources. |
| */ |
| if (!isLDOMguest(nxgep)) { |
| nxge_destroy_dev(nxgep); |
| } |
| |
| /* |
| * Tear down the ndd parameters setup. |
| */ |
| nxge_destroy_param(nxgep); |
| |
| /* |
| * Tear down the kstat setup. |
| */ |
| nxge_destroy_kstats(nxgep); |
| |
| /* |
| * Free any memory allocated for PHY properties |
| */ |
| if (nxgep->phy_prop.cnt > 0) { |
| KMEM_FREE(nxgep->phy_prop.arr, |
| sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt); |
| nxgep->phy_prop.cnt = 0; |
| } |
| |
| /* |
| * Destroy all mutexes. |
| */ |
| nxge_destroy_mutexes(nxgep); |
| |
| /* |
| * Remove the list of ndd parameters which |
| * were setup during attach. |
| */ |
| if (nxgep->dip) { |
| NXGE_DEBUG_MSG((nxgep, OBP_CTL, |
| " nxge_unattach: remove all properties")); |
| |
| (void) ddi_prop_remove_all(nxgep->dip); |
| } |
| |
| #if NXGE_PROPERTY |
| nxge_remove_hard_properties(nxgep); |
| #endif |
| |
| /* |
| * Unmap the register setup. |
| */ |
| nxge_unmap_regs(nxgep); |
| |
| nxge_fm_fini(nxgep); |
| |
| ddi_soft_state_free(nxge_list, nxgep->instance); |
| |
| NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); |
| } |
| |
| #if defined(sun4v) |
| int |
| nxge_hsvc_register(nxge_t *nxgep) |
| { |
| nxge_status_t status; |
| int i, j; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register")); |
| if (nxgep->niu_type != N2_NIU) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register")); |
| return (DDI_SUCCESS); |
| } |
| |
| /* |
| * Currently, the NIU Hypervisor API supports two major versions: |
| * version 1 and 2. |
| * If Hypervisor introduces a higher major or minor version, |
| * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly. |
| */ |
| nxgep->niu_hsvc_available = B_FALSE; |
| bcopy(&niu_hsvc, &nxgep->niu_hsvc, |
| sizeof (hsvc_info_t)); |
| |
| for (i = NIU_MAJOR_HI; i > 0; i--) { |
| nxgep->niu_hsvc.hsvc_major = i; |
| for (j = NIU_MINOR_HI; j >= 0; j--) { |
| nxgep->niu_hsvc.hsvc_minor = j; |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_hsvc_register: %s: negotiating " |
| "hypervisor services revision %d " |
| "group: 0x%lx major: 0x%lx " |
| "minor: 0x%lx", |
| nxgep->niu_hsvc.hsvc_modname, |
| nxgep->niu_hsvc.hsvc_rev, |
| nxgep->niu_hsvc.hsvc_group, |
| nxgep->niu_hsvc.hsvc_major, |
			    nxgep->niu_hsvc.hsvc_minor));
| |
| if ((status = hsvc_register(&nxgep->niu_hsvc, |
| &nxgep->niu_min_ver)) == 0) { |
| /* Use the supported minor */ |
| nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver; |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_hsvc_register: %s: negotiated " |
| "hypervisor services revision %d " |
| "group: 0x%lx major: 0x%lx " |
| "minor: 0x%lx (niu_min_ver 0x%lx)", |
| nxgep->niu_hsvc.hsvc_modname, |
| nxgep->niu_hsvc.hsvc_rev, |
| nxgep->niu_hsvc.hsvc_group, |
| nxgep->niu_hsvc.hsvc_major, |
| nxgep->niu_hsvc.hsvc_minor, |
| nxgep->niu_min_ver)); |
| |
| nxgep->niu_hsvc_available = B_TRUE; |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_hsvc_register: " |
| "NIU Hypervisor service enabled")); |
| return (DDI_SUCCESS); |
| } |
| |
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiation failed - "
			    "try lower major number "
| "hypervisor services revision %d " |
| "group: 0x%lx major: 0x%lx minor: 0x%lx " |
| "errno: %d", |
| nxgep->niu_hsvc.hsvc_modname, |
| nxgep->niu_hsvc.hsvc_rev, |
| nxgep->niu_hsvc.hsvc_group, |
| nxgep->niu_hsvc.hsvc_major, |
| nxgep->niu_hsvc.hsvc_minor, status)); |
| } |
| } |
| |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "nxge_hsvc_register: %s: cannot negotiate " |
| "hypervisor services revision %d group: 0x%lx " |
| "major: 0x%lx minor: 0x%lx errno: %d", |
| niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, |
| niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, |
| niu_hsvc.hsvc_minor, status)); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_hsvc_register: Register to NIU Hypervisor failed")); |
| |
| return (DDI_FAILURE); |
| } |
| #endif |
| |
| static char n2_siu_name[] = "niu"; |
| |
| static nxge_status_t |
| nxge_map_regs(p_nxge_t nxgep) |
| { |
| int ddi_status = DDI_SUCCESS; |
| p_dev_regs_t dev_regs; |
| char buf[MAXPATHLEN + 1]; |
| char *devname; |
| #ifdef NXGE_DEBUG |
| char *sysname; |
| #endif |
| off_t regsize; |
| nxge_status_t status = NXGE_OK; |
| #if !defined(_BIG_ENDIAN) |
| off_t pci_offset; |
| uint16_t pcie_devctl; |
| #endif |
| |
| if (isLDOMguest(nxgep)) { |
| return (nxge_guest_regs_map(nxgep)); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); |
| nxgep->dev_regs = NULL; |
| dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); |
| dev_regs->nxge_regh = NULL; |
| dev_regs->nxge_pciregh = NULL; |
| dev_regs->nxge_msix_regh = NULL; |
| dev_regs->nxge_vir_regh = NULL; |
| dev_regs->nxge_vir2_regh = NULL; |
| nxgep->niu_type = NIU_TYPE_NONE; |
| |
| devname = ddi_pathname(nxgep->dip, buf); |
| ASSERT(strlen(devname) > 0); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: pathname devname %s", devname)); |
| |
| /* |
| * The driver is running on a N2-NIU system if devname is something |
| * like "/niu@80/network@0" |
| */ |
| if (strstr(devname, n2_siu_name)) { |
| /* N2/NIU */ |
| nxgep->niu_type = N2_NIU; |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: N2/NIU devname %s", devname)); |
| /* |
| * Get function number: |
| * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1" |
| */ |
| nxgep->function_num = |
| (devname[strlen(devname) -1] == '1' ? 1 : 0); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: N2/NIU function number %d", |
| nxgep->function_num)); |
| } else { |
| int *prop_val; |
| uint_t prop_len; |
| uint8_t func_num; |
| |
| if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, |
| 0, "reg", |
| &prop_val, &prop_len) != DDI_PROP_SUCCESS) { |
| NXGE_DEBUG_MSG((nxgep, VPD_CTL, |
| "Reg property not found")); |
| ddi_status = DDI_FAILURE; |
| goto nxge_map_regs_fail0; |
| |
| } else { |
| func_num = (prop_val[0] >> 8) & 0x7; |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "Reg property found: fun # %d", |
| func_num)); |
| nxgep->function_num = func_num; |
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				ddi_prop_free(prop_val);
				return (NXGE_OK);
			}
| ddi_prop_free(prop_val); |
| } |
| } |
| |
| switch (nxgep->niu_type) { |
| default: |
| (void) ddi_dev_regsize(nxgep->dip, 0, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: pci config size 0x%x", regsize)); |
| |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 0, |
| (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs, nxge bus config regs failed")); |
| goto nxge_map_regs_fail0; |
| } |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_reg: PCI config addr 0x%0llx " |
| " handle 0x%0llx", dev_regs->nxge_pciregp, |
| dev_regs->nxge_pciregh)); |
| /* |
| * IMP IMP |
| * workaround for bit swapping bug in HW |
| * which ends up in no-snoop = yes |
| * resulting, in DMA not synched properly |
| */ |
| #if !defined(_BIG_ENDIAN) |
| /* workarounds for x86 systems */ |
| pci_offset = 0x80 + PCIE_DEVCTL; |
| pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh, |
| pci_offset); |
| pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP; |
| pcie_devctl |= PCIE_DEVCTL_RO_EN; |
| pci_config_put16(dev_regs->nxge_pciregh, pci_offset, |
| pcie_devctl); |
| #endif |
| |
| (void) ddi_dev_regsize(nxgep->dip, 1, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: pio size 0x%x", regsize)); |
| /* set up the device mapped register */ |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 1, |
| (caddr_t *)&(dev_regs->nxge_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for Neptune global reg failed")); |
| goto nxge_map_regs_fail1; |
| } |
| |
| /* set up the msi/msi-x mapped register */ |
| (void) ddi_dev_regsize(nxgep->dip, 2, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: msix size 0x%x", regsize)); |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 2, |
| (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for msi reg failed")); |
| goto nxge_map_regs_fail2; |
| } |
| |
| /* set up the vio region mapped register */ |
| (void) ddi_dev_regsize(nxgep->dip, 3, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: vio size 0x%x", regsize)); |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 3, |
| (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); |
| |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for nxge vio reg failed")); |
| goto nxge_map_regs_fail3; |
| } |
| nxgep->dev_regs = dev_regs; |
| |
| NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); |
| NPI_PCI_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_pciregp); |
| NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); |
| NPI_MSI_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_msix_regp); |
| |
| NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); |
| NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); |
| |
| NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); |
| NPI_REG_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_regp); |
| |
| NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); |
| NPI_VREG_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_vir_regp); |
| |
| break; |
| |
| case N2_NIU: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); |
| /* |
| * Set up the device mapped register (FWARC 2006/556) |
| * (changed back to 1: reg starts at 1!) |
| */ |
| (void) ddi_dev_regsize(nxgep->dip, 1, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: dev size 0x%x", regsize)); |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 1, |
| (caddr_t *)&(dev_regs->nxge_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); |
| |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for N2/NIU, global reg failed ")); |
| goto nxge_map_regs_fail1; |
| } |
| |
| /* set up the first vio region mapped register */ |
| (void) ddi_dev_regsize(nxgep->dip, 2, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: vio (1) size 0x%x", regsize)); |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 2, |
| (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); |
| |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for nxge vio reg failed")); |
| goto nxge_map_regs_fail2; |
| } |
| /* set up the second vio region mapped register */ |
| (void) ddi_dev_regsize(nxgep->dip, 3, ®size); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "nxge_map_regs: vio (3) size 0x%x", regsize)); |
| ddi_status = ddi_regs_map_setup(nxgep->dip, 3, |
| (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, |
| &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); |
| |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_map_regs for nxge vio2 reg failed")); |
| goto nxge_map_regs_fail3; |
| } |
| nxgep->dev_regs = dev_regs; |
| |
| NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); |
| NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); |
| |
| NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); |
| NPI_REG_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_regp); |
| |
| NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); |
| NPI_VREG_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_vir_regp); |
| |
| NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); |
| NPI_V2REG_ADD_HANDLE_SET(nxgep, |
| (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); |
| |
| break; |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " |
| " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); |
| |
| goto nxge_map_regs_exit; |
| nxge_map_regs_fail3: |
| if (dev_regs->nxge_msix_regh) { |
| ddi_regs_map_free(&dev_regs->nxge_msix_regh); |
| } |
| if (dev_regs->nxge_vir_regh) { |
| ddi_regs_map_free(&dev_regs->nxge_regh); |
| } |
| nxge_map_regs_fail2: |
| if (dev_regs->nxge_regh) { |
| ddi_regs_map_free(&dev_regs->nxge_regh); |
| } |
| nxge_map_regs_fail1: |
| if (dev_regs->nxge_pciregh) { |
| ddi_regs_map_free(&dev_regs->nxge_pciregh); |
| } |
| nxge_map_regs_fail0: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); |
| kmem_free(dev_regs, sizeof (dev_regs_t)); |
| |
| nxge_map_regs_exit: |
| if (ddi_status != DDI_SUCCESS) |
| status |= (NXGE_ERROR | NXGE_DDI_FAILED); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); |
| return (status); |
| } |
| |
| static void |
| nxge_unmap_regs(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); |
| |
| if (isLDOMguest(nxgep)) { |
| nxge_guest_regs_map_free(nxgep); |
| return; |
| } |
| |
| if (nxgep->dev_regs) { |
| if (nxgep->dev_regs->nxge_pciregh) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_unmap_regs: bus")); |
| ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); |
| nxgep->dev_regs->nxge_pciregh = NULL; |
| } |
| if (nxgep->dev_regs->nxge_regh) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_unmap_regs: device registers")); |
| ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); |
| nxgep->dev_regs->nxge_regh = NULL; |
| } |
| if (nxgep->dev_regs->nxge_msix_regh) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_unmap_regs: device interrupts")); |
| ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); |
| nxgep->dev_regs->nxge_msix_regh = NULL; |
| } |
| if (nxgep->dev_regs->nxge_vir_regh) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_unmap_regs: vio region")); |
| ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); |
| nxgep->dev_regs->nxge_vir_regh = NULL; |
| } |
| if (nxgep->dev_regs->nxge_vir2_regh) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_unmap_regs: vio2 region")); |
| ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); |
| nxgep->dev_regs->nxge_vir2_regh = NULL; |
| } |
| |
| kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); |
| nxgep->dev_regs = NULL; |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); |
| } |
| |
| static nxge_status_t |
| nxge_setup_mutexes(p_nxge_t nxgep) |
| { |
| int ddi_status = DDI_SUCCESS; |
| nxge_status_t status = NXGE_OK; |
| nxge_classify_t *classify_ptr; |
| int partition; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); |
| |
| /* |
| * Get the interrupt cookie so the mutexes can be |
| * Initialized. |
| */ |
| if (isLDOMguest(nxgep)) { |
| nxgep->interrupt_cookie = 0; |
| } else { |
| ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, |
| &nxgep->interrupt_cookie); |
| |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "<== nxge_setup_mutexes: failed 0x%x", |
| ddi_status)); |
| goto nxge_setup_mutexes_exit; |
| } |
| } |
| |
| cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); |
| MUTEX_INIT(&nxgep->poll_lock, NULL, |
| MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| |
| /* |
| * Initialize mutexes for this device. |
| */ |
| MUTEX_INIT(nxgep->genlock, NULL, |
| MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| MUTEX_INIT(&nxgep->ouraddr_lock, NULL, |
| MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| MUTEX_INIT(&nxgep->mif_lock, NULL, |
| MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| MUTEX_INIT(&nxgep->group_lock, NULL, |
| MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| RW_INIT(&nxgep->filter_lock, NULL, |
| RW_DRIVER, (void *)nxgep->interrupt_cookie); |
| |
| classify_ptr = &nxgep->classifier; |
| /* |
| * FFLP Mutexes are never used in interrupt context |
| * as fflp operation can take very long time to |
| * complete and hence not suitable to invoke from interrupt |
| * handlers. |
| */ |
| MUTEX_INIT(&classify_ptr->tcam_lock, NULL, |
| NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { |
| MUTEX_INIT(&classify_ptr->fcram_lock, NULL, |
| NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| for (partition = 0; partition < MAX_PARTITION; partition++) { |
| MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, |
| NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); |
| } |
| } |
| |
| nxge_setup_mutexes_exit: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_setup_mutexes status = %x", status)); |
| |
| if (ddi_status != DDI_SUCCESS) |
| status |= (NXGE_ERROR | NXGE_DDI_FAILED); |
| |
| return (status); |
| } |
| |
| static void |
| nxge_destroy_mutexes(p_nxge_t nxgep) |
| { |
| int partition; |
| nxge_classify_t *classify_ptr; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); |
| RW_DESTROY(&nxgep->filter_lock); |
| MUTEX_DESTROY(&nxgep->group_lock); |
| MUTEX_DESTROY(&nxgep->mif_lock); |
| MUTEX_DESTROY(&nxgep->ouraddr_lock); |
| MUTEX_DESTROY(nxgep->genlock); |
| |
| classify_ptr = &nxgep->classifier; |
| MUTEX_DESTROY(&classify_ptr->tcam_lock); |
| |
| /* Destroy all polling resources. */ |
| MUTEX_DESTROY(&nxgep->poll_lock); |
| cv_destroy(&nxgep->poll_cv); |
| |
| /* free data structures, based on HW type */ |
| if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { |
| MUTEX_DESTROY(&classify_ptr->fcram_lock); |
| for (partition = 0; partition < MAX_PARTITION; partition++) { |
| MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); |
| } |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); |
| } |
| |
| nxge_status_t |
| nxge_init(p_nxge_t nxgep) |
| { |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); |
| |
| if (nxgep->drv_state & STATE_HW_INITIALIZED) { |
| return (status); |
| } |
| |
| /* |
| * Allocate system memory for the receive/transmit buffer blocks |
| * and receive/transmit descriptor rings. |
| */ |
| status = nxge_alloc_mem_pool(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); |
| goto nxge_init_fail1; |
| } |
| |
| if (!isLDOMguest(nxgep)) { |
| /* |
| * Initialize and enable the TXC registers. |
| * (Globally enable the Tx controller, |
| * enable the port, configure the dma channel bitmap, |
| * configure the max burst size). |
| */ |
| status = nxge_txc_init(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, |
| NXGE_ERR_CTL, "init txc failed\n")); |
| goto nxge_init_fail2; |
| } |
| } |
| |
| /* |
| * Initialize and enable TXDMA channels. |
| */ |
| status = nxge_init_txdma_channels(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); |
| goto nxge_init_fail3; |
| } |
| |
| /* |
| * Initialize and enable RXDMA channels. |
| */ |
| status = nxge_init_rxdma_channels(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); |
| goto nxge_init_fail4; |
| } |
| |
| /* |
| * The guest domain is now done. |
| */ |
| if (isLDOMguest(nxgep)) { |
| nxgep->drv_state |= STATE_HW_INITIALIZED; |
| goto nxge_init_exit; |
| } |
| |
| /* |
| * Initialize TCAM and FCRAM (Neptune). |
| */ |
| status = nxge_classify_init(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); |
| goto nxge_init_fail5; |
| } |
| |
| /* |
| * Initialize ZCP |
| */ |
| status = nxge_zcp_init(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); |
| goto nxge_init_fail5; |
| } |
| |
| /* |
| * Initialize IPP. |
| */ |
| status = nxge_ipp_init(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); |
| goto nxge_init_fail5; |
| } |
| |
| /* |
| * Initialize the MAC block. |
| */ |
| status = nxge_mac_init(nxgep); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); |
| goto nxge_init_fail5; |
| } |
| |
| /* |
| * Enable the interrrupts for DDI. |
| */ |
| nxge_intrs_enable(nxgep); |
| |
| nxgep->drv_state |= STATE_HW_INITIALIZED; |
| |
| goto nxge_init_exit; |
| |
| nxge_init_fail5: |
| nxge_uninit_rxdma_channels(nxgep); |
| nxge_init_fail4: |
| nxge_uninit_txdma_channels(nxgep); |
| nxge_init_fail3: |
| if (!isLDOMguest(nxgep)) { |
| (void) nxge_txc_uninit(nxgep); |
| } |
| nxge_init_fail2: |
| nxge_free_mem_pool(nxgep); |
| nxge_init_fail1: |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "<== nxge_init status (failed) = 0x%08x", status)); |
| return (status); |
| |
| nxge_init_exit: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", |
| status)); |
| return (status); |
| } |
| |
| |
| timeout_id_t |
| nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) |
| { |
| if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { |
| return (timeout(func, (caddr_t)nxgep, |
| drv_usectohz(1000 * msec))); |
| } |
| return (NULL); |
| } |
| |
| /*ARGSUSED*/ |
| void |
| nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) |
| { |
| if (timerid) { |
| (void) untimeout(timerid); |
| } |
| } |
| |
| void |
| nxge_uninit(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); |
| |
| if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_uninit: not initialized")); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_uninit")); |
| return; |
| } |
| |
| if (!isLDOMguest(nxgep)) { |
| /* |
| * Reset the receive MAC side. |
| */ |
| (void) nxge_rx_mac_disable(nxgep); |
| |
| /* |
| * Drain the IPP. |
| */ |
| (void) nxge_ipp_drain(nxgep); |
| } |
| |
| /* stop timer */ |
| if (nxgep->nxge_timerid) { |
| nxge_stop_timer(nxgep, nxgep->nxge_timerid); |
| nxgep->nxge_timerid = 0; |
| } |
| |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); |
| (void) nxge_intr_hw_disable(nxgep); |
| |
| |
| /* Disable and soft reset the IPP */ |
| if (!isLDOMguest(nxgep)) |
| (void) nxge_ipp_disable(nxgep); |
| |
| /* Free classification resources */ |
| (void) nxge_classify_uninit(nxgep); |
| |
| /* |
| * Reset the transmit/receive DMA side. |
| */ |
| (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); |
| (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); |
| |
| nxge_uninit_txdma_channels(nxgep); |
| nxge_uninit_rxdma_channels(nxgep); |
| |
| /* |
| * Reset the transmit MAC side. |
| */ |
| (void) nxge_tx_mac_disable(nxgep); |
| |
| nxge_free_mem_pool(nxgep); |
| |
| /* |
| * Start the timer if the reset flag is not set. |
| * If this reset flag is set, the link monitor |
| * will not be started in order to stop furthur bus |
| * activities coming from this interface. |
| * The driver will start the monitor function |
| * if the interface was initialized again later. |
| */ |
| if (!nxge_peu_reset_enable) { |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); |
| } |
| |
| nxgep->drv_state &= ~STATE_HW_INITIALIZED; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " |
| "nxge_mblks_pending %d", nxge_mblks_pending)); |
| } |
| |
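/*
 * nxge_get64 - Read a 64-bit device register. The first 8 bytes of
 * the mblk carry the register offset to read; the value read is
 * copied back into the same mblk.
 */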
| void |
| nxge_get64(p_nxge_t nxgep, p_mblk_t mp) |
| { |
| uint64_t reg; |
| uint64_t regdata; |
| int i, retry; |
| |
| bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); |
| regdata = 0; |
| retry = 1; |
| |
| for (i = 0; i < retry; i++) { |
| NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); |
| } |
| bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); |
| } |
| |
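/*
 * nxge_put64 - Write a 64-bit device register. The mblk carries the
 * register offset in its first 8 bytes, followed by the 64-bit value
 * to write.
 */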
| void |
| nxge_put64(p_nxge_t nxgep, p_mblk_t mp) |
| { |
| uint64_t reg; |
| uint64_t buf[2]; |
| |
| bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); |
| reg = buf[0]; |
| |
| NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); |
| } |
| |
| /*ARGSUSED*/ |
| /*VARARGS*/ |
| void |
| nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) |
| { |
| char msg_buffer[1048]; |
| char prefix_buffer[32]; |
| int instance; |
| uint64_t debug_level; |
| int cmn_level = CE_CONT; |
| va_list ap; |
| |
| if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { |
| /* In case a developer has changed nxge_debug_level. */ |
| if (nxgep->nxge_debug_level != nxge_debug_level) |
| nxgep->nxge_debug_level = nxge_debug_level; |
| } |
| |
| debug_level = (nxgep == NULL) ? nxge_debug_level : |
| nxgep->nxge_debug_level; |
| |
| if ((level & debug_level) || |
| (level == NXGE_NOTE) || |
| (level == NXGE_ERR_CTL)) { |
| /* do the msg processing */ |
| MUTEX_ENTER(&nxgedebuglock); |
| |
| if ((level & NXGE_NOTE)) { |
| cmn_level = CE_NOTE; |
| } |
| |
| if (level & NXGE_ERR_CTL) { |
| cmn_level = CE_WARN; |
| } |
| |
| va_start(ap, fmt); |
		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
| va_end(ap); |
| if (nxgep == NULL) { |
| instance = -1; |
| (void) sprintf(prefix_buffer, "%s :", "nxge"); |
| } else { |
| instance = nxgep->instance; |
| (void) sprintf(prefix_buffer, |
| "%s%d :", "nxge", instance); |
| } |
| |
| MUTEX_EXIT(&nxgedebuglock); |
| cmn_err(cmn_level, "!%s %s\n", |
| prefix_buffer, msg_buffer); |
| |
| } |
| } |
| |
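/*
 * nxge_dump_packet - Format packet bytes as colon-separated hex for
 * debug output. At most MAX_DUMP_SZ bytes are dumped; for larger
 * buffers the leading and trailing MAX_DUMP_SZ/2 bytes are shown,
 * separated by a run of dots. Returns a pointer to a static buffer,
 * so this routine is not MT-safe.
 */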
| char * |
| nxge_dump_packet(char *addr, int size) |
| { |
| uchar_t *ap = (uchar_t *)addr; |
| int i; |
| static char etherbuf[1024]; |
| char *cp = etherbuf; |
| char digits[] = "0123456789abcdef"; |
| |
| if (!size) |
| size = 60; |
| |
| if (size > MAX_DUMP_SZ) { |
| /* Dump the leading bytes */ |
| for (i = 0; i < MAX_DUMP_SZ/2; i++) { |
| if (*ap > 0x0f) |
| *cp++ = digits[*ap >> 4]; |
| *cp++ = digits[*ap++ & 0xf]; |
| *cp++ = ':'; |
| } |
| for (i = 0; i < 20; i++) |
| *cp++ = '.'; |
| /* Dump the last MAX_DUMP_SZ/2 bytes */ |
| ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); |
| for (i = 0; i < MAX_DUMP_SZ/2; i++) { |
| if (*ap > 0x0f) |
| *cp++ = digits[*ap >> 4]; |
| *cp++ = digits[*ap++ & 0xf]; |
| *cp++ = ':'; |
| } |
| } else { |
| for (i = 0; i < size; i++) { |
| if (*ap > 0x0f) |
| *cp++ = digits[*ap >> 4]; |
| *cp++ = digits[*ap++ & 0xf]; |
| *cp++ = ':'; |
| } |
| } |
| *--cp = 0; |
| return (etherbuf); |
| } |
| |
| #ifdef NXGE_DEBUG |
| static void |
| nxge_test_map_regs(p_nxge_t nxgep) |
| { |
| ddi_acc_handle_t cfg_handle; |
| p_pci_cfg_t cfg_ptr; |
| ddi_acc_handle_t dev_handle; |
| char *dev_ptr; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); |
| |
| dev_handle = nxgep->dev_regs->nxge_regh; |
| dev_ptr = (char *)nxgep->dev_regs->nxge_regp; |
| |
| if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { |
| cfg_handle = nxgep->dev_regs->nxge_pciregh; |
| cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "Neptune PCI cfg_ptr vendor id ptr 0x%llx", |
| &cfg_ptr->vendorid)); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "\tvendorid 0x%x devid 0x%x", |
| NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), |
| NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " |
| "bar1c 0x%x", |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "\nNeptune PCI BAR: base20 0x%x base24 0x%x " |
| "base 28 0x%x bar2c 0x%x\n", |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "\nNeptune PCI BAR: base30 0x%x\n", |
| NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "first 0x%llx second 0x%llx third 0x%llx " |
| "last 0x%llx ", |
| NXGE_PIO_READ64(dev_handle, |
| (uint64_t *)(dev_ptr + 0), 0), |
| NXGE_PIO_READ64(dev_handle, |
| (uint64_t *)(dev_ptr + 8), 0), |
| NXGE_PIO_READ64(dev_handle, |
| (uint64_t *)(dev_ptr + 16), 0), |
| NXGE_PIO_READ64(dev_handle, |
| (uint64_t *)(dev_ptr + 24), 0))); |
| } |
| } |
| |
| #endif |
| |
| static void |
| nxge_suspend(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); |
| |
| nxge_intrs_disable(nxgep); |
| nxge_destroy_dev(nxgep); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); |
| } |
| |
| static nxge_status_t |
| nxge_resume(p_nxge_t nxgep) |
| { |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); |
| |
| nxgep->suspended = DDI_RESUME; |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); |
| (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); |
| (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); |
| (void) nxge_rx_mac_enable(nxgep); |
| (void) nxge_tx_mac_enable(nxgep); |
| nxge_intrs_enable(nxgep); |
| nxgep->suspended = 0; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_resume status = 0x%x", status)); |
| return (status); |
| } |
| |
| static nxge_status_t |
| nxge_setup_dev(p_nxge_t nxgep) |
| { |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", |
| nxgep->mac.portnum)); |
| |
| status = nxge_link_init(nxgep); |
| |
| if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "port%d Bad register acc handle", nxgep->mac.portnum)); |
| status = NXGE_ERROR; |
| } |
| |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| " nxge_setup_dev status " |
| "(xcvr init 0x%08x)", status)); |
| goto nxge_setup_dev_exit; |
| } |
| |
| nxge_setup_dev_exit: |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_setup_dev port %d status = 0x%08x", |
| nxgep->mac.portnum, status)); |
| |
| return (status); |
| } |
| |
| static void |
| nxge_destroy_dev(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); |
| |
| (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); |
| |
| (void) nxge_hw_stop(nxgep); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); |
| } |
| |
| static nxge_status_t |
| nxge_setup_system_dma_pages(p_nxge_t nxgep) |
| { |
| int ddi_status = DDI_SUCCESS; |
| uint_t count; |
| ddi_dma_cookie_t cookie; |
| uint_t iommu_pagesize; |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); |
| nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); |
| if (nxgep->niu_type != N2_NIU) { |
| iommu_pagesize = dvma_pagesize(nxgep->dip); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " |
| " default_block_size %d iommu_pagesize %d", |
| nxgep->sys_page_sz, |
| ddi_ptob(nxgep->dip, (ulong_t)1), |
| nxgep->rx_default_block_size, |
| iommu_pagesize)); |
| |
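| /* |
| * Clamp the page size used for buffer alignment: when the IOMMU |
| * page size matches the system page size, cap it at 16K; |
| * otherwise never let it exceed the IOMMU page size. |
| */ |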
| if (iommu_pagesize != 0) { |
| if (nxgep->sys_page_sz == iommu_pagesize) { |
| if (iommu_pagesize > 0x4000) |
| nxgep->sys_page_sz = 0x4000; |
| } else { |
| if (nxgep->sys_page_sz > iommu_pagesize) |
| nxgep->sys_page_sz = iommu_pagesize; |
| } |
| } |
| } |
| nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " |
| "default_block_size %d page mask %d", |
| nxgep->sys_page_sz, |
| ddi_ptob(nxgep->dip, (ulong_t)1), |
| nxgep->rx_default_block_size, |
| nxgep->sys_page_mask)); |
| |
| switch (nxgep->sys_page_sz) { |
| default: |
| nxgep->sys_page_sz = 0x1000; |
| nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); |
| nxgep->rx_default_block_size = 0x1000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_4K; |
| break; |
| case 0x1000: |
| nxgep->rx_default_block_size = 0x1000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_4K; |
| break; |
| case 0x2000: |
| nxgep->rx_default_block_size = 0x2000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_8K; |
| break; |
| case 0x4000: |
| nxgep->rx_default_block_size = 0x4000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_16K; |
| break; |
| case 0x8000: |
| nxgep->rx_default_block_size = 0x8000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_32K; |
| break; |
| } |
| |
| #ifndef USE_RX_BIG_BUF |
| nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; |
| #else |
| nxgep->rx_default_block_size = 0x2000; |
| nxgep->rx_bksize_code = RBR_BKSIZE_8K; |
| #endif |
| /* |
| * Get the system DMA burst size: allocate a scratch DMA handle, |
| * bind it to an arbitrary kernel address (the handle itself), and |
| * let ddi_dma_burstsizes() report what the system supports for |
| * this device. |
| */ |
| ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, |
| DDI_DMA_DONTWAIT, 0, |
| &nxgep->dmasparehandle); |
| if (ddi_status != DDI_SUCCESS) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "ddi_dma_alloc_handle: failed " |
| " status 0x%x", ddi_status)); |
| goto nxge_setup_system_dma_pages_exit; |
| } |
| |
| ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, |
| (caddr_t)nxgep->dmasparehandle, |
| sizeof (nxgep->dmasparehandle), |
| DDI_DMA_RDWR | DDI_DMA_CONSISTENT, |
| DDI_DMA_DONTWAIT, 0, |
| &cookie, &count); |
| if (ddi_status != DDI_DMA_MAPPED) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "Binding spare handle to find system" |
| " burstsize failed.")); |
| ddi_status = DDI_FAILURE; |
| goto nxge_setup_system_dma_pages_fail1; |
| } |
| |
| nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); |
| (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); |
| |
| nxge_setup_system_dma_pages_fail1: |
| ddi_dma_free_handle(&nxgep->dmasparehandle); |
| |
| nxge_setup_system_dma_pages_exit: |
| |
| if (ddi_status != DDI_SUCCESS) |
| status |= (NXGE_ERROR | NXGE_DDI_FAILED); |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, |
| "<== nxge_setup_system_dma_pages status = 0x%08x", status)); |
| return (status); |
| } |
| |
| static nxge_status_t |
| nxge_alloc_mem_pool(p_nxge_t nxgep) |
| { |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); |
| |
| status = nxge_alloc_rx_mem_pool(nxgep); |
| if (status != NXGE_OK) { |
| return (NXGE_ERROR); |
| } |
| |
| status = nxge_alloc_tx_mem_pool(nxgep); |
| if (status != NXGE_OK) { |
| nxge_free_rx_mem_pool(nxgep); |
| return (NXGE_ERROR); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); |
| return (NXGE_OK); |
| } |
| |
| static void |
| nxge_free_mem_pool(p_nxge_t nxgep) |
| { |
| NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); |
| |
| nxge_free_rx_mem_pool(nxgep); |
| nxge_free_tx_mem_pool(nxgep); |
| |
| NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); |
| } |
| |
| nxge_status_t |
| nxge_alloc_rx_mem_pool(p_nxge_t nxgep) |
| { |
| uint32_t rdc_max; |
| p_nxge_dma_pt_cfg_t p_all_cfgp; |
| p_nxge_hw_pt_cfg_t p_cfgp; |
| p_nxge_dma_pool_t dma_poolp; |
| p_nxge_dma_common_t *dma_buf_p; |
| p_nxge_dma_pool_t dma_cntl_poolp; |
| p_nxge_dma_common_t *dma_cntl_p; |
| uint32_t *num_chunks; /* per dma */ |
| nxge_status_t status = NXGE_OK; |
| |
| uint32_t nxge_port_rbr_size; |
| uint32_t nxge_port_rbr_spare_size; |
| uint32_t nxge_port_rcr_size; |
| uint32_t rx_cntl_alloc_size; |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); |
| |
| p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; |
| p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; |
| rdc_max = NXGE_MAX_RDCS; |
| |
| /* |
| * Allocate memory for the common DMA data structures. |
| */ |
| dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), |
| KM_SLEEP); |
| dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( |
| sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); |
| |
| dma_cntl_poolp = (p_nxge_dma_pool_t) |
| KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); |
| dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( |
| sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); |
| |
| num_chunks = (uint32_t *)KMEM_ZALLOC( |
| sizeof (uint32_t) * rdc_max, KM_SLEEP); |
| |
| /* |
| * Assume that each DMA channel will be configured with |
| * the default block size. |
| * RBR block counts are rounded up to a multiple of the |
| * post batch count (16). |
| */ |
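| /* |
| * Worked example (values assumed, for illustration): an RBR size |
| * of 1000 is not a multiple of the post batch count of 16, so the |
| * round-up below yields 16 * (1000 / 16 + 1) = 16 * 63 = 1008 |
| * blocks. |
| */ |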
| nxge_port_rbr_size = p_all_cfgp->rbr_size; |
| nxge_port_rcr_size = p_all_cfgp->rcr_size; |
| |
| if (!nxge_port_rbr_size) { |
| nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; |
| } |
| if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { |
| nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * |
| (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); |
| } |
| |
| p_all_cfgp->rbr_size = nxge_port_rbr_size; |
| nxge_port_rbr_spare_size = nxge_rbr_spare_size; |
| |
| if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { |
| nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * |
| (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); |
| } |
| if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { |
| NXGE_DEBUG_MSG((nxgep, MEM_CTL, |
| "nxge_alloc_rx_mem_pool: RBR size too high %d, " |
| "set to default %d", |
| nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); |
| nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; |
| } |
| if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { |
| NXGE_DEBUG_MSG((nxgep, MEM_CTL, |
| "nxge_alloc_rx_mem_pool: RCR too high %d, " |
| "set to default %d", |
| nxge_port_rcr_size, RCR_DEFAULT_MAX)); |
| nxge_port_rcr_size = RCR_DEFAULT_MAX; |
| } |
| |
| /* |
| * The N2/NIU has limitations on descriptor and buffer sizes: |
| * contiguous memory allocations for data buffers are limited to |
| * 4M (contig_mem_alloc), and control buffers must be little |
| * endian (and therefore must use the ddi/dki memory allocation |
| * functions). |
| */ |
| #if defined(sun4v) && defined(NIU_LP_WORKAROUND) |
| if (nxgep->niu_type == N2_NIU) { |
| nxge_port_rbr_spare_size = 0; |
| if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || |
| (!ISP2(nxge_port_rbr_size))) { |
| nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; |
| } |
| if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || |
| (!ISP2(nxge_port_rcr_size))) { |
| nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; |
| } |
| } |
| #endif |
| |
| /* |
| * The addresses of the receive block ring, the receive |
| * completion ring and the mailbox must all be cache-line |
| * aligned (64 bytes). |
| */ |
| rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; |
| rx_cntl_alloc_size *= (sizeof (rx_desc_t)); |
| rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); |
| rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); |
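| /* |
| * For illustration only (entry sizes assumed, not taken from the |
| * headers): with a 4096-entry RBR, no spares, a 4096-entry RCR, |
| * 8-byte descriptors and a 64-byte mailbox, the control area is |
| * 4096 * 8 + 4096 * 8 + 64 = 65600 bytes. |
| */ |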
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " |
| "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " |
| "nxge_port_rcr_size = %d " |
| "rx_cntl_alloc_size = %d", |
| nxge_port_rbr_size, nxge_port_rbr_spare_size, |
| nxge_port_rcr_size, |
| rx_cntl_alloc_size)); |
| |
| #if defined(sun4v) && defined(NIU_LP_WORKAROUND) |
| if (nxgep->niu_type == N2_NIU) { |
| uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * |
| (nxge_port_rbr_size + nxge_port_rbr_spare_size)); |
| |
| if (!ISP2(rx_buf_alloc_size)) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "==> nxge_alloc_rx_mem_pool: " |
| " must be power of 2")); |
| status |= (NXGE_ERROR | NXGE_DDI_FAILED); |
| goto nxge_alloc_rx_mem_pool_exit; |
| } |
| |
| if (rx_buf_alloc_size > (1 << 22)) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "==> nxge_alloc_rx_mem_pool: " |
| " limit size to 4M")); |
| status |= (NXGE_ERROR | NXGE_DDI_FAILED); |
| goto nxge_alloc_rx_mem_pool_exit; |
| } |
| |
| if (rx_cntl_alloc_size < 0x2000) { |
| rx_cntl_alloc_size = 0x2000; |
| } |
| } |
| #endif |
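| /* |
| * For example (sizes assumed): with 8K blocks and a 512-entry RBR |
| * and no spares, rx_buf_alloc_size is 8192 * 512 = 4M = (1 << 22), |
| * which satisfies both the power-of-two and the 4M checks above. |
| */ |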
| nxgep->nxge_port_rbr_size = nxge_port_rbr_size; |
| nxgep->nxge_port_rcr_size = nxge_port_rcr_size; |
| nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; |
| nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; |
| |
| dma_poolp->ndmas = p_cfgp->max_rdcs; |
| dma_poolp->num_chunks = num_chunks; |
| dma_poolp->buf_allocated = B_TRUE; |
| nxgep->rx_buf_pool_p = dma_poolp; |
| dma_poolp->dma_buf_pool_p = dma_buf_p; |
| |
| dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; |
| dma_cntl_poolp->buf_allocated = B_TRUE; |
| nxgep->rx_cntl_pool_p = dma_cntl_poolp; |
| dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; |
| |
| /* Allocate the receive rings, too. */ |
| nxgep->rx_rbr_rings = |
| KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); |
| nxgep->rx_rbr_rings->rbr_rings = |
| KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); |
| nxgep->rx_rcr_rings = |
| KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); |
| nxgep->rx_rcr_rings->rcr_rings = |
| KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); |
| nxgep->rx_mbox_areas_p = |
| KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); |
| nxgep->rx_mbox_areas_p->rxmbox_areas = |
| KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); |
| |
| nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = |
| p_cfgp->max_rdcs; |
| |
| nxge_alloc_rx_mem_pool_exit: |
| if (status != NXGE_OK) { |
| /* |
| * Unwind the allocations made above; the error paths jump |
| * here before anything has been hung off of nxgep. |
| */ |
| KMEM_FREE(num_chunks, sizeof (uint32_t) * rdc_max); |
| KMEM_FREE(dma_cntl_p, sizeof (p_nxge_dma_common_t) * rdc_max); |
| KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); |
| KMEM_FREE(dma_buf_p, sizeof (p_nxge_dma_common_t) * rdc_max); |
| KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, |
| "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); |
| |
| return (status); |
| } |
| |
| /* |
| * nxge_alloc_rxb |
| * |
| * Allocate receive buffers and control (descriptor ring and |
| * mailbox) memory for one RDC. |
| * |
| * Arguments: |
| * nxgep |
| * channel The channel for which to allocate buffers. |
| * |
| * Context: |
| * Can block (the buffer allocations below use KM_SLEEP), so |
| * this must not be called from interrupt context. |
| */ |
| nxge_status_t |
| nxge_alloc_rxb( |
| p_nxge_t nxgep, |
| int channel) |
| { |
| size_t rx_buf_alloc_size; |
| nxge_status_t status = NXGE_OK; |
| |
| nxge_dma_common_t **data; |
| nxge_dma_common_t **control; |
| uint32_t *num_chunks; |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); |
| |
| /* |
| * Allocate memory for the receive buffers and descriptor rings. |
| * Replace these allocation functions with the interface functions |
| * provided by the partition manager if/when they are available. |
| */ |
| |
| /* |
| * Allocate memory for the receive buffer blocks. |
| */ |
| rx_buf_alloc_size = (nxgep->rx_default_block_size * |
| (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); |
| |
| data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; |
| num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; |
| |
| if ((status = nxge_alloc_rx_buf_dma( |
| nxgep, channel, data, rx_buf_alloc_size, |
| nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { |
| return (status); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " |
| "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); |
| |
| /* |
| * Allocate memory for descriptor rings and mailbox. |
| */ |
| control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; |
| |
| if ((status = nxge_alloc_rx_cntl_dma( |
| nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) |
| != NXGE_OK) { |
| nxge_free_rx_cntl_dma(nxgep, *control); |
| (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; |
| nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); |
| return (status); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, |
| "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); |
| |
| return (status); |
| } |
| |
| void |
| nxge_free_rxb( |
| p_nxge_t nxgep, |
| int channel) |
| { |
| nxge_dma_common_t *data; |
| nxge_dma_common_t *control; |
| uint32_t num_chunks; |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); |
| |
| data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; |
| num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; |
| nxge_free_rx_buf_dma(nxgep, data, num_chunks); |
| |
| nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; |
| nxgep->rx_buf_pool_p->num_chunks[channel] = 0; |
| |
| control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; |
| nxge_free_rx_cntl_dma(nxgep, control); |
| |
| nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; |
| |
| KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); |
| KMEM_FREE(control, sizeof (nxge_dma_common_t)); |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); |
| } |
| |
| static void |
| nxge_free_rx_mem_pool(p_nxge_t nxgep) |
| { |
| int rdc_max = NXGE_MAX_RDCS; |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); |
| |
| if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "<== nxge_free_rx_mem_pool " |
| "(null rx buf pool or buf not allocated")); |
| return; |
| } |
| if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "<== nxge_free_rx_mem_pool " |
| "(null rx cntl buf pool or cntl buf not allocated")); |
| return; |
| } |
| |
| KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, |
| sizeof (p_nxge_dma_common_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); |
| |
| KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, |
| sizeof (uint32_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, |
| sizeof (p_nxge_dma_common_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); |
| |
| nxgep->rx_buf_pool_p = 0; |
| nxgep->rx_cntl_pool_p = 0; |
| |
| KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, |
| sizeof (p_rx_rbr_ring_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); |
| KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, |
| sizeof (p_rx_rcr_ring_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); |
| KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, |
| sizeof (p_rx_mbox_t) * rdc_max); |
| KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); |
| |
| nxgep->rx_rbr_rings = 0; |
| nxgep->rx_rcr_rings = 0; |
| nxgep->rx_mbox_areas_p = 0; |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); |
| } |
| |
| |
| static nxge_status_t |
| nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, |
| p_nxge_dma_common_t *dmap, |
| size_t alloc_size, size_t block_size, uint32_t *num_chunks) |
| { |
| p_nxge_dma_common_t rx_dmap; |
| nxge_status_t status = NXGE_OK; |
| size_t total_alloc_size; |
| size_t allocated = 0; |
| int i, size_index, array_size; |
| boolean_t use_kmem_alloc = B_FALSE; |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); |
| |
| rx_dmap = (p_nxge_dma_common_t) |
| KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, |
| KM_SLEEP); |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", |
| dma_channel, alloc_size, block_size, dmap)); |
| |
| total_alloc_size = alloc_size; |
| |
| #if defined(RX_USE_RECLAIM_POST) |
| total_alloc_size = alloc_size + alloc_size/4; |
| #endif |
| |
| i = 0; |
| size_index = 0; |
| array_size = sizeof (alloc_sizes)/sizeof (size_t); |
| while ((size_index < array_size) && |
| (alloc_sizes[size_index] < alloc_size)) |
| size_index++; |
| if (size_index >= array_size) { |
| size_index = array_size - 1; |
| } |
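| /* |
| * Chunking strategy: the search above picks the smallest entry in |
| * alloc_sizes[] that can hold the whole request (or the largest |
| * entry if none can). The loop below then carves the request into |
| * up to NXGE_DMA_BLOCK chunks, stepping size_index down to smaller |
| * chunk sizes whenever an allocation fails. |
| */ |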
| |
| /* For Neptune, use kmem_alloc if the kmem flag is set. */ |
| if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { |
| use_kmem_alloc = B_TRUE; |
| #if defined(__i386) || defined(__amd64) |
| size_index = 0; |
| #endif |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "==> nxge_alloc_rx_buf_dma: " |
| "Neptune use kmem_alloc() - size_index %d", |
| size_index)); |
| } |
| |
| while ((allocated < total_alloc_size) && |
| (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { |
| rx_dmap[i].dma_chunk_index = i; |
| rx_dmap[i].block_size = block_size; |
| rx_dmap[i].alength = alloc_sizes[size_index]; |
| rx_dmap[i].orig_alength = rx_dmap[i].alength; |
| rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; |
| rx_dmap[i].dma_channel = dma_channel; |
| rx_dmap[i].contig_alloc_type = B_FALSE; |
| rx_dmap[i].kmem_alloc_type = B_FALSE; |
| rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; |
| |
| /* |
| * N2/NIU: data buffers must be contiguous as the driver |
| * needs to call the hypervisor API to set up |
| * logical pages. |
| */ |
| if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { |
| rx_dmap[i].contig_alloc_type = B_TRUE; |
| rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; |
| } else if (use_kmem_alloc) { |
| /* For Neptune, use kmem_alloc */ |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "==> nxge_alloc_rx_buf_dma: " |
| "Neptune use kmem_alloc()")); |
| rx_dmap[i].kmem_alloc_type = B_TRUE; |
| rx_dmap[i].buf_alloc_type = KMEM_ALLOC; |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " |
| "i %d nblocks %d alength %d", |
| dma_channel, i, &rx_dmap[i], block_size, |
| i, rx_dmap[i].nblocks, |
| rx_dmap[i].alength)); |
| status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, |
| &nxge_rx_dma_attr, |
| rx_dmap[i].alength, |
| &nxge_dev_buf_dma_acc_attr, |
| DDI_DMA_READ | DDI_DMA_STREAMING, |
| (p_nxge_dma_common_t)(&rx_dmap[i])); |
| if (status != NXGE_OK) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "nxge_alloc_rx_buf_dma: Alloc Failed: " |
| "dma %d size_index %d size requested %d", |
| dma_channel, |
| size_index, |
| rx_dmap[i].alength)); |
| size_index--; |
| } else { |
| rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| " nxge_alloc_rx_buf_dma DONE alloc mem: " |
| "dma %d dma_buf_p $%p kaddrp $%p alength %d " |
| "buf_alloc_state %d alloc_type %d", |
| dma_channel, |
| &rx_dmap[i], |
| rx_dmap[i].kaddrp, |
| rx_dmap[i].alength, |
| rx_dmap[i].buf_alloc_state, |
| rx_dmap[i].buf_alloc_type)); |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| " alloc_rx_buf_dma allocated rdc %d " |
| "chunk %d size %x dvma %x bufp %llx kaddrp $%p", |
| dma_channel, i, rx_dmap[i].alength, |
| rx_dmap[i].ioaddr_pp, &rx_dmap[i], |
| rx_dmap[i].kaddrp)); |
| i++; |
| allocated += alloc_sizes[size_index]; |
| } |
| } |
| |
| if (allocated < total_alloc_size) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "==> nxge_alloc_rx_buf_dma: not enough for channel %d " |
| "allocated 0x%x requested 0x%x", |
| dma_channel, |
| allocated, total_alloc_size)); |
| status = NXGE_ERROR; |
| goto nxge_alloc_rx_mem_fail1; |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " |
| "allocated 0x%x requested 0x%x", |
| dma_channel, |
| allocated, total_alloc_size)); |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, |
| " alloc_rx_buf_dma rdc %d allocated %d chunks", |
| dma_channel, i)); |
| *num_chunks = i; |
| *dmap = rx_dmap; |
| |
| goto nxge_alloc_rx_mem_exit; |
| |
| nxge_alloc_rx_mem_fail1: |
| KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); |
| |
| nxge_alloc_rx_mem_exit: |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, |
| "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); |
| |
| return (status); |
| } |
| |
| /*ARGSUSED*/ |
| static void |
| nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, |
| uint32_t num_chunks) |
| { |
| int i; |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); |
| |
| if (dmap == 0) |
| return; |
| |
| for (i = 0; i < num_chunks; i++) { |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, |
| "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", |
| i, dmap)); |
| nxge_dma_free_rx_data_buf(dmap++); |
| } |
| |
| NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma")); |
| } |
| |
| /*ARGSUSED*/ |
| static nxge_status_t |
| nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, |
| p_nxge_dma_common_t *dmap, size_t size) |
| { |
| p_nxge_dma_common_t rx_dmap; |
| nxge_status_t status = NXGE_OK; |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); |
| |
| rx_dmap = (p_nxge_dma_common_t) |
| KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); |
| |
| rx_dmap->contig_alloc_type = B_FALSE; |
| rx_dmap->kmem_alloc_type = B_FALSE; |
| |
| status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, |
| &nxge_desc_dma_attr, |
| size, |
| &nxge_dev_desc_dma_acc_attr, |
| DDI_DMA_RDWR | DDI_DMA_CONSISTENT, |
| rx_dmap); |
| if (status != NXGE_OK) { |
| goto nxge_alloc_rx_cntl_dma_fail1; |
| } |
| |
| *dmap = rx_dmap; |
| goto nxge_alloc_rx_cntl_dma_exit; |
| |
| nxge_alloc_rx_cntl_dma_fail1: |
| KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); |
| |
| nxge_alloc_rx_cntl_dma_exit: |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, |
| "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); |
| |
| return (status); |
| } |
| |
| /*ARGSUSED*/ |
| static void |
| nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) |
| { |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); |
| |
| if (dmap == 0) |
| return; |
| |
| nxge_dma_mem_free(dmap); |
| |
| NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); |
| } |
| |
| typedef struct { |
| size_t tx_size; |
| size_t cr_size; |
| size_t threshhold; |
| } nxge_tdc_sizes_t; |
| |
| static |
| nxge_status_t |
| nxge_tdc_sizes( |
| nxge_t *nxgep, |
| nxge_tdc_sizes_t *sizes) |
| { |
| uint32_t threshhold; /* The bcopy() threshold */ |
| size_t tx_size; /* Transmit buffer size */ |
| size_t cr_size; /* Completion ring size */ |
| |
| /* |
| * Assume that each DMA channel will be configured with the |
| * default transmit buffer size for copying transmit data. |
| * (If a packet is bigger than this, it will not be copied.) |
| */ |
| if (nxgep->niu_type == N2_NIU) { |
| threshhold = TX_BCOPY_SIZE; |
| } else { |
| threshhold = nxge_bcopy_thresh; |
| } |
| tx_size = nxge_tx_ring_size * threshhold; |
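| /* |
| * For illustration (defaults assumed, not taken from the headers): |
| * with a 1024-descriptor transmit ring and a 2048-byte bcopy |
| * threshold, tx_size is 1024 * 2048 = 2M of pre-allocated transmit |
| * copy buffers per channel. |
| */ |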
| |
| cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); |
| cr_size += sizeof (txdma_mailbox_t); |
| |
| #if defined(sun4v) && defined(NIU_LP_WORKAROUND) |
| if (nxgep->niu_type == N2_NIU) { |
| if (!ISP2(tx_size)) { |
| NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, |
| "==> nxge_tdc_sizes: Tx size" |
| " must be power of 2")); |
|