| /* |
| * This file is provided under a CDDLv1 license. When using or |
| * redistributing this file, you may do so under this license. |
| * In redistributing this file this license must be included |
| * and no other modification of this header file is permitted. |
| * |
| * CDDL LICENSE SUMMARY |
| * |
| * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved. |
| * |
| * The contents of this file are subject to the terms of Version |
| * 1.0 of the Common Development and Distribution License (the "License"). |
| * |
| * You should have received a copy of the License with this software. |
| * You can obtain a copy of the License at |
| * http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| */ |
| |
| /* |
| * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. |
| */ |
| |
| /* |
| * Copyright 2012 DEY Storage Systems, Inc. All rights reserved. |
| * Copyright 2013 Nexenta Systems, Inc. All rights reserved. |
| * Copyright (c) 2017, Joyent, Inc. |
| */ |
| |
| /* |
| * ********************************************************************** |
| * * |
| * Module Name: * |
| * e1000g_main.c * |
| * * |
| * Abstract: * |
| * This file contains the interface routines for the Solaris OS. * |
| * It has all DDI entry point routines and GLD entry point routines. * |
| * * |
| * This file also contains routines that take care of initialization, * |
| * uninitialization, and interrupt handling. * |
| * * |
| * ********************************************************************** |
| */ |
| |
| #include <sys/dlpi.h> |
| #include <sys/mac.h> |
| #include "e1000g_sw.h" |
| #include "e1000g_debug.h" |
| |
| static char ident[] = "Intel PRO/1000 Ethernet"; |
| /* LINTED E_STATIC_UNUSED */ |
| static char e1000g_version[] = "Driver Ver. 5.3.24"; |
| |
| /* |
| * Prototypes for DDI entry points |
| */ |
| static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t); |
| static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t); |
| static int e1000g_quiesce(dev_info_t *); |
| |
| /* |
| * Prototypes for the init and intr routines |
| */ |
| static int e1000g_resume(dev_info_t *); |
| static int e1000g_suspend(dev_info_t *); |
| static uint_t e1000g_intr_pciexpress(caddr_t); |
| static uint_t e1000g_intr(caddr_t); |
| static void e1000g_intr_work(struct e1000g *, uint32_t); |
| #pragma inline(e1000g_intr_work) |
| static int e1000g_init(struct e1000g *); |
| static int e1000g_start(struct e1000g *, boolean_t); |
| static void e1000g_stop(struct e1000g *, boolean_t); |
| static int e1000g_m_start(void *); |
| static void e1000g_m_stop(void *); |
| static int e1000g_m_promisc(void *, boolean_t); |
| static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *); |
| static int e1000g_m_multicst(void *, boolean_t, const uint8_t *); |
| static void e1000g_m_ioctl(void *, queue_t *, mblk_t *); |
| static int e1000g_m_setprop(void *, const char *, mac_prop_id_t, |
| uint_t, const void *); |
| static int e1000g_m_getprop(void *, const char *, mac_prop_id_t, |
| uint_t, void *); |
| static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t, |
| mac_prop_info_handle_t); |
| static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t, |
| const void *); |
| static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *); |
| static void e1000g_init_locks(struct e1000g *); |
| static void e1000g_destroy_locks(struct e1000g *); |
| static int e1000g_identify_hardware(struct e1000g *); |
| static int e1000g_regs_map(struct e1000g *); |
| static int e1000g_set_driver_params(struct e1000g *); |
| static void e1000g_set_bufsize(struct e1000g *); |
| static int e1000g_register_mac(struct e1000g *); |
| static boolean_t e1000g_rx_drain(struct e1000g *); |
| static boolean_t e1000g_tx_drain(struct e1000g *); |
| static void e1000g_init_unicst(struct e1000g *); |
| static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int); |
| static int e1000g_alloc_rx_data(struct e1000g *); |
| static void e1000g_release_multicast(struct e1000g *); |
| static void e1000g_pch_limits(struct e1000g *); |
| static uint32_t e1000g_mtu2maxframe(uint32_t); |
| |
| /* |
| * Local routines |
| */ |
| static boolean_t e1000g_reset_adapter(struct e1000g *); |
| static void e1000g_tx_clean(struct e1000g *); |
| static void e1000g_rx_clean(struct e1000g *); |
| static void e1000g_link_timer(void *); |
| static void e1000g_local_timer(void *); |
| static boolean_t e1000g_link_check(struct e1000g *); |
| static boolean_t e1000g_stall_check(struct e1000g *); |
| static void e1000g_smartspeed(struct e1000g *); |
| static void e1000g_get_conf(struct e1000g *); |
| static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int, |
| int *); |
| static void enable_watchdog_timer(struct e1000g *); |
| static void disable_watchdog_timer(struct e1000g *); |
| static void start_watchdog_timer(struct e1000g *); |
| static void restart_watchdog_timer(struct e1000g *); |
| static void stop_watchdog_timer(struct e1000g *); |
| static void stop_link_timer(struct e1000g *); |
| static void stop_82547_timer(e1000g_tx_ring_t *); |
| static void e1000g_force_speed_duplex(struct e1000g *); |
| static void e1000g_setup_max_mtu(struct e1000g *); |
| static void e1000g_get_max_frame_size(struct e1000g *); |
| static boolean_t is_valid_mac_addr(uint8_t *); |
| static void e1000g_unattach(dev_info_t *, struct e1000g *); |
| static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); |
| #ifdef E1000G_DEBUG |
| static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); |
| static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); |
| static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); |
| static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); |
| static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, |
| struct iocblk *, mblk_t *); |
| #endif |
| static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, |
| struct iocblk *, mblk_t *); |
| static boolean_t e1000g_check_loopback_support(struct e1000_hw *); |
| static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); |
| static void e1000g_set_internal_loopback(struct e1000g *); |
| static void e1000g_set_external_loopback_1000(struct e1000g *); |
| static void e1000g_set_external_loopback_100(struct e1000g *); |
| static void e1000g_set_external_loopback_10(struct e1000g *); |
| static int e1000g_add_intrs(struct e1000g *); |
| static int e1000g_intr_add(struct e1000g *, int); |
| static int e1000g_rem_intrs(struct e1000g *); |
| static int e1000g_enable_intrs(struct e1000g *); |
| static int e1000g_disable_intrs(struct e1000g *); |
| static boolean_t e1000g_link_up(struct e1000g *); |
| #ifdef __sparc |
| static boolean_t e1000g_find_mac_address(struct e1000g *); |
| #endif |
| static void e1000g_get_phy_state(struct e1000g *); |
| static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, |
| const void *impl_data); |
| static void e1000g_fm_init(struct e1000g *Adapter); |
| static void e1000g_fm_fini(struct e1000g *Adapter); |
| static void e1000g_param_sync(struct e1000g *); |
| static void e1000g_get_driver_control(struct e1000_hw *); |
| static void e1000g_release_driver_control(struct e1000_hw *); |
| static void e1000g_restore_promisc(struct e1000g *Adapter); |
| |
| char *e1000g_priv_props[] = { |
| "_tx_bcopy_threshold", |
| "_tx_interrupt_enable", |
| "_tx_intr_delay", |
| "_tx_intr_abs_delay", |
| "_rx_bcopy_threshold", |
| "_max_num_rcv_packets", |
| "_rx_intr_delay", |
| "_rx_intr_abs_delay", |
| "_intr_throttling_rate", |
| "_intr_adaptive", |
| "_adv_pause_cap", |
| "_adv_asym_pause_cap", |
| NULL |
| }; |
| |
| static struct cb_ops cb_ws_ops = { |
| nulldev, /* cb_open */ |
| nulldev, /* cb_close */ |
| nodev, /* cb_strategy */ |
| nodev, /* cb_print */ |
| nodev, /* cb_dump */ |
| nodev, /* cb_read */ |
| nodev, /* cb_write */ |
| nodev, /* cb_ioctl */ |
| nodev, /* cb_devmap */ |
| nodev, /* cb_mmap */ |
| nodev, /* cb_segmap */ |
| nochpoll, /* cb_chpoll */ |
| ddi_prop_op, /* cb_prop_op */ |
| NULL, /* cb_stream */ |
| D_MP | D_HOTPLUG, /* cb_flag */ |
| CB_REV, /* cb_rev */ |
| nodev, /* cb_aread */ |
| nodev /* cb_awrite */ |
| }; |
| |
| static struct dev_ops ws_ops = { |
| DEVO_REV, /* devo_rev */ |
| 0, /* devo_refcnt */ |
| NULL, /* devo_getinfo */ |
| nulldev, /* devo_identify */ |
| nulldev, /* devo_probe */ |
| e1000g_attach, /* devo_attach */ |
| e1000g_detach, /* devo_detach */ |
| nodev, /* devo_reset */ |
| &cb_ws_ops, /* devo_cb_ops */ |
| NULL, /* devo_bus_ops */ |
| ddi_power, /* devo_power */ |
| e1000g_quiesce /* devo_quiesce */ |
| }; |
| |
| static struct modldrv modldrv = { |
| &mod_driverops, /* Type of module. This one is a driver */ |
| ident, /* Description string */ |
| &ws_ops, /* driver ops */ |
| }; |
| |
| static struct modlinkage modlinkage = { |
| MODREV_1, &modldrv, NULL |
| }; |
| |
| /* Access attributes for register mapping */ |
| static ddi_device_acc_attr_t e1000g_regs_acc_attr = { |
| DDI_DEVICE_ATTR_V1, |
| DDI_STRUCTURE_LE_ACC, |
| DDI_STRICTORDER_ACC, |
| DDI_FLAGERR_ACC |
| }; |
| |
| #define E1000G_M_CALLBACK_FLAGS \ |
| (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) |
| |
| static mac_callbacks_t e1000g_m_callbacks = { |
| E1000G_M_CALLBACK_FLAGS, |
| e1000g_m_stat, |
| e1000g_m_start, |
| e1000g_m_stop, |
| e1000g_m_promisc, |
| e1000g_m_multicst, |
| NULL, |
| e1000g_m_tx, |
| NULL, |
| e1000g_m_ioctl, |
| e1000g_m_getcapab, |
| NULL, |
| NULL, |
| e1000g_m_setprop, |
| e1000g_m_getprop, |
| e1000g_m_propinfo |
| }; |
| |
| /* |
| * Global variables |
| */ |
| uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K; |
| uint32_t e1000g_mblks_pending = 0; |
| /* |
| * Workaround for Dynamic Reconfiguration support, for the x86 platform only. |
| * Here we maintain a private dev_info list if e1000g_force_detach is |
| * enabled. If we force the driver to detach while there are still some |
| * rx buffers retained in the upper layer, we have to keep a copy of the |
| * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data |
| * structure will be freed after the driver is detached. However, when we |
| * finally free those rx buffers released by the upper layer, we need to |
| * refer to the dev_info to free the dma buffers. So we save a copy of |
| * the dev_info for this purpose. On the x86 platform, we assume this copy |
| * of the dev_info is always valid, but on the SPARC platform, it could be |
| * invalid after a system-board-level DR operation. For this reason, the |
| * global variable e1000g_force_detach must be B_FALSE on SPARC. |
| */ |
| #ifdef __sparc |
| boolean_t e1000g_force_detach = B_FALSE; |
| #else |
| boolean_t e1000g_force_detach = B_TRUE; |
| #endif |
| private_devi_list_t *e1000g_private_devi_list = NULL; |
| |
| /* |
| * The mutex e1000g_rx_detach_lock is defined to protect the processing of |
| * the private dev_info list, and to serialize the processing of rx buffer |
| * freeing and rx buffer recycling. |
| */ |
| kmutex_t e1000g_rx_detach_lock; |
| /* |
| * The rwlock e1000g_dma_type_lock is defined to protect the global flag |
| * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA". |
| * If there are many e1000g instances, the system may run out of DVMA |
| * resources during the initialization of the instances, then the flag will |
| * be changed to "USE_DMA". Because different e1000g instances are initialized |
| * in parallel, we need to use this lock to protect the flag. |
| */ |
| krwlock_t e1000g_dma_type_lock; |
| |
| /* |
| * The 82546 chipset is a dual-port device, and both ports share one EEPROM. |
| * Based on information from Intel, the 82546 chipset has a hardware |
| * problem: when one port is being reset while the other port is trying to |
| * access the EEPROM, it could cause a system hang or panic. To work around |
| * this hardware problem, we use a global mutex to prevent such operations |
| * from happening simultaneously on different instances. This workaround is |
| * applied to all the devices supported by this driver. |
| */ |
| kmutex_t e1000g_nvm_lock; |
| |
| /* |
| * Loadable module configuration entry points for the driver |
| */ |
| |
| /* |
| * _init - module initialization |
| */ |
| int |
| _init(void) |
| { |
| int status; |
| |
| mac_init_ops(&ws_ops, WSNAME); |
| status = mod_install(&modlinkage); |
| if (status != DDI_SUCCESS) |
| mac_fini_ops(&ws_ops); |
| else { |
| mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); |
| rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); |
| mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); |
| } |
| |
| return (status); |
| } |
| |
| /* |
| * _fini - module finalization |
| */ |
| int |
| _fini(void) |
| { |
| int status; |
| |
| if (e1000g_mblks_pending != 0) |
| return (EBUSY); |
| |
| status = mod_remove(&modlinkage); |
| if (status == DDI_SUCCESS) { |
| mac_fini_ops(&ws_ops); |
| |
| if (e1000g_force_detach) { |
| private_devi_list_t *devi_node; |
| |
| mutex_enter(&e1000g_rx_detach_lock); |
| while (e1000g_private_devi_list != NULL) { |
| devi_node = e1000g_private_devi_list; |
| e1000g_private_devi_list = |
| e1000g_private_devi_list->next; |
| |
| kmem_free(devi_node->priv_dip, |
| sizeof (struct dev_info)); |
| kmem_free(devi_node, |
| sizeof (private_devi_list_t)); |
| } |
| mutex_exit(&e1000g_rx_detach_lock); |
| } |
| |
| mutex_destroy(&e1000g_rx_detach_lock); |
| rw_destroy(&e1000g_dma_type_lock); |
| mutex_destroy(&e1000g_nvm_lock); |
| } |
| |
| return (status); |
| } |
| |
| /* |
| * _info - module information |
| */ |
| int |
| _info(struct modinfo *modinfop) |
| { |
| return (mod_info(&modlinkage, modinfop)); |
| } |
| |
| /* |
| * e1000g_attach - driver attach |
| * |
| * This function is the device-specific initialization entry |
| * point. This entry point is required and must be written. |
| * The DDI_ATTACH command must be provided in the attach entry |
| * point. When attach() is called with cmd set to DDI_ATTACH, |
| * all normal kernel services (such as kmem_alloc(9F)) are |
| * available for use by the driver. |
| * |
| * The attach() function will be called once for each instance |
| * of the device on the system with cmd set to DDI_ATTACH. |
| * Until attach() succeeds, the only driver entry points which |
| * may be called are open(9E) and getinfo(9E). |
| */ |
| static int |
| e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) |
| { |
| struct e1000g *Adapter; |
| struct e1000_hw *hw; |
| struct e1000g_osdep *osdep; |
| int instance; |
| |
| switch (cmd) { |
| default: |
| e1000g_log(NULL, CE_WARN, |
| "Unsupported command send to e1000g_attach... "); |
| return (DDI_FAILURE); |
| |
| case DDI_RESUME: |
| return (e1000g_resume(devinfo)); |
| |
| case DDI_ATTACH: |
| break; |
| } |
| |
| /* |
| * get device instance number |
| */ |
| instance = ddi_get_instance(devinfo); |
| |
| /* |
| * Allocate soft data structure |
| */ |
| Adapter = |
| (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); |
| |
| Adapter->dip = devinfo; |
| Adapter->instance = instance; |
| Adapter->tx_ring->adapter = Adapter; |
| Adapter->rx_ring->adapter = Adapter; |
| |
| hw = &Adapter->shared; |
| osdep = &Adapter->osdep; |
| hw->back = osdep; |
| osdep->adapter = Adapter; |
| |
| ddi_set_driver_private(devinfo, (caddr_t)Adapter); |
| |
| /* |
| * Initialize for fma support |
| */ |
| (void) e1000g_get_prop(Adapter, "fm-capable", |
| 0, 0x0f, |
| DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | |
| DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, |
| &Adapter->fm_capabilities); |
| e1000g_fm_init(Adapter); |
| Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; |
| |
| /* |
| * PCI Configure |
| */ |
| if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; |
| |
| /* |
| * Setup hardware |
| */ |
| if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); |
| goto attach_fail; |
| } |
| |
| /* |
| * Map in the device registers. |
| */ |
| if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; |
| |
| /* |
| * Initialize driver parameters |
| */ |
| if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| goto attach_fail; |
| } |
| |
| /* |
| * Disable ULP support |
| */ |
| (void) e1000_disable_ulp_lpt_lp(hw, TRUE); |
| |
| /* |
| * Initialize interrupts |
| */ |
| if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; |
| |
| /* |
| * Initialize mutexes for this device. |
| * Do this before enabling the interrupt handler and |
| * registering the softint, to avoid the condition where the |
| * interrupt handler could try to use an uninitialized mutex. |
| */ |
| e1000g_init_locks(Adapter); |
| Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; |
| |
| /* |
| * Initialize Driver Counters |
| */ |
| if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Init stats failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; |
| |
| /* |
| * Initialize chip hardware and software structures |
| */ |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| if (e1000g_init(Adapter) != DDI_SUCCESS) { |
| rw_exit(&Adapter->chip_lock); |
| e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); |
| goto attach_fail; |
| } |
| rw_exit(&Adapter->chip_lock); |
| Adapter->attach_progress |= ATTACH_PROGRESS_INIT; |
| |
| /* |
| * Register the driver to the MAC |
| */ |
| if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Register MAC failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_MAC; |
| |
| /* |
| * Now that mutex locks are initialized, and the chip is also |
| * initialized, enable interrupts. |
| */ |
| if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); |
| goto attach_fail; |
| } |
| Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; |
| |
| /* |
| * If e1000g_force_detach is enabled, create a new entry in the global |
| * private dip list, which maintains the priv_dip for DR support after |
| * the driver is detached. |
| */ |
| if (e1000g_force_detach) { |
| private_devi_list_t *devi_node; |
| |
| Adapter->priv_dip = |
| kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); |
| bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), |
| sizeof (struct dev_info)); |
| |
| devi_node = |
| kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); |
| |
| mutex_enter(&e1000g_rx_detach_lock); |
| devi_node->priv_dip = Adapter->priv_dip; |
| devi_node->flag = E1000G_PRIV_DEVI_ATTACH; |
| devi_node->pending_rx_count = 0; |
| |
| Adapter->priv_devi_node = devi_node; |
| |
| if (e1000g_private_devi_list == NULL) { |
| devi_node->prev = NULL; |
| devi_node->next = NULL; |
| e1000g_private_devi_list = devi_node; |
| } else { |
| devi_node->prev = NULL; |
| devi_node->next = e1000g_private_devi_list; |
| e1000g_private_devi_list->prev = devi_node; |
| e1000g_private_devi_list = devi_node; |
| } |
| mutex_exit(&e1000g_rx_detach_lock); |
| } |
| |
| Adapter->e1000g_state = E1000G_INITIALIZED; |
| return (DDI_SUCCESS); |
| |
| attach_fail: |
| e1000g_unattach(devinfo, Adapter); |
| return (DDI_FAILURE); |
| } |
| |
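| /* |
| * e1000g_register_mac - register this driver instance with the MAC layer |
| */ |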
| static int |
| e1000g_register_mac(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| mac_register_t *mac; |
| int err; |
| |
| if ((mac = mac_alloc(MAC_VERSION)) == NULL) |
| return (DDI_FAILURE); |
| |
| mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; |
| mac->m_driver = Adapter; |
| mac->m_dip = Adapter->dip; |
| mac->m_src_addr = hw->mac.addr; |
| mac->m_callbacks = &e1000g_m_callbacks; |
| mac->m_min_sdu = 0; |
| mac->m_max_sdu = Adapter->default_mtu; |
| mac->m_margin = VLAN_TAGSZ; |
| mac->m_priv_props = e1000g_priv_props; |
| mac->m_v12n = MAC_VIRT_LEVEL1; |
| |
| err = mac_register(mac, &Adapter->mh); |
| mac_free(mac); |
| |
| return (err == 0 ? DDI_SUCCESS : DDI_FAILURE); |
| } |
| |
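| /* |
| * e1000g_identify_hardware - read the PCI IDs from config space and set |
| * the MAC type |
| */ |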
| static int |
| e1000g_identify_hardware(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| struct e1000g_osdep *osdep = &Adapter->osdep; |
| |
| /* Get the device id */ |
| hw->vendor_id = |
| pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); |
| hw->device_id = |
| pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); |
| hw->revision_id = |
| pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); |
| hw->subsystem_device_id = |
| pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); |
| hw->subsystem_vendor_id = |
| pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); |
| |
| if (e1000_set_mac_type(hw) != E1000_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, |
| "MAC type could not be set properly."); |
| return (DDI_FAILURE); |
| } |
| |
| return (DDI_SUCCESS); |
| } |
| |
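| /* |
| * e1000g_regs_map - map the adapter registers and, where needed, the |
| * ICH flash and I/O space |
| */ |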
| static int |
| e1000g_regs_map(struct e1000g *Adapter) |
| { |
| dev_info_t *devinfo = Adapter->dip; |
| struct e1000_hw *hw = &Adapter->shared; |
| struct e1000g_osdep *osdep = &Adapter->osdep; |
| off_t mem_size; |
| bar_info_t bar_info; |
| int offset, rnumber; |
| |
| rnumber = ADAPTER_REG_SET; |
| /* Get size of adapter register memory */ |
| if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != |
| DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_dev_regsize for registers failed"); |
| return (DDI_FAILURE); |
| } |
| |
| /* Map adapter register memory */ |
| if ((ddi_regs_map_setup(devinfo, rnumber, |
| (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, |
| &osdep->reg_handle)) != DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_regs_map_setup for registers failed"); |
| goto regs_map_fail; |
| } |
| |
| /* ICH needs to map flash memory */ |
| switch (hw->mac.type) { |
| case e1000_ich8lan: |
| case e1000_ich9lan: |
| case e1000_ich10lan: |
| case e1000_pchlan: |
| case e1000_pch2lan: |
| case e1000_pch_lpt: |
| rnumber = ICH_FLASH_REG_SET; |
| |
| /* get flash size */ |
| if (ddi_dev_regsize(devinfo, rnumber, |
| &mem_size) != DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_dev_regsize for ICH flash failed"); |
| goto regs_map_fail; |
| } |
| |
| /* map flash in */ |
| if (ddi_regs_map_setup(devinfo, rnumber, |
| (caddr_t *)&hw->flash_address, 0, |
| mem_size, &e1000g_regs_acc_attr, |
| &osdep->ich_flash_handle) != DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_regs_map_setup for ICH flash failed"); |
| goto regs_map_fail; |
| } |
| break; |
| case e1000_pch_spt: |
| /* |
| * On the SPT, the device flash is actually in BAR0, not a |
| * separate BAR. Therefore we end up setting the |
| * ich_flash_handle to be the same as the register handle. |
| * We mark it the same to reduce confusion in the other functions |
| * and macros, though this does make the setup and tear-down path |
| * slightly more complicated. |
| */ |
| osdep->ich_flash_handle = osdep->reg_handle; |
| hw->flash_address = hw->hw_addr; |
| default: |
| break; |
| } |
| |
| /* map io space */ |
| switch (hw->mac.type) { |
| case e1000_82544: |
| case e1000_82540: |
| case e1000_82545: |
| case e1000_82546: |
| case e1000_82541: |
| case e1000_82541_rev_2: |
| /* find the IO bar */ |
| rnumber = -1; |
| for (offset = PCI_CONF_BASE1; |
| offset <= PCI_CONF_BASE5; offset += 4) { |
| if (e1000g_get_bar_info(devinfo, offset, &bar_info) |
| != DDI_SUCCESS) |
| continue; |
| if (bar_info.type == E1000G_BAR_IO) { |
| rnumber = bar_info.rnumber; |
| break; |
| } |
| } |
| |
| if (rnumber < 0) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "No io space is found"); |
| goto regs_map_fail; |
| } |
| |
| /* get io space size */ |
| if (ddi_dev_regsize(devinfo, rnumber, |
| &mem_size) != DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_dev_regsize for io space failed"); |
| goto regs_map_fail; |
| } |
| |
| /* map io space */ |
| if ((ddi_regs_map_setup(devinfo, rnumber, |
| (caddr_t *)&hw->io_base, 0, mem_size, |
| &e1000g_regs_acc_attr, |
| &osdep->io_reg_handle)) != DDI_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "ddi_regs_map_setup for io space failed"); |
| goto regs_map_fail; |
| } |
| break; |
| default: |
| hw->io_base = 0; |
| break; |
| } |
| |
| return (DDI_SUCCESS); |
| |
| regs_map_fail: |
| if (osdep->reg_handle != NULL) |
| ddi_regs_map_free(&osdep->reg_handle); |
| if (osdep->ich_flash_handle != NULL && hw->mac.type != e1000_pch_spt) |
| ddi_regs_map_free(&osdep->ich_flash_handle); |
| return (DDI_FAILURE); |
| } |
| |
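| /* |
| * e1000g_set_driver_params - set up the shared-code hardware functions |
| * and initialize default driver parameters (MTU limits, buffer sizes, |
| * interrupt delays, etc.) |
| */ |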
| static int |
| e1000g_set_driver_params(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw; |
| |
| hw = &Adapter->shared; |
| |
| /* Set MAC type and initialize hardware functions */ |
| if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "Could not setup hardware functions"); |
| return (DDI_FAILURE); |
| } |
| |
| /* Get bus information */ |
| if (e1000_get_bus_info(hw) != E1000_SUCCESS) { |
| E1000G_DEBUGLOG_0(Adapter, CE_WARN, |
| "Could not get bus information"); |
| return (DDI_FAILURE); |
| } |
| |
| e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); |
| |
| hw->mac.autoneg_failed = B_TRUE; |
| |
| /* Set the autoneg_wait_to_complete flag to B_FALSE */ |
| hw->phy.autoneg_wait_to_complete = B_FALSE; |
| |
| /* Adaptive IFS related changes */ |
| hw->mac.adaptive_ifs = B_TRUE; |
| |
| /* Enable phy init script for IGP phy of 82541/82547 */ |
| if ((hw->mac.type == e1000_82547) || |
| (hw->mac.type == e1000_82541) || |
| (hw->mac.type == e1000_82547_rev_2) || |
| (hw->mac.type == e1000_82541_rev_2)) |
| e1000_init_script_state_82541(hw, B_TRUE); |
| |
| /* Enable the TTL workaround for 82541/82547 */ |
| e1000_set_ttl_workaround_state_82541(hw, B_TRUE); |
| |
| #ifdef __sparc |
| Adapter->strip_crc = B_TRUE; |
| #else |
| Adapter->strip_crc = B_FALSE; |
| #endif |
| |
| /* setup the maximum MTU size of the chip */ |
| e1000g_setup_max_mtu(Adapter); |
| |
| /* Get speed/duplex settings in conf file */ |
| hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; |
| hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
| e1000g_force_speed_duplex(Adapter); |
| |
| /* Get Jumbo Frames settings in conf file */ |
| e1000g_get_max_frame_size(Adapter); |
| |
| /* Get conf file properties */ |
| e1000g_get_conf(Adapter); |
| |
| /* enforce PCH limits */ |
| e1000g_pch_limits(Adapter); |
| |
| /* Set Rx/Tx buffer size */ |
| e1000g_set_bufsize(Adapter); |
| |
| /* Master Latency Timer */ |
| Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; |
| |
| /* copper options */ |
| if (hw->phy.media_type == e1000_media_type_copper) { |
| hw->phy.mdix = 0; /* AUTO_ALL_MODES */ |
| hw->phy.disable_polarity_correction = B_FALSE; |
| hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ |
| } |
| |
| /* The initial link state should be "unknown" */ |
| Adapter->link_state = LINK_STATE_UNKNOWN; |
| |
| /* Initialize rx parameters */ |
| Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; |
| Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; |
| |
| /* Initialize tx parameters */ |
| Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; |
| Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; |
| Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; |
| Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; |
| |
| /* Initialize rx parameters */ |
| Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; |
| |
| return (DDI_SUCCESS); |
| } |
| |
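| /* |
| * e1000g_setup_max_mtu - determine the maximum MTU supported by this |
| * MAC type |
| */ |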
| static void |
| e1000g_setup_max_mtu(struct e1000g *Adapter) |
| { |
| struct e1000_mac_info *mac = &Adapter->shared.mac; |
| struct e1000_phy_info *phy = &Adapter->shared.phy; |
| |
| switch (mac->type) { |
| /* types that do not support jumbo frames */ |
| case e1000_ich8lan: |
| case e1000_82573: |
| case e1000_82583: |
| Adapter->max_mtu = ETHERMTU; |
| break; |
| /* ich9 supports jumbo frames except on one phy type */ |
| case e1000_ich9lan: |
| if (phy->type == e1000_phy_ife) |
| Adapter->max_mtu = ETHERMTU; |
| else |
| Adapter->max_mtu = MAXIMUM_MTU_9K; |
| break; |
| /* pch can do jumbo frames up to 4K */ |
| case e1000_pchlan: |
| Adapter->max_mtu = MAXIMUM_MTU_4K; |
| break; |
| /* pch2 can do jumbo frames up to 9K */ |
| case e1000_pch2lan: |
| case e1000_pch_lpt: |
| case e1000_pch_spt: |
| Adapter->max_mtu = MAXIMUM_MTU_9K; |
| break; |
| /* types with a special limit */ |
| case e1000_82571: |
| case e1000_82572: |
| case e1000_82574: |
| case e1000_80003es2lan: |
| case e1000_ich10lan: |
| if (e1000g_jumbo_mtu >= ETHERMTU && |
| e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { |
| Adapter->max_mtu = e1000g_jumbo_mtu; |
| } else { |
| Adapter->max_mtu = MAXIMUM_MTU_9K; |
| } |
| break; |
| /* default limit is 16K */ |
| default: |
| Adapter->max_mtu = FRAME_SIZE_UPTO_16K - |
| sizeof (struct ether_vlan_header) - ETHERFCSL; |
| break; |
| } |
| } |
| |
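| /* |
| * e1000g_set_bufsize - select rx/tx buffer sizes and alignment based on |
| * the maximum frame size |
| */ |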
| static void |
| e1000g_set_bufsize(struct e1000g *Adapter) |
| { |
| struct e1000_mac_info *mac = &Adapter->shared.mac; |
| uint64_t rx_size; |
| uint64_t tx_size; |
| |
| dev_info_t *devinfo = Adapter->dip; |
| #ifdef __sparc |
| ulong_t iommu_pagesize; |
| #endif |
| /* Get the system page size */ |
| Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); |
| |
| #ifdef __sparc |
| iommu_pagesize = dvma_pagesize(devinfo); |
| if (iommu_pagesize != 0) { |
| if (Adapter->sys_page_sz == iommu_pagesize) { |
| if (iommu_pagesize > 0x4000) |
| Adapter->sys_page_sz = 0x4000; |
| } else { |
| if (Adapter->sys_page_sz > iommu_pagesize) |
| Adapter->sys_page_sz = iommu_pagesize; |
| } |
| } |
| if (Adapter->lso_enable) { |
| Adapter->dvma_page_num = E1000_LSO_MAXLEN / |
| Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; |
| } else { |
| Adapter->dvma_page_num = Adapter->max_frame_size / |
| Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; |
| } |
| ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM); |
| #endif |
| |
| Adapter->min_frame_size = ETHERMIN + ETHERFCSL; |
| |
| if (Adapter->mem_workaround_82546 && |
| ((mac->type == e1000_82545) || |
| (mac->type == e1000_82546) || |
| (mac->type == e1000_82546_rev_3))) { |
| Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; |
| } else { |
| rx_size = Adapter->max_frame_size; |
| if ((rx_size > FRAME_SIZE_UPTO_2K) && |
| (rx_size <= FRAME_SIZE_UPTO_4K)) |
| Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K; |
| else if ((rx_size > FRAME_SIZE_UPTO_4K) && |
| (rx_size <= FRAME_SIZE_UPTO_8K)) |
| Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K; |
| else if ((rx_size > FRAME_SIZE_UPTO_8K) && |
| (rx_size <= FRAME_SIZE_UPTO_16K)) |
| Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K; |
| else |
| Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; |
| } |
| Adapter->rx_buffer_size += E1000G_IPALIGNROOM; |
| |
| tx_size = Adapter->max_frame_size; |
| if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K)) |
| Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K; |
| else if ((tx_size > FRAME_SIZE_UPTO_4K) && |
| (tx_size <= FRAME_SIZE_UPTO_8K)) |
| Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K; |
| else if ((tx_size > FRAME_SIZE_UPTO_8K) && |
| (tx_size <= FRAME_SIZE_UPTO_16K)) |
| Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K; |
| else |
| Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K; |
| |
| /* |
| * Wiseman adapters require receive buffers to be aligned on a 256-byte |
| * boundary. Since Livengood does not require this, and forcing it for |
| * all hardware would have performance implications, we make it |
| * applicable only to Wiseman adapters and to jumbo-frame mode; the rest |
| * of the time normal frames are fine. There is, however, a potential |
| * risk of losing data if a buffer is not aligned, so all Wiseman boards |
| * use 256-byte-aligned buffers. |
| */ |
| if (mac->type < e1000_82543) |
| Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE; |
| else |
| Adapter->rx_buf_align = 1; |
| } |
| |
| /* |
| * e1000g_detach - driver detach |
| * |
| * The detach() function is the complement of the attach routine. |
| * If cmd is set to DDI_DETACH, detach() is used to remove the |
| * state associated with a given instance of a device node |
| * prior to the removal of that instance from the system. |
| * |
| * The detach() function will be called once for each instance |
| * of the device for which there has been a successful attach() |
| * once there are no longer any opens on the device. |
| * |
| * The interrupt routine is disabled, and all memory allocated by |
| * this driver is freed. |
| */ |
| static int |
| e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) |
| { |
| struct e1000g *Adapter; |
| boolean_t rx_drain; |
| |
| switch (cmd) { |
| default: |
| return (DDI_FAILURE); |
| |
| case DDI_SUSPEND: |
| return (e1000g_suspend(devinfo)); |
| |
| case DDI_DETACH: |
| break; |
| } |
| |
| Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); |
| if (Adapter == NULL) |
| return (DDI_FAILURE); |
| |
| rx_drain = e1000g_rx_drain(Adapter); |
| if (!rx_drain && !e1000g_force_detach) |
| return (DDI_FAILURE); |
| |
| if (mac_unregister(Adapter->mh) != 0) { |
| e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); |
| return (DDI_FAILURE); |
| } |
| Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; |
| |
| ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); |
| |
| if (!e1000g_force_detach && !rx_drain) |
| return (DDI_FAILURE); |
| |
| e1000g_unattach(devinfo, Adapter); |
| |
| return (DDI_SUCCESS); |
| } |
| |
| /* |
| * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance |
| */ |
| void |
| e1000g_free_priv_devi_node(private_devi_list_t *devi_node) |
| { |
| ASSERT(e1000g_private_devi_list != NULL); |
| ASSERT(devi_node != NULL); |
| |
| if (devi_node->prev != NULL) |
| devi_node->prev->next = devi_node->next; |
| if (devi_node->next != NULL) |
| devi_node->next->prev = devi_node->prev; |
| if (devi_node == e1000g_private_devi_list) |
| e1000g_private_devi_list = devi_node->next; |
| |
| kmem_free(devi_node->priv_dip, |
| sizeof (struct dev_info)); |
| kmem_free(devi_node, |
| sizeof (private_devi_list_t)); |
| } |
| |
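| /* |
| * e1000g_unattach - tear down whatever attach stages completed, as |
| * recorded in attach_progress |
| */ |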
| static void |
| e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) |
| { |
| private_devi_list_t *devi_node; |
| int result; |
| |
| if (Adapter->e1000g_blink != NULL) { |
| ddi_periodic_delete(Adapter->e1000g_blink); |
| Adapter->e1000g_blink = NULL; |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { |
| (void) e1000g_disable_intrs(Adapter); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { |
| (void) mac_unregister(Adapter->mh); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { |
| (void) e1000g_rem_intrs(Adapter); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { |
| (void) ddi_prop_remove_all(devinfo); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { |
| kstat_delete((kstat_t *)Adapter->e1000g_ksp); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { |
| stop_link_timer(Adapter); |
| |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_reset_hw(&Adapter->shared); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result != E1000_SUCCESS) { |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| } |
| } |
| |
| e1000g_release_multicast(Adapter); |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { |
| if (Adapter->osdep.reg_handle != NULL) |
| ddi_regs_map_free(&Adapter->osdep.reg_handle); |
| if (Adapter->osdep.ich_flash_handle != NULL && |
| Adapter->shared.mac.type != e1000_pch_spt) |
| ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); |
| if (Adapter->osdep.io_reg_handle != NULL) |
| ddi_regs_map_free(&Adapter->osdep.io_reg_handle); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { |
| if (Adapter->osdep.cfg_handle != NULL) |
| pci_config_teardown(&Adapter->osdep.cfg_handle); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { |
| e1000g_destroy_locks(Adapter); |
| } |
| |
| if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { |
| e1000g_fm_fini(Adapter); |
| } |
| |
| mutex_enter(&e1000g_rx_detach_lock); |
| if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { |
| devi_node = Adapter->priv_devi_node; |
| devi_node->flag |= E1000G_PRIV_DEVI_DETACH; |
| |
| if (devi_node->pending_rx_count == 0) { |
| e1000g_free_priv_devi_node(devi_node); |
| } |
| } |
| mutex_exit(&e1000g_rx_detach_lock); |
| |
| kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); |
| |
| /* |
| * Another hotplug spec requirement: |
| * run ddi_set_driver_private(devinfo, NULL); |
| */ |
| ddi_set_driver_private(devinfo, NULL); |
| } |
| |
| /* |
| * Get the BAR type and rnumber for a given PCI BAR offset |
| */ |
| static int |
| e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) |
| { |
| pci_regspec_t *regs; |
| uint_t regs_length; |
| int type, rnumber, rcount; |
| |
| ASSERT((bar_offset >= PCI_CONF_BASE0) && |
| (bar_offset <= PCI_CONF_BASE5)); |
| |
| /* |
| * Get the DDI "reg" property |
| */ |
| if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, |
| DDI_PROP_DONTPASS, "reg", (int **)®s, |
| ®s_length) != DDI_PROP_SUCCESS) { |
| return (DDI_FAILURE); |
| } |
| |
| rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); |
| /* |
| * Check the BAR offset |
| */ |
| for (rnumber = 0; rnumber < rcount; ++rnumber) { |
| if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { |
| type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; |
| break; |
| } |
| } |
| |
| ddi_prop_free(regs); |
| |
| if (rnumber >= rcount) |
| return (DDI_FAILURE); |
| |
| switch (type) { |
| case PCI_ADDR_CONFIG: |
| bar_info->type = E1000G_BAR_CONFIG; |
| break; |
| case PCI_ADDR_IO: |
| bar_info->type = E1000G_BAR_IO; |
| break; |
| case PCI_ADDR_MEM32: |
| bar_info->type = E1000G_BAR_MEM32; |
| break; |
| case PCI_ADDR_MEM64: |
| bar_info->type = E1000G_BAR_MEM64; |
| break; |
| default: |
| return (DDI_FAILURE); |
| } |
| bar_info->rnumber = rnumber; |
| return (DDI_SUCCESS); |
| } |
| |
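| /* |
| * e1000g_init_locks - initialize the per-instance locks (chip, link, |
| * watchdog, tx/rx rings, LED) |
| */ |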
| static void |
| e1000g_init_locks(struct e1000g *Adapter) |
| { |
| e1000g_tx_ring_t *tx_ring; |
| e1000g_rx_ring_t *rx_ring; |
| |
| rw_init(&Adapter->chip_lock, NULL, |
| RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| mutex_init(&Adapter->link_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| mutex_init(&Adapter->watchdog_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| |
| tx_ring = Adapter->tx_ring; |
| |
| mutex_init(&tx_ring->tx_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| mutex_init(&tx_ring->usedlist_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| mutex_init(&tx_ring->freelist_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| |
| rx_ring = Adapter->rx_ring; |
| |
| mutex_init(&rx_ring->rx_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| |
| mutex_init(&Adapter->e1000g_led_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| } |
| |
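| /* |
| * e1000g_destroy_locks - destroy the per-instance locks |
| */ |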
| static void |
| e1000g_destroy_locks(struct e1000g *Adapter) |
| { |
| e1000g_tx_ring_t *tx_ring; |
| e1000g_rx_ring_t *rx_ring; |
| |
| mutex_destroy(&Adapter->e1000g_led_lock); |
| |
| tx_ring = Adapter->tx_ring; |
| mutex_destroy(&tx_ring->tx_lock); |
| mutex_destroy(&tx_ring->usedlist_lock); |
| mutex_destroy(&tx_ring->freelist_lock); |
| |
| rx_ring = Adapter->rx_ring; |
| mutex_destroy(&rx_ring->rx_lock); |
| |
| mutex_destroy(&Adapter->link_lock); |
| mutex_destroy(&Adapter->watchdog_lock); |
| rw_destroy(&Adapter->chip_lock); |
| |
| /* destroy mutexes initialized in the shared code */ |
| e1000_destroy_hw_mutex(&Adapter->shared); |
| } |
| |
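| /* |
| * e1000g_resume - DDI_RESUME handler: restart the adapter if it was |
| * started before the suspend |
| */ |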
| static int |
| e1000g_resume(dev_info_t *devinfo) |
| { |
| struct e1000g *Adapter; |
| |
| Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); |
| if (Adapter == NULL) |
| e1000g_log(Adapter, CE_PANIC, |
| "Instance pointer is null\n"); |
| |
| if (Adapter->dip != devinfo) |
| e1000g_log(Adapter, CE_PANIC, |
| "Devinfo is not the same as saved devinfo\n"); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->e1000g_state & E1000G_STARTED) { |
| if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { |
| rw_exit(&Adapter->chip_lock); |
| /* |
| * We note the failure, but return success, as the |
| * system is still usable without this controller. |
| */ |
| e1000g_log(Adapter, CE_WARN, |
| "e1000g_resume: failed to restart controller\n"); |
| return (DDI_SUCCESS); |
| } |
| /* Enable and start the watchdog timer */ |
| enable_watchdog_timer(Adapter); |
| } |
| |
| Adapter->e1000g_state &= ~E1000G_SUSPENDED; |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| return (DDI_SUCCESS); |
| } |
| |
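| /* |
| * e1000g_suspend - DDI_SUSPEND handler: stop the adapter and its timers |
| */ |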
| static int |
| e1000g_suspend(dev_info_t *devinfo) |
| { |
| struct e1000g *Adapter; |
| |
| Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); |
| if (Adapter == NULL) |
| return (DDI_FAILURE); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| Adapter->e1000g_state |= E1000G_SUSPENDED; |
| |
| /* if the port isn't plumbed, we can simply return */ |
| if (!(Adapter->e1000g_state & E1000G_STARTED)) { |
| rw_exit(&Adapter->chip_lock); |
| return (DDI_SUCCESS); |
| } |
| |
| e1000g_stop(Adapter, B_FALSE); |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| /* Disable and stop all the timers */ |
| disable_watchdog_timer(Adapter); |
| stop_link_timer(Adapter); |
| stop_82547_timer(Adapter->tx_ring); |
| |
| return (DDI_SUCCESS); |
| } |
| |
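| /* |
| * e1000g_init - reset and initialize the chip: validate the NVM |
| * checksum, program the MAC address, PBA and flow-control thresholds, |
| * and start the link setup timer |
| */ |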
| static int |
| e1000g_init(struct e1000g *Adapter) |
| { |
| uint32_t pba; |
| uint32_t high_water; |
| struct e1000_hw *hw; |
| clock_t link_timeout; |
| int result; |
| |
| hw = &Adapter->shared; |
| |
| /* |
| * reset to put the hardware in a known state |
| * before we try to do anything with the eeprom |
| */ |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_reset_hw(hw); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result != E1000_SUCCESS) { |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_validate_nvm_checksum(hw); |
| if (result < E1000_SUCCESS) { |
| /* |
| * Some PCI-E parts fail the first check due to |
| * the link being in sleep state. Call it again; |
| * if it fails a second time, it's a real issue. |
| */ |
| result = e1000_validate_nvm_checksum(hw); |
| } |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result < E1000_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, |
| "Invalid NVM checksum. Please contact " |
| "the vendor to update the NVM."); |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| result = 0; |
| #ifdef __sparc |
| /* |
| * First, we try to get the local ethernet address from OBP. If |
| * that fails, we get it from the EEPROM of the NIC card. |
| */ |
| result = e1000g_find_mac_address(Adapter); |
| #endif |
| /* Get the local ethernet address. */ |
| if (!result) { |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_read_mac_addr(hw); |
| mutex_exit(&e1000g_nvm_lock); |
| } |
| |
| if (result < E1000_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* check for valid mac address */ |
| if (!is_valid_mac_addr(hw->mac.addr)) { |
| e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* Set LAA state for 82571 chipset */ |
| e1000_set_laa_state_82571(hw, B_TRUE); |
| |
| /* Master Latency Timer implementation */ |
| if (Adapter->master_latency_timer) { |
| pci_config_put8(Adapter->osdep.cfg_handle, |
| PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); |
| } |
| |
| if (hw->mac.type < e1000_82547) { |
| /* |
| * Total FIFO is 64K |
| */ |
| if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) |
| pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ |
| else |
| pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ |
| } else if ((hw->mac.type == e1000_82571) || |
| (hw->mac.type == e1000_82572) || |
| (hw->mac.type == e1000_80003es2lan)) { |
| /* |
| * Total FIFO is 48K |
| */ |
| if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) |
| pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ |
| else |
| pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ |
| } else if (hw->mac.type == e1000_82573) { |
| pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ |
| } else if (hw->mac.type == e1000_82574) { |
| /* Keep adapter default: 20K for Rx, 20K for Tx */ |
| pba = E1000_READ_REG(hw, E1000_PBA); |
| } else if (hw->mac.type == e1000_ich8lan) { |
| pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ |
| } else if (hw->mac.type == e1000_ich9lan) { |
| pba = E1000_PBA_10K; |
| } else if (hw->mac.type == e1000_ich10lan) { |
| pba = E1000_PBA_10K; |
| } else if (hw->mac.type == e1000_pchlan) { |
| pba = E1000_PBA_26K; |
| } else if (hw->mac.type == e1000_pch2lan) { |
| pba = E1000_PBA_26K; |
| } else if (hw->mac.type == e1000_pch_lpt) { |
| pba = E1000_PBA_26K; |
| } else if (hw->mac.type == e1000_pch_spt) { |
| pba = E1000_PBA_26K; |
| } else { |
| /* |
| * Total FIFO is 40K |
| */ |
| if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) |
| pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ |
| else |
| pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ |
| } |
| E1000_WRITE_REG(hw, E1000_PBA, pba); |
| |
| /* |
| * These parameters set thresholds for the adapter's generation (Tx) |
| * and response (Rx) to Ethernet PAUSE frames. These are just threshold |
| * settings. Flow control is enabled or disabled in the configuration |
| * file. |
| * High-water mark is set down from the top of the rx fifo (not |
| * sensitive to max_frame_size) and low-water is set just below |
| * high-water mark. |
| * The high water mark must be low enough to fit one full frame above |
| * it in the rx FIFO. Should be the lower of: |
| * 90% of the Rx FIFO size and the full Rx FIFO size minus the early |
| * receive size (assuming ERT set to E1000_ERT_2048), or the full |
| * Rx FIFO size minus one full frame. |
| */ |
| high_water = min(((pba << 10) * 9 / 10), |
| ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || |
| hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? |
| ((pba << 10) - (E1000_ERT_2048 << 3)) : |
| ((pba << 10) - Adapter->max_frame_size))); |
| |
| hw->fc.high_water = high_water & 0xFFF8; |
| hw->fc.low_water = hw->fc.high_water - 8; |
| |
| if (hw->mac.type == e1000_80003es2lan) |
| hw->fc.pause_time = 0xFFFF; |
| else |
| hw->fc.pause_time = E1000_FC_PAUSE_TIME; |
| hw->fc.send_xon = B_TRUE; |
| |
| /* |
| * Reset the adapter hardware a second time. |
| */ |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_reset_hw(hw); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result != E1000_SUCCESS) { |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* disable wakeup control by default */ |
| if (hw->mac.type >= e1000_82544) |
| E1000_WRITE_REG(hw, E1000_WUC, 0); |
| |
| /* |
| * MWI should be disabled on 82546. |
| */ |
| if (hw->mac.type == e1000_82546) |
| e1000_pci_clear_mwi(hw); |
| else |
| e1000_pci_set_mwi(hw); |
| |
| /* |
| * Configure/Initialize hardware |
| */ |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_init_hw(hw); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result < E1000_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* |
| * Restore LED settings to the default from EEPROM |
| * to meet the standard for Sun platforms. |
| */ |
| (void) e1000_cleanup_led(hw); |
| |
| /* Disable Smart Power Down */ |
| phy_spd_state(hw, B_FALSE); |
| |
| /* Make sure driver has control */ |
| e1000g_get_driver_control(hw); |
| |
| /* |
| * Initialize unicast addresses. |
| */ |
| e1000g_init_unicst(Adapter); |
| |
| /* |
| * Set up and initialize the mctable structures. After this routine |
| * completes, the multicast table will be set. |
| */ |
| e1000_update_mc_addr_list(hw, |
| (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); |
| msec_delay(5); |
| |
| /* |
| * Implement Adaptive IFS |
| */ |
| e1000_reset_adaptive(hw); |
| |
| /* Setup Interrupt Throttling Register */ |
| if (hw->mac.type >= e1000_82540) { |
| E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); |
| } else |
| Adapter->intr_adaptive = B_FALSE; |
| |
| /* Start the timer for link setup */ |
| if (hw->mac.autoneg) |
| link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); |
| else |
| link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); |
| |
| mutex_enter(&Adapter->link_lock); |
| if (hw->phy.autoneg_wait_to_complete) { |
| Adapter->link_complete = B_TRUE; |
| } else { |
| Adapter->link_complete = B_FALSE; |
| Adapter->link_tid = timeout(e1000g_link_timer, |
| (void *)Adapter, link_timeout); |
| } |
| mutex_exit(&Adapter->link_lock); |
| |
| /* Save the state of the phy */ |
| e1000g_get_phy_state(Adapter); |
| |
| e1000g_param_sync(Adapter); |
| |
| Adapter->init_count++; |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { |
| goto init_fail; |
| } |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| goto init_fail; |
| } |
| |
| Adapter->poll_mode = e1000g_poll_mode; |
| |
| return (DDI_SUCCESS); |
| |
| init_fail: |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| return (DDI_FAILURE); |
| } |
| |
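| /* |
| * e1000g_alloc_rx_data - allocate and initialize the rx_data structure |
| * for the rx ring |
| */ |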
| static int |
| e1000g_alloc_rx_data(struct e1000g *Adapter) |
| { |
| e1000g_rx_ring_t *rx_ring; |
| e1000g_rx_data_t *rx_data; |
| |
| rx_ring = Adapter->rx_ring; |
| |
| rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); |
| |
| if (rx_data == NULL) |
| return (DDI_FAILURE); |
| |
| rx_data->priv_devi_node = Adapter->priv_devi_node; |
| rx_data->rx_ring = rx_ring; |
| |
| mutex_init(&rx_data->freelist_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| mutex_init(&rx_data->recycle_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); |
| |
| rx_ring->rx_data = rx_data; |
| |
| return (DDI_SUCCESS); |
| } |
| |
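| /* |
| * e1000g_free_rx_pending_buffers - free the rx software packets still |
| * held in the packet area |
| */ |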
| void |
| e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) |
| { |
| rx_sw_packet_t *packet, *next_packet; |
| |
| if (rx_data == NULL) |
| return; |
| |
| packet = rx_data->packet_area; |
| while (packet != NULL) { |
| next_packet = packet->next; |
| e1000g_free_rx_sw_packet(packet, B_TRUE); |
| packet = next_packet; |
| } |
| rx_data->packet_area = NULL; |
| } |
| |
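| /* |
| * e1000g_free_rx_data - destroy the rx_data locks and free the structure |
| */ |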
| void |
| e1000g_free_rx_data(e1000g_rx_data_t *rx_data) |
| { |
| if (rx_data == NULL) |
| return; |
| |
| mutex_destroy(&rx_data->freelist_lock); |
| mutex_destroy(&rx_data->recycle_lock); |
| |
| kmem_free(rx_data, sizeof (e1000g_rx_data_t)); |
| } |
| |
| /* |
| * Check if the link is up |
| */ |
| static boolean_t |
| e1000g_link_up(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| boolean_t link_up = B_FALSE; |
| |
| /* |
| * get_link_status is set in the interrupt handler on a link-status-change |
| * or rx sequence error interrupt. It remains set until |
| * e1000_check_for_link establishes link; this applies to copper |
| * adapters only. |
| */ |
| switch (hw->phy.media_type) { |
| case e1000_media_type_copper: |
| if (hw->mac.get_link_status) { |
| /* |
| * SPT devices need a bit of extra time before we ask |
| * them. |
| */ |
| if (hw->mac.type == e1000_pch_spt) |
| msec_delay(50); |
| (void) e1000_check_for_link(hw); |
| if ((E1000_READ_REG(hw, E1000_STATUS) & |
| E1000_STATUS_LU)) { |
| link_up = B_TRUE; |
| } else { |
| link_up = !hw->mac.get_link_status; |
| } |
| } else { |
| link_up = B_TRUE; |
| } |
| break; |
| case e1000_media_type_fiber: |
| (void) e1000_check_for_link(hw); |
| link_up = (E1000_READ_REG(hw, E1000_STATUS) & |
| E1000_STATUS_LU); |
| break; |
| case e1000_media_type_internal_serdes: |
| (void) e1000_check_for_link(hw); |
| link_up = hw->mac.serdes_has_link; |
| break; |
| } |
| |
| return (link_up); |
| } |
| |
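| /* |
| * e1000g_m_ioctl - MAC ioctl entry point; handles the loopback ioctls |
| * and, when E1000G_DEBUG is defined, register/memory peek-poke and |
| * chip-reset ioctls |
| */ |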
| static void |
| e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) |
| { |
| struct iocblk *iocp; |
| struct e1000g *e1000gp; |
| enum ioc_reply status; |
| |
| iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; |
| iocp->ioc_error = 0; |
| e1000gp = (struct e1000g *)arg; |
| |
| ASSERT(e1000gp); |
| if (e1000gp == NULL) { |
| miocnak(q, mp, 0, EINVAL); |
| return; |
| } |
| |
| rw_enter(&e1000gp->chip_lock, RW_READER); |
| if (e1000gp->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&e1000gp->chip_lock); |
| miocnak(q, mp, 0, EINVAL); |
| return; |
| } |
| rw_exit(&e1000gp->chip_lock); |
| |
| switch (iocp->ioc_cmd) { |
| |
| case LB_GET_INFO_SIZE: |
| case LB_GET_INFO: |
| case LB_GET_MODE: |
| case LB_SET_MODE: |
| status = e1000g_loopback_ioctl(e1000gp, iocp, mp); |
| break; |
| |
| |
| #ifdef E1000G_DEBUG |
| case E1000G_IOC_REG_PEEK: |
| case E1000G_IOC_REG_POKE: |
| status = e1000g_pp_ioctl(e1000gp, iocp, mp); |
| break; |
| case E1000G_IOC_CHIP_RESET: |
| e1000gp->reset_count++; |
| if (e1000g_reset_adapter(e1000gp)) |
| status = IOC_ACK; |
| else |
| status = IOC_INVAL; |
| break; |
| #endif |
| default: |
| status = IOC_INVAL; |
| break; |
| } |
| |
| /* |
| * Decide how to reply |
| */ |
| switch (status) { |
| default: |
| case IOC_INVAL: |
| /* |
| * Error, reply with a NAK and EINVAL or the specified error |
| */ |
| miocnak(q, mp, 0, iocp->ioc_error == 0 ? |
| EINVAL : iocp->ioc_error); |
| break; |
| |
| case IOC_DONE: |
| /* |
| * OK, reply already sent |
| */ |
| break; |
| |
| case IOC_ACK: |
| /* |
| * OK, reply with an ACK |
| */ |
| miocack(q, mp, 0, 0); |
| break; |
| |
| case IOC_REPLY: |
| /* |
| * OK, send prepared reply as ACK or NAK |
| */ |
| mp->b_datap->db_type = iocp->ioc_error == 0 ? |
| M_IOCACK : M_IOCNAK; |
| qreply(q, mp); |
| break; |
| } |
| } |
| |
| /* |
| * The default value of e1000g_poll_mode == 0 assumes that the NIC is |
| * capable of supporting only one interrupt and we shouldn't disable |
| * the physical interrupt. In this case we let the interrupt come and |
| * we queue the packets in the rx ring itself in case we are in polling |
| * mode (better latency but slightly lower performance and a very |
| * high interrupt count in mpstat, which is harmless). |
| * |
| * e1000g_poll_mode == 1 assumes that we have a per-Rx-ring interrupt |
| * which can be disabled in poll mode. This gives better overall |
| * throughput (compared to the mode above), shows a very low interrupt |
| * count, but has slightly higher latency since we pick up the packets |
| * when the poll thread does its polling. |
| * |
| * Currently, this flag should be enabled only while doing performance |
| * measurement or when it can be guaranteed that the entire NIC going |
| * into poll mode will not harm any traffic such as cluster heartbeats. |
| */ |
| int e1000g_poll_mode = 0; |
| |
| /* |
| * Called from the upper layers when driver is in polling mode to |
| * pick up any queued packets. Care should be taken to not block |
| * this thread. |
| */ |
| static mblk_t * |
| e1000g_poll_ring(void *arg, int bytes_to_pickup) |
| { |
| e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; |
| mblk_t *mp = NULL; |
| mblk_t *tail; |
| struct e1000g *adapter; |
| |
| adapter = rx_ring->adapter; |
| |
| rw_enter(&adapter->chip_lock, RW_READER); |
| |
| if (adapter->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&adapter->chip_lock); |
| return (NULL); |
| } |
| |
| mutex_enter(&rx_ring->rx_lock); |
| mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); |
| mutex_exit(&rx_ring->rx_lock); |
| rw_exit(&adapter->chip_lock); |
| return (mp); |
| } |
| |
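| /* |
| * e1000g_m_start - MAC start entry point |
| */ |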
| static int |
| e1000g_m_start(void *arg) |
| { |
| struct e1000g *Adapter = (struct e1000g *)arg; |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&Adapter->chip_lock); |
| return (ECANCELED); |
| } |
| |
| if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { |
| rw_exit(&Adapter->chip_lock); |
| return (ENOTACTIVE); |
| } |
| |
| Adapter->e1000g_state |= E1000G_STARTED; |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| /* Enable and start the watchdog timer */ |
| enable_watchdog_timer(Adapter); |
| |
| return (0); |
| } |
| |
| static int |
| e1000g_start(struct e1000g *Adapter, boolean_t global) |
| { |
| e1000g_rx_data_t *rx_data; |
| |
| if (global) { |
| if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); |
| goto start_fail; |
| } |
| |
| /* Allocate dma resources for descriptors and buffers */ |
| if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, |
| "Alloc DMA resources failed"); |
| goto start_fail; |
| } |
| Adapter->rx_buffer_setup = B_FALSE; |
| } |
| |
| if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { |
| if (e1000g_init(Adapter) != DDI_SUCCESS) { |
| e1000g_log(Adapter, CE_WARN, |
| "Adapter initialization failed"); |
| goto start_fail; |
| } |
| } |
| |
| /* Setup and initialize the transmit structures */ |
| e1000g_tx_setup(Adapter); |
| msec_delay(5); |
| |
| /* Setup and initialize the receive structures */ |
| e1000g_rx_setup(Adapter); |
| msec_delay(5); |
| |
| /* Restore the e1000g promiscuous mode */ |
| e1000g_restore_promisc(Adapter); |
| |
| e1000g_mask_interrupt(Adapter); |
| |
| Adapter->attach_progress |= ATTACH_PROGRESS_INIT; |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| goto start_fail; |
| } |
| |
| return (DDI_SUCCESS); |
| |
| start_fail: |
| rx_data = Adapter->rx_ring->rx_data; |
| |
| if (global) { |
| e1000g_release_dma_resources(Adapter); |
| e1000g_free_rx_pending_buffers(rx_data); |
| e1000g_free_rx_data(rx_data); |
| } |
| |
| mutex_enter(&e1000g_nvm_lock); |
| (void) e1000_reset_hw(&Adapter->shared); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| return (DDI_FAILURE); |
| } |
| |
| /* |
| * The I219 has the curious property that if the descriptor rings are not |
| * emptied before resetting the hardware or before changing the device state |
| * based on runtime power management, it'll cause the card to hang. This can |
| * then only be fixed by a PCI reset. As such, for the I219 and it alone, we |
| * have to flush the rings if we're in this state. |
| */ |
| static void |
| e1000g_flush_desc_rings(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| u16 hang_state; |
| u32 fext_nvm11, tdlen; |
| |
| /* First, disable MULR fix in FEXTNVM11 */ |
| fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); |
| fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; |
| E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); |
| |
| /* Do nothing if we're not in a faulty state, or if the Tx queue is empty */ |
| tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); |
| hang_state = pci_config_get16(Adapter->osdep.cfg_handle, |
| PCICFG_DESC_RING_STATUS); |
| if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) |
| return; |
| e1000g_flush_tx_ring(Adapter); |
| |
| /* recheck, maybe the fault is caused by the rx ring */ |
| hang_state = pci_config_get16(Adapter->osdep.cfg_handle, |
| PCICFG_DESC_RING_STATUS); |
| if (hang_state & FLUSH_DESC_REQUIRED) |
| e1000g_flush_rx_ring(Adapter); |
| } |
| |
| static void |
| e1000g_m_stop(void *arg) |
| { |
| struct e1000g *Adapter = (struct e1000g *)arg; |
| |
| /* Drain tx sessions */ |
| (void) e1000g_tx_drain(Adapter); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&Adapter->chip_lock); |
| return; |
| } |
| Adapter->e1000g_state &= ~E1000G_STARTED; |
| e1000g_stop(Adapter, B_TRUE); |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| /* Disable and stop all the timers */ |
| disable_watchdog_timer(Adapter); |
| stop_link_timer(Adapter); |
| stop_82547_timer(Adapter->tx_ring); |
| } |
| |
| static void |
| e1000g_stop(struct e1000g *Adapter, boolean_t global) |
| { |
| private_devi_list_t *devi_node; |
| e1000g_rx_data_t *rx_data; |
| int result; |
| |
| Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; |
| |
| /* Stop the chip and release pending resources */ |
| |
| /* Tell firmware driver is no longer in control */ |
| e1000g_release_driver_control(&Adapter->shared); |
| |
| e1000g_clear_all_interrupts(Adapter); |
| |
| mutex_enter(&e1000g_nvm_lock); |
| result = e1000_reset_hw(&Adapter->shared); |
| mutex_exit(&e1000g_nvm_lock); |
| |
| if (result != E1000_SUCCESS) { |
| e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| } |
| |
| mutex_enter(&Adapter->link_lock); |
| Adapter->link_complete = B_FALSE; |
| mutex_exit(&Adapter->link_lock); |
| |
| /* Release resources still held by the TX descriptors */ |
| e1000g_tx_clean(Adapter); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); |
| |
| /* Clean the pending rx jumbo packet fragment */ |
| e1000g_rx_clean(Adapter); |
| |
| /* |
| * The I219, e.g. the pch_spt, has bugs such that we must ensure that |
| * rings are flushed before we do anything else. This must be done |
| * before we release DMA resources. |
| */ |
| if (Adapter->shared.mac.type == e1000_pch_spt) |
| e1000g_flush_desc_rings(Adapter); |
| |
| if (global) { |
| e1000g_release_dma_resources(Adapter); |
| |
| mutex_enter(&e1000g_rx_detach_lock); |
| rx_data = Adapter->rx_ring->rx_data; |
| rx_data->flag |= E1000G_RX_STOPPED; |
| |
| if (rx_data->pending_count == 0) { |
| e1000g_free_rx_pending_buffers(rx_data); |
| e1000g_free_rx_data(rx_data); |
| } else { |
| devi_node = rx_data->priv_devi_node; |
| if (devi_node != NULL) |
| atomic_inc_32(&devi_node->pending_rx_count); |
| else |
| atomic_inc_32(&Adapter->pending_rx_count); |
| } |
| mutex_exit(&e1000g_rx_detach_lock); |
| } |
| |
| if (Adapter->link_state != LINK_STATE_UNKNOWN) { |
| Adapter->link_state = LINK_STATE_UNKNOWN; |
| if (!Adapter->reset_flag) |
| mac_link_update(Adapter->mh, Adapter->link_state); |
| } |
| } |
| |
| static void |
| e1000g_rx_clean(struct e1000g *Adapter) |
| { |
| e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; |
| |
| if (rx_data == NULL) |
| return; |
| |
| if (rx_data->rx_mblk != NULL) { |
| freemsg(rx_data->rx_mblk); |
| rx_data->rx_mblk = NULL; |
| rx_data->rx_mblk_tail = NULL; |
| rx_data->rx_mblk_len = 0; |
| } |
| } |
| |
| static void |
| e1000g_tx_clean(struct e1000g *Adapter) |
| { |
| e1000g_tx_ring_t *tx_ring; |
| p_tx_sw_packet_t packet; |
| mblk_t *mp; |
| mblk_t *nmp; |
| uint32_t packet_count; |
| |
| tx_ring = Adapter->tx_ring; |
| |
| /* |
| * Here we don't need to protect the lists using |
| * the usedlist_lock and freelist_lock, for they |
| * have been protected by the chip_lock. |
| */ |
| mp = NULL; |
| nmp = NULL; |
| packet_count = 0; |
| packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); |
| while (packet != NULL) { |
| if (packet->mp != NULL) { |
| /* Assemble the message chain */ |
| if (mp == NULL) { |
| mp = packet->mp; |
| nmp = packet->mp; |
| } else { |
| nmp->b_next = packet->mp; |
| nmp = packet->mp; |
| } |
| /* Disconnect the message from the sw packet */ |
| packet->mp = NULL; |
| } |
| |
| e1000g_free_tx_swpkt(packet); |
| packet_count++; |
| |
| packet = (p_tx_sw_packet_t) |
| QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); |
| } |
| |
| if (mp != NULL) |
| freemsgchain(mp); |
| |
| if (packet_count > 0) { |
| QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); |
| QUEUE_INIT_LIST(&tx_ring->used_list); |
| |
| /* Setup TX descriptor pointers */ |
| tx_ring->tbd_next = tx_ring->tbd_first; |
| tx_ring->tbd_oldest = tx_ring->tbd_first; |
| |
| /* Setup our HW Tx Head & Tail descriptor pointers */ |
| E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); |
| E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); |
| } |
| } |
| |
| static boolean_t |
| e1000g_tx_drain(struct e1000g *Adapter) |
| { |
| int i; |
| boolean_t done; |
| e1000g_tx_ring_t *tx_ring; |
| |
| tx_ring = Adapter->tx_ring; |
| |
| /* Allow up to TX_DRAIN_TIME ms for pending transmits to complete. */ |
| for (i = 0; i < TX_DRAIN_TIME; i++) { |
| mutex_enter(&tx_ring->usedlist_lock); |
| done = IS_QUEUE_EMPTY(&tx_ring->used_list); |
| mutex_exit(&tx_ring->usedlist_lock); |
| |
| if (done) |
| break; |
| |
| msec_delay(1); |
| } |
| |
| return (done); |
| } |
| |
| static boolean_t |
| e1000g_rx_drain(struct e1000g *Adapter) |
| { |
| int i; |
| boolean_t done; |
| |
| /* |
| * Allow up to RX_DRAIN_TIME ms for pending receive buffers to be released. |
| */ |
| for (i = 0; i < RX_DRAIN_TIME; i++) { |
| done = (Adapter->pending_rx_count == 0); |
| |
| if (done) |
| break; |
| |
| msec_delay(1); |
| } |
| |
| return (done); |
| } |
| |
| static boolean_t |
| e1000g_reset_adapter(struct e1000g *Adapter) |
| { |
| /* Disable and stop all the timers */ |
| disable_watchdog_timer(Adapter); |
| stop_link_timer(Adapter); |
| stop_82547_timer(Adapter->tx_ring); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->stall_flag) { |
| Adapter->stall_flag = B_FALSE; |
| Adapter->reset_flag = B_TRUE; |
| } |
| |
| if (!(Adapter->e1000g_state & E1000G_STARTED)) { |
| rw_exit(&Adapter->chip_lock); |
| return (B_TRUE); |
| } |
| |
| e1000g_stop(Adapter, B_FALSE); |
| |
| if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { |
| rw_exit(&Adapter->chip_lock); |
| e1000g_log(Adapter, CE_WARN, "Reset failed"); |
| return (B_FALSE); |
| } |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| /* Enable and start the watchdog timer */ |
| enable_watchdog_timer(Adapter); |
| |
| return (B_TRUE); |
| } |
| |
| boolean_t |
| e1000g_global_reset(struct e1000g *Adapter) |
| { |
| /* Disable and stop all the timers */ |
| disable_watchdog_timer(Adapter); |
| stop_link_timer(Adapter); |
| stop_82547_timer(Adapter->tx_ring); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| e1000g_stop(Adapter, B_TRUE); |
| |
| Adapter->init_count = 0; |
| |
| if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { |
| rw_exit(&Adapter->chip_lock); |
| e1000g_log(Adapter, CE_WARN, "Reset failed"); |
| return (B_FALSE); |
| } |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| /* Enable and start the watchdog timer */ |
| enable_watchdog_timer(Adapter); |
| |
| return (B_TRUE); |
| } |
| |
| /* |
| * e1000g_intr_pciexpress - ISR for PCI Express chipsets |
| * |
| * This interrupt service routine is for PCI-Express adapters. |
| * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED |
| * bit is set. |
| */ |
| static uint_t |
| e1000g_intr_pciexpress(caddr_t arg) |
| { |
| struct e1000g *Adapter; |
| uint32_t icr; |
| |
| Adapter = (struct e1000g *)(uintptr_t)arg; |
| icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| return (DDI_INTR_CLAIMED); |
| } |
| |
| if (icr & E1000_ICR_INT_ASSERTED) { |
| /* |
| * E1000_ICR_INT_ASSERTED bit was set: |
| * Read(Clear) the ICR, claim this interrupt, |
| * look for work to do. |
| */ |
| e1000g_intr_work(Adapter, icr); |
| return (DDI_INTR_CLAIMED); |
| } else { |
| /* |
| * E1000_ICR_INT_ASSERTED bit was not set: |
| * Don't claim this interrupt, return immediately. |
| */ |
| return (DDI_INTR_UNCLAIMED); |
| } |
| } |
| |
| /* |
| * e1000g_intr - ISR for PCI/PCI-X chipsets |
| * |
| * This interrupt service routine is for PCI/PCI-X adapters. |
| * We check the ICR contents regardless of whether the |
| * E1000_ICR_INT_ASSERTED bit is set. |
| */ |
| static uint_t |
| e1000g_intr(caddr_t arg) |
| { |
| struct e1000g *Adapter; |
| uint32_t icr; |
| |
| Adapter = (struct e1000g *)(uintptr_t)arg; |
| icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| return (DDI_INTR_CLAIMED); |
| } |
| |
| if (icr) { |
| /* |
| * Any bit was set in ICR: |
| * Read(Clear) the ICR, claim this interrupt, |
| * look for work to do. |
| */ |
| e1000g_intr_work(Adapter, icr); |
| return (DDI_INTR_CLAIMED); |
| } else { |
| /* |
| * No bit was set in ICR: |
| * Don't claim this interrupt, return immediately. |
| */ |
| return (DDI_INTR_UNCLAIMED); |
| } |
| } |
| |
| /* |
| * e1000g_intr_work - actual processing of ISR |
| * |
| * Read(clear) the ICR contents and call appropriate interrupt |
| * processing routines. |
| */ |
| static void |
| e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; |
| |
| Adapter->rx_pkt_cnt = 0; |
| Adapter->tx_pkt_cnt = 0; |
| |
| rw_enter(&Adapter->chip_lock, RW_READER); |
| |
| if (Adapter->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&Adapter->chip_lock); |
| return; |
| } |
| /* |
| * Check the E1000G_STARTED bit of "e1000g_state" under the chip_lock |
| * to ensure the receive routine will not execute while the adapter is |
| * being reset. |
| */ |
| if (!(Adapter->e1000g_state & E1000G_STARTED)) { |
| rw_exit(&Adapter->chip_lock); |
| return; |
| } |
| |
| if (icr & E1000_ICR_RXT0) { |
| mblk_t *mp = NULL; |
| mblk_t *tail = NULL; |
| e1000g_rx_ring_t *rx_ring; |
| |
| rx_ring = Adapter->rx_ring; |
| mutex_enter(&rx_ring->rx_lock); |
| /* |
| * Sometimes with legacy interrupts, it is possible that |
| * a single interrupt covers both Rx and Tx. In that |
| * case, if the poll flag is set, we shouldn't really |
| * be doing Rx processing here. |
| */ |
| if (!rx_ring->poll_flag) |
| mp = e1000g_receive(rx_ring, &tail, |
| E1000G_CHAIN_NO_LIMIT); |
| mutex_exit(&rx_ring->rx_lock); |
| rw_exit(&Adapter->chip_lock); |
| if (mp != NULL) |
| mac_rx_ring(Adapter->mh, rx_ring->mrh, |
| mp, rx_ring->ring_gen_num); |
| } else |
| rw_exit(&Adapter->chip_lock); |
| |
| if (icr & E1000_ICR_TXDW) { |
| if (!Adapter->tx_intr_enable) |
| e1000g_clear_tx_interrupt(Adapter); |
| |
| /* Recycle the tx descriptors */ |
| rw_enter(&Adapter->chip_lock, RW_READER); |
| (void) e1000g_recycle(tx_ring); |
| E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); |
| rw_exit(&Adapter->chip_lock); |
| |
| /* |
| * If transmission was blocked waiting for free descriptors, tell |
| * the MAC layer to resume now that enough have been recycled. |
| */ |
| if (tx_ring->resched_needed && |
| (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { |
| tx_ring->resched_needed = B_FALSE; |
| mac_tx_update(Adapter->mh); |
| E1000G_STAT(tx_ring->stat_reschedule); |
| } |
| } |
| |
| /* |
| * The receive sequence error (RXSEQ) and link status change (LSC) |
| * interrupts are checked to detect that the cable has been pulled |
| * out. For the Wiseman 2.0 silicon, a receive sequence error |
| * interrupt is an indication that the cable is not connected. |
| */ |
| if ((icr & E1000_ICR_RXSEQ) || |
| (icr & E1000_ICR_LSC) || |
| (icr & E1000_ICR_GPI_EN1)) { |
| boolean_t link_changed; |
| timeout_id_t tid = 0; |
| |
| stop_watchdog_timer(Adapter); |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| /* |
| * Because we got a link-status-change interrupt, force |
| * e1000_check_for_link() to look at the PHY. |
| */ |
| Adapter->shared.mac.get_link_status = B_TRUE; |
| |
| /* e1000g_link_check takes care of link status change */ |
| link_changed = e1000g_link_check(Adapter); |
| |
| /* Get new phy state */ |
| e1000g_get_phy_state(Adapter); |
| |
| /* |
| * If the link timer has not timed out, we'll not notify |
| * the upper layer with any link state until the link is up. |
| */ |
| if (link_changed && !Adapter->link_complete) { |
| if (Adapter->link_state == LINK_STATE_UP) { |
| mutex_enter(&Adapter->link_lock); |
| Adapter->link_complete = B_TRUE; |
| tid = Adapter->link_tid; |
| Adapter->link_tid = 0; |
| mutex_exit(&Adapter->link_lock); |
| } else { |
| link_changed = B_FALSE; |
| } |
| } |
| rw_exit(&Adapter->chip_lock); |
| |
| if (link_changed) { |
| if (tid != 0) |
| (void) untimeout(tid); |
| |
| /* |
| * Workaround for ESB2: data can get stuck in the FIFO on a |
| * link down event. Stop the receiver here and reset it in the |
| * watchdog. |
| */ |
| if ((Adapter->link_state == LINK_STATE_DOWN) && |
| (Adapter->shared.mac.type == e1000_80003es2lan)) { |
| uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); |
| E1000_WRITE_REG(hw, E1000_RCTL, |
| rctl & ~E1000_RCTL_EN); |
| e1000g_log(Adapter, CE_WARN, |
| "ESB2 receiver disabled"); |
| Adapter->esb2_workaround = B_TRUE; |
| } |
| if (!Adapter->reset_flag) |
| mac_link_update(Adapter->mh, |
| Adapter->link_state); |
| if (Adapter->link_state == LINK_STATE_UP) |
| Adapter->reset_flag = B_FALSE; |
| } |
| |
| start_watchdog_timer(Adapter); |
| } |
| } |
| |
| static void |
| e1000g_init_unicst(struct e1000g *Adapter) |
| { |
| struct e1000_hw *hw; |
| int slot; |
| |
| hw = &Adapter->shared; |
| |
| if (Adapter->init_count == 0) { |
| /* Initialize the multiple unicast addresses */ |
| Adapter->unicst_total = min(hw->mac.rar_entry_count, |
| MAX_NUM_UNICAST_ADDRESSES); |
| |
| /* |
| * The common code does not correctly calculate the number of |
| * RARs that could be reserved by firmware for the pch_lpt and |
| * pch_spt MACs. The interface has one primary RAR and 11 |
| * additional ones; those 11 additional ones are not always |
| * available. According to the datasheet, we need to check a |
| * few bits in the FWSM register. If the value is zero, all 11 |
| * are available. If the value is 1, none of the additional |
| * registers are available. If the value is 2-7, only that many |
| * are available. |
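| * |
| * For example, following that mapping and the code below, a locked |
| * value of 3 yields rar == 4: the primary RAR plus three of the |
| * additional ones. |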
| */ |
| if (hw->mac.type == e1000_pch_lpt || |
| hw->mac.type == e1000_pch_spt) { |
| uint32_t locked, rar; |
| |
| locked = E1000_READ_REG(hw, E1000_FWSM) & |
| E1000_FWSM_WLOCK_MAC_MASK; |
| locked >>= E1000_FWSM_WLOCK_MAC_SHIFT; |
| rar = 1; |
| if (locked == 0) |
| rar += 11; |
| else if (locked == 1) |
| rar += 0; |
| else |
| rar += locked; |
| Adapter->unicst_total = min(rar, |
| MAX_NUM_UNICAST_ADDRESSES); |
| } |
| |
| /* Workaround for an erratum of the 82571 chipset */ |
| if ((hw->mac.type == e1000_82571) && |
| (e1000_get_laa_state_82571(hw) == B_TRUE)) |
| Adapter->unicst_total--; |
| |
| /* VMware doesn't support multiple mac addresses properly */ |
| if (hw->subsystem_vendor_id == 0x15ad) |
| Adapter->unicst_total = 1; |
| |
| Adapter->unicst_avail = Adapter->unicst_total; |
| |
| for (slot = 0; slot < Adapter->unicst_total; slot++) { |
| /* Clear both the flag and MAC address */ |
| Adapter->unicst_addr[slot].reg.high = 0; |
| Adapter->unicst_addr[slot].reg.low = 0; |
| } |
| } else { |
| /* Workaround for an erratum of the 82571 chipset */ |
| if ((hw->mac.type == e1000_82571) && |
| (e1000_get_laa_state_82571(hw) == B_TRUE)) |
| (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); |
| |
| /* Re-configure the RAR registers */ |
| for (slot = 0; slot < Adapter->unicst_total; slot++) |
| if (Adapter->unicst_addr[slot].mac.set == 1) |
| (void) e1000_rar_set(hw, |
| Adapter->unicst_addr[slot].mac.addr, slot); |
| } |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| } |
| |
| static int |
| e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, |
| int slot) |
| { |
| struct e1000_hw *hw; |
| |
| hw = &Adapter->shared; |
| |
| /* |
| * The first revision of Wiseman silicon (rev 2.0) has an errata |
| * that requires the receiver to be in reset when any of the |
| * receive address registers (RAR regs) are accessed. The first |
| * rev of Wiseman silicon also requires MWI to be disabled when |
| * a global reset or a receive reset is issued. So before we |
| * initialize the RARs, we check the rev of the Wiseman controller |
| * and work around any necessary HW errata. |
| */ |
| if ((hw->mac.type == e1000_82542) && |
| (hw->revision_id == E1000_REVISION_2)) { |
| e1000_pci_clear_mwi(hw); |
| E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); |
| msec_delay(5); |
| } |
| if (mac_addr == NULL) { |
| E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); |
| E1000_WRITE_FLUSH(hw); |
| E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); |
| E1000_WRITE_FLUSH(hw); |
| /* Clear both the flag and MAC address */ |
| Adapter->unicst_addr[slot].reg.high = 0; |
| Adapter->unicst_addr[slot].reg.low = 0; |
| } else { |
| bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, |
| ETHERADDRL); |
| (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); |
| Adapter->unicst_addr[slot].mac.set = 1; |
| } |
| |
| /* Workaround for an erratum of the 82571 chipset */ |
| if (slot == 0) { |
| if ((hw->mac.type == e1000_82571) && |
| (e1000_get_laa_state_82571(hw) == B_TRUE)) |
| if (mac_addr == NULL) { |
| E1000_WRITE_REG_ARRAY(hw, E1000_RA, |
| slot << 1, 0); |
| E1000_WRITE_FLUSH(hw); |
| E1000_WRITE_REG_ARRAY(hw, E1000_RA, |
| (slot << 1) + 1, 0); |
| E1000_WRITE_FLUSH(hw); |
| } else { |
| (void) e1000_rar_set(hw, (uint8_t *)mac_addr, |
| LAST_RAR_ENTRY); |
| } |
| } |
| |
| /* |
| * If we are using Wiseman rev 2.0 silicon, we will have previously |
| * put the receiver in reset, and disabled MWI, to work around some |
| * HW errata. Now we should take the receiver out of reset, and |
| * re-enable MWI if it was previously enabled by the PCI BIOS. |
| */ |
| if ((hw->mac.type == e1000_82542) && |
| (hw->revision_id == E1000_REVISION_2)) { |
| E1000_WRITE_REG(hw, E1000_RCTL, 0); |
| msec_delay(1); |
| if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) |
| e1000_pci_set_mwi(hw); |
| e1000g_rx_setup(Adapter); |
| } |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| return (EIO); |
| } |
| |
| return (0); |
| } |
| |
| static int |
| multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| struct ether_addr *newtable; |
| size_t new_len; |
| size_t old_len; |
| int res = 0; |
| |
| if ((multiaddr[0] & 01) == 0) { |
| res = EINVAL; |
| e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); |
| goto done; |
| } |
| |
| if (Adapter->mcast_count >= Adapter->mcast_max_num) { |
| res = ENOENT; |
| e1000g_log(Adapter, CE_WARN, |
| "Adapter requested more than %d mcast addresses", |
| Adapter->mcast_max_num); |
| goto done; |
| } |
| |
| if (Adapter->mcast_count == Adapter->mcast_alloc_count) { |
| old_len = Adapter->mcast_alloc_count * |
| sizeof (struct ether_addr); |
| new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * |
| sizeof (struct ether_addr); |
| |
| newtable = kmem_alloc(new_len, KM_NOSLEEP); |
| if (newtable == NULL) { |
| res = ENOMEM; |
| e1000g_log(Adapter, CE_WARN, |
| "Not enough memory to alloc mcast table"); |
| goto done; |
| } |
| |
| if (Adapter->mcast_table != NULL) { |
| bcopy(Adapter->mcast_table, newtable, old_len); |
| kmem_free(Adapter->mcast_table, old_len); |
| } |
| Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; |
| Adapter->mcast_table = newtable; |
| } |
| |
| bcopy(multiaddr, |
| &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); |
| Adapter->mcast_count++; |
| |
| /* |
| * Update the MC table in the hardware |
| */ |
| e1000g_clear_interrupt(Adapter); |
| |
| e1000_update_mc_addr_list(hw, |
| (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); |
| |
| e1000g_mask_interrupt(Adapter); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| res = EIO; |
| } |
| |
| done: |
| return (res); |
| } |
| |
| static int |
| multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) |
| { |
| struct e1000_hw *hw = &Adapter->shared; |
| struct ether_addr *newtable; |
| size_t new_len; |
| size_t old_len; |
| unsigned i; |
| |
| for (i = 0; i < Adapter->mcast_count; i++) { |
| if (bcmp(multiaddr, &Adapter->mcast_table[i], |
| ETHERADDRL) == 0) { |
| for (i++; i < Adapter->mcast_count; i++) { |
| Adapter->mcast_table[i - 1] = |
| Adapter->mcast_table[i]; |
| } |
| Adapter->mcast_count--; |
| break; |
| } |
| } |
| |
| if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > |
| MCAST_ALLOC_SIZE) { |
| old_len = Adapter->mcast_alloc_count * |
| sizeof (struct ether_addr); |
| new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * |
| sizeof (struct ether_addr); |
| |
| newtable = kmem_alloc(new_len, KM_NOSLEEP); |
| if (newtable != NULL) { |
| bcopy(Adapter->mcast_table, newtable, new_len); |
| kmem_free(Adapter->mcast_table, old_len); |
| |
| Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; |
| Adapter->mcast_table = newtable; |
| } |
| } |
| |
| /* |
| * Update the MC table in the hardware |
| */ |
| e1000g_clear_interrupt(Adapter); |
| |
| e1000_update_mc_addr_list(hw, |
| (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); |
| |
| e1000g_mask_interrupt(Adapter); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| return (EIO); |
| } |
| |
| return (0); |
| } |
| |
| static void |
| e1000g_release_multicast(struct e1000g *Adapter) |
| { |
| if (Adapter->mcast_table != NULL) { |
| kmem_free(Adapter->mcast_table, |
| Adapter->mcast_alloc_count * sizeof (struct ether_addr)); |
| Adapter->mcast_table = NULL; |
| } |
| } |
| |
| static int |
| e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) |
| { |
| struct e1000g *Adapter = (struct e1000g *)arg; |
| int result; |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->e1000g_state & E1000G_SUSPENDED) { |
| result = ECANCELED; |
| goto done; |
| } |
| |
| result = (add) ? multicst_add(Adapter, addr) |
| : multicst_remove(Adapter, addr); |
| |
| done: |
| rw_exit(&Adapter->chip_lock); |
| return (result); |
| } |
| |
| static int |
| e1000g_m_promisc(void *arg, boolean_t on) |
| { |
| struct e1000g *Adapter = (struct e1000g *)arg; |
| uint32_t rctl; |
| |
| rw_enter(&Adapter->chip_lock, RW_WRITER); |
| |
| if (Adapter->e1000g_state & E1000G_SUSPENDED) { |
| rw_exit(&Adapter->chip_lock); |
| return (ECANCELED); |
| } |
| |
| rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); |
| |
| /* |
| * UPE and MPE enable unicast and multicast promiscuous reception; |
| * BAM (accept broadcast) is set as well, and is not cleared when |
| * promiscuous mode is turned off. |
| */ |
| if (on) |
| rctl |= |
| (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); |
| else |
| rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); |
| |
| E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); |
| |
| Adapter->e1000g_promisc = on; |
| |
| rw_exit(&Adapter->chip_lock); |
| |
| if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); |
| return (EIO); |
|