| /* |
| * CDDL HEADER START |
| * |
| * The contents of this file are subject to the terms of the |
| * Common Development and Distribution License (the "License"). |
| * You may not use this file except in compliance with the License. |
| * |
| * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
| * or http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| * |
| * When distributing Covered Code, include this CDDL HEADER in each |
| * file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
| * If applicable, add the following below this CDDL HEADER, with the |
| * fields enclosed by brackets "[]" replaced with your own identifying |
| * information: Portions Copyright [yyyy] [name of copyright owner] |
| * |
| * CDDL HEADER END |
| */ |
| |
| /* |
| * Copyright(c) 2007-2010 Intel Corporation. All rights reserved. |
| */ |
| |
| /* |
| * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. |
| * Copyright (c) 2017, Joyent, Inc. |
| * Copyright 2012 Nexenta Systems, Inc. All rights reserved. |
| * Copyright (c) 2013 Saso Kiselkov. All rights reserved. |
| * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved. |
| * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved. |
| */ |
| |
| #include "ixgbe_sw.h" |
| |
| static char ixgbe_ident[] = "Intel 10Gb Ethernet"; |
| |
| /* |
| * Local function protoypes |
| */ |
| static int ixgbe_register_mac(ixgbe_t *); |
| static int ixgbe_identify_hardware(ixgbe_t *); |
| static int ixgbe_regs_map(ixgbe_t *); |
| static void ixgbe_init_properties(ixgbe_t *); |
| static int ixgbe_init_driver_settings(ixgbe_t *); |
| static void ixgbe_init_locks(ixgbe_t *); |
| static void ixgbe_destroy_locks(ixgbe_t *); |
| static int ixgbe_init(ixgbe_t *); |
| static int ixgbe_chip_start(ixgbe_t *); |
| static void ixgbe_chip_stop(ixgbe_t *); |
| static int ixgbe_reset(ixgbe_t *); |
| static void ixgbe_tx_clean(ixgbe_t *); |
| static boolean_t ixgbe_tx_drain(ixgbe_t *); |
| static boolean_t ixgbe_rx_drain(ixgbe_t *); |
| static int ixgbe_alloc_rings(ixgbe_t *); |
| static void ixgbe_free_rings(ixgbe_t *); |
| static int ixgbe_alloc_rx_data(ixgbe_t *); |
| static void ixgbe_free_rx_data(ixgbe_t *); |
| static void ixgbe_setup_rings(ixgbe_t *); |
| static void ixgbe_setup_rx(ixgbe_t *); |
| static void ixgbe_setup_tx(ixgbe_t *); |
| static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *); |
| static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *); |
| static void ixgbe_setup_rss(ixgbe_t *); |
| static void ixgbe_setup_vmdq(ixgbe_t *); |
| static void ixgbe_setup_vmdq_rss(ixgbe_t *); |
| static void ixgbe_setup_rss_table(ixgbe_t *); |
| static void ixgbe_init_unicst(ixgbe_t *); |
| static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *); |
| static void ixgbe_setup_multicst(ixgbe_t *); |
| static void ixgbe_get_hw_state(ixgbe_t *); |
| static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe); |
| static void ixgbe_get_conf(ixgbe_t *); |
| static void ixgbe_init_params(ixgbe_t *); |
| static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int); |
| static void ixgbe_driver_link_check(ixgbe_t *); |
| static void ixgbe_sfp_check(void *); |
| static void ixgbe_overtemp_check(void *); |
| static void ixgbe_phy_check(void *); |
| static void ixgbe_link_timer(void *); |
| static void ixgbe_local_timer(void *); |
| static void ixgbe_arm_watchdog_timer(ixgbe_t *); |
| static void ixgbe_restart_watchdog_timer(ixgbe_t *); |
| static void ixgbe_disable_adapter_interrupts(ixgbe_t *); |
| static void ixgbe_enable_adapter_interrupts(ixgbe_t *); |
| static boolean_t is_valid_mac_addr(uint8_t *); |
| static boolean_t ixgbe_stall_check(ixgbe_t *); |
| static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t); |
| static void ixgbe_set_internal_mac_loopback(ixgbe_t *); |
| static boolean_t ixgbe_find_mac_address(ixgbe_t *); |
| static int ixgbe_alloc_intrs(ixgbe_t *); |
| static int ixgbe_alloc_intr_handles(ixgbe_t *, int); |
| static int ixgbe_add_intr_handlers(ixgbe_t *); |
| static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int); |
| static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int); |
| static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t); |
| static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t); |
| static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t); |
| static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index); |
| static int ixgbe_map_intrs_to_vectors(ixgbe_t *); |
| static void ixgbe_setup_adapter_vector(ixgbe_t *); |
| static void ixgbe_rem_intr_handlers(ixgbe_t *); |
| static void ixgbe_rem_intrs(ixgbe_t *); |
| static int ixgbe_enable_intrs(ixgbe_t *); |
| static int ixgbe_disable_intrs(ixgbe_t *); |
| static uint_t ixgbe_intr_legacy(void *, void *); |
| static uint_t ixgbe_intr_msi(void *, void *); |
| static uint_t ixgbe_intr_msix(void *, void *); |
| static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *); |
| static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *); |
| static void ixgbe_intr_other_work(ixgbe_t *, uint32_t); |
| static void ixgbe_get_driver_control(struct ixgbe_hw *); |
| static int ixgbe_addmac(void *, const uint8_t *); |
| static int ixgbe_remmac(void *, const uint8_t *); |
| static void ixgbe_release_driver_control(struct ixgbe_hw *); |
| |
| static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t); |
| static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t); |
| static int ixgbe_resume(dev_info_t *); |
| static int ixgbe_suspend(dev_info_t *); |
| static int ixgbe_quiesce(dev_info_t *); |
| static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *); |
| static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *); |
| static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *); |
| static int ixgbe_intr_cb_register(ixgbe_t *); |
| static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int); |
| |
| static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, |
| const void *impl_data); |
| static void ixgbe_fm_init(ixgbe_t *); |
| static void ixgbe_fm_fini(ixgbe_t *); |
| |
/*
 * Driver-private property names exported through the GLDv3 framework
 * (mac(9E) m_priv_props).  The list must remain NULL-terminated.
 */
char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

/*
 * Number of entries in ixgbe_priv_props[], including the terminating NULL.
 * The previous definition divided the array size by sizeof (mac_priv_prop_t),
 * which miscounts: the array elements are char *, so the divisor must be the
 * element size.
 */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))
| |
/*
 * Character/block device entry points.  All of them are stubs
 * (nulldev/nodev): this driver exposes no direct cdev interface; data-path
 * and control access go through the GLDv3 callbacks registered in
 * ixgbe_register_mac().
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
| |
/*
 * Device operations vector.  NOTE(review): _init() passes this structure to
 * mac_init_ops(), which presumably fills in additional entries (e.g.
 * devo_getinfo) on the driver's behalf — confirm against mac_init_ops(9F).
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};
| |
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};
| |
/* Module linkage: this module consists of the single driver above. */
static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
| |
| /* |
| * Access attributes for register mapping |
| */ |
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC,	/* register accesses must not be reordered */
	DDI_FLAGERR_ACC		/* access faults are flagged for FMA checks */
};
| |
| /* |
| * Loopback property |
| */ |
/* No loopback: normal operation. */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

/* Internal loopback at the MAC layer. */
static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

/* External loopback (e.g. via a physical loopback plug). */
static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};
| |
#define IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

/*
 * GLDv3 callback vector registered via mac_register().  The first member is
 * the mask of optional entries that are valid.  Entries left NULL are not
 * provided directly here — presumably the unicast/tx paths are exported
 * per-ring through the capability interface (ixgbe_m_getcapab); confirm
 * against that function's implementation.
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};
| |
| /* |
| * Initialize capabilities of each supported adapter type |
| */ |
/*
 * 82598EB capabilities.  NOTE: this structure is shared by every 82598
 * instance in the system, and ixgbe_identify_hardware() ORs additional
 * flag/interrupt bits into it at attach time for copper media.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
| |
/*
 * 82599EB capabilities.  NOTE: this structure is shared by every 82599
 * instance, and ixgbe_identify_hardware() ORs additional temp-sensor
 * flag/interrupt bits into it at attach time for the T3 LOM variant.
 */
static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
};
| |
/*
 * X540 capabilities.  Unlike the other MAC types, no per-variant bits are
 * ORed in at attach time; the structure is used as-is.
 */
static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1_X540
	| IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN_X540
	| IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
};
| |
/*
 * X550/X550EM capabilities.  NOTE: shared by every X550 instance;
 * ixgbe_identify_hardware() ORs SFP and link-detect bits into it at attach
 * time for the X550EM SFP/10G-T variants.
 */
static adapter_info_t ixgbe_X550_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	0x200,		/* default interrupt throttle rate */
			/*
			 * NOTE(review): 0x200 (512) is hex while the other
			 * adapter tables use decimal 200 — confirm this
			 * difference is intentional for X550.
			 */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
};
| |
| /* |
| * Module Initialization Functions. |
| */ |
| |
| int |
| _init(void) |
| { |
| int status; |
| |
| mac_init_ops(&ixgbe_dev_ops, MODULE_NAME); |
| |
| status = mod_install(&ixgbe_modlinkage); |
| |
| if (status != DDI_SUCCESS) { |
| mac_fini_ops(&ixgbe_dev_ops); |
| } |
| |
| return (status); |
| } |
| |
| int |
| _fini(void) |
| { |
| int status; |
| |
| status = mod_remove(&ixgbe_modlinkage); |
| |
| if (status == DDI_SUCCESS) { |
| mac_fini_ops(&ixgbe_dev_ops); |
| } |
| |
| return (status); |
| } |
| |
| int |
| _info(struct modinfo *modinfop) |
| { |
| int status; |
| |
| status = mod_info(&ixgbe_modlinkage, modinfop); |
| |
| return (status); |
| } |
| |
| /* |
| * ixgbe_attach - Driver attach. |
| * |
| * This function is the device specific initialization entry |
| * point. This entry point is required and must be written. |
| * The DDI_ATTACH command must be provided in the attach entry |
| * point. When attach() is called with cmd set to DDI_ATTACH, |
| * all normal kernel services (such as kmem_alloc(9F)) are |
| * available for use by the driver. |
| * |
| * The attach() function will be called once for each instance |
| * of the device on the system with cmd set to DDI_ATTACH. |
| * Until attach() succeeds, the only driver entry points which |
| * may be called are open(9E) and getinfo(9E). |
| */ |
| static int |
| ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) |
| { |
| ixgbe_t *ixgbe; |
| struct ixgbe_osdep *osdep; |
| struct ixgbe_hw *hw; |
| int instance; |
| char taskqname[32]; |
| |
| /* |
| * Check the command and perform corresponding operations |
| */ |
| switch (cmd) { |
| default: |
| return (DDI_FAILURE); |
| |
| case DDI_RESUME: |
| return (ixgbe_resume(devinfo)); |
| |
| case DDI_ATTACH: |
| break; |
| } |
| |
| /* Get the device instance */ |
| instance = ddi_get_instance(devinfo); |
| |
| /* Allocate memory for the instance data structure */ |
| ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP); |
| |
| ixgbe->dip = devinfo; |
| ixgbe->instance = instance; |
| |
| hw = &ixgbe->hw; |
| osdep = &ixgbe->osdep; |
| hw->back = osdep; |
| osdep->ixgbe = ixgbe; |
| |
| /* Attach the instance pointer to the dev_info data structure */ |
| ddi_set_driver_private(devinfo, ixgbe); |
| |
| /* |
| * Initialize for FMA support |
| */ |
| ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE, |
| 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | |
| DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); |
| ixgbe_fm_init(ixgbe); |
| ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT; |
| |
| /* |
| * Map PCI config space registers |
| */ |
| if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to map PCI configurations"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; |
| |
| /* |
| * Identify the chipset family |
| */ |
| if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to identify hardware"); |
| goto attach_fail; |
| } |
| |
| /* |
| * Map device registers |
| */ |
| if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to map device registers"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP; |
| |
| /* |
| * Initialize driver parameters |
| */ |
| ixgbe_init_properties(ixgbe); |
| ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS; |
| |
| /* |
| * Register interrupt callback |
| */ |
| if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to register interrupt callback"); |
| goto attach_fail; |
| } |
| |
| /* |
| * Allocate interrupts |
| */ |
| if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to allocate interrupts"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; |
| |
| /* |
| * Allocate rx/tx rings based on the ring numbers. |
| * The actual numbers of rx/tx rings are decided by the number of |
| * allocated interrupt vectors, so we should allocate the rings after |
| * interrupts are allocated. |
| */ |
| if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to allocate rx and tx rings"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; |
| |
| /* |
| * Map rings to interrupt vectors |
| */ |
| if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to map interrupts to vectors"); |
| goto attach_fail; |
| } |
| |
| /* |
| * Add interrupt handlers |
| */ |
| if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to add interrupt handlers"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; |
| |
| /* |
| * Create a taskq for sfp-change |
| */ |
| (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance); |
| if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname, |
| 1, TASKQ_DEFAULTPRI, 0)) == NULL) { |
| ixgbe_error(ixgbe, "sfp_taskq create failed"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ; |
| |
| /* |
| * Create a taskq for over-temp |
| */ |
| (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance); |
| if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname, |
| 1, TASKQ_DEFAULTPRI, 0)) == NULL) { |
| ixgbe_error(ixgbe, "overtemp_taskq create failed"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ; |
| |
| /* |
| * Create a taskq for processing external PHY interrupts |
| */ |
| (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance); |
| if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname, |
| 1, TASKQ_DEFAULTPRI, 0)) == NULL) { |
| ixgbe_error(ixgbe, "phy_taskq create failed"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ; |
| |
| /* |
| * Initialize driver parameters |
| */ |
| if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to initialize driver settings"); |
| goto attach_fail; |
| } |
| |
| /* |
| * Initialize mutexes for this device. |
| * Do this before enabling the interrupt handler and |
| * register the softint to avoid the condition where |
| * interrupt handler can try using uninitialized mutex. |
| */ |
| ixgbe_init_locks(ixgbe); |
| ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS; |
| |
| /* |
| * Initialize chipset hardware |
| */ |
| if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to initialize adapter"); |
| goto attach_fail; |
| } |
| ixgbe->link_check_complete = B_FALSE; |
| ixgbe->link_check_hrtime = gethrtime() + |
| (IXGBE_LINK_UP_TIME * 100000000ULL); |
| ixgbe->attach_progress |= ATTACH_PROGRESS_INIT; |
| |
| if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); |
| goto attach_fail; |
| } |
| |
| /* |
| * Initialize adapter capabilities |
| */ |
| ixgbe_init_params(ixgbe); |
| |
| /* |
| * Initialize statistics |
| */ |
| if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to initialize statistics"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_STATS; |
| |
| /* |
| * Register the driver to the MAC |
| */ |
| if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to register MAC"); |
| goto attach_fail; |
| } |
| mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); |
| ixgbe->attach_progress |= ATTACH_PROGRESS_MAC; |
| |
| ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe, |
| IXGBE_CYCLIC_PERIOD, DDI_IPL_0); |
| if (ixgbe->periodic_id == 0) { |
| ixgbe_error(ixgbe, "Failed to add the link check timer"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER; |
| |
| /* |
| * Now that mutex locks are initialized, and the chip is also |
| * initialized, enable interrupts. |
| */ |
| if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to enable DDI interrupts"); |
| goto attach_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; |
| |
| ixgbe_log(ixgbe, "%s", ixgbe_ident); |
| atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED); |
| |
| return (DDI_SUCCESS); |
| |
| attach_fail: |
| ixgbe_unconfigure(devinfo, ixgbe); |
| return (DDI_FAILURE); |
| } |
| |
| /* |
| * ixgbe_detach - Driver detach. |
| * |
| * The detach() function is the complement of the attach routine. |
| * If cmd is set to DDI_DETACH, detach() is used to remove the |
| * state associated with a given instance of a device node |
| * prior to the removal of that instance from the system. |
| * |
| * The detach() function will be called once for each instance |
| * of the device for which there has been a successful attach() |
| * once there are no longer any opens on the device. |
| * |
 * Interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
| */ |
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		/*
		 * Clear STARTED before taking gen_lock — presumably so
		 * concurrent timer/interrupt paths see the device as
		 * stopping and do not restart work; confirm against the
		 * other readers of ixgbe_state.
		 */
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach: freeing the rings now would pull the
	 * memory out from under those loaned buffers.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}
| |
| /* |
| * quiesce(9E) entry point. |
| * |
| * This function is called when the system is single-threaded at high |
| * PIL with preemption disabled. Therefore, this function must not be |
| * blocked. |
| * |
| * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. |
| * DDI_FAILURE indicates an error condition and should almost never happen. |
| */ |
| static int |
| ixgbe_quiesce(dev_info_t *devinfo) |
| { |
| ixgbe_t *ixgbe; |
| struct ixgbe_hw *hw; |
| |
| ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); |
| |
| if (ixgbe == NULL) |
| return (DDI_FAILURE); |
| |
| hw = &ixgbe->hw; |
| |
| /* |
| * Disable the adapter interrupts |
| */ |
| ixgbe_disable_adapter_interrupts(ixgbe); |
| |
| /* |
| * Tell firmware driver is no longer in control |
| */ |
| ixgbe_release_driver_control(hw); |
| |
| /* |
| * Reset the chipset |
| */ |
| (void) ixgbe_reset_hw(hw); |
| |
| /* |
| * Reset PHY |
| */ |
| (void) ixgbe_reset_phy(hw); |
| |
| return (DDI_SUCCESS); |
| } |
| |
/*
 * ixgbe_unconfigure - Release every resource recorded in attach_progress.
 *
 * Called both from ixgbe_detach() and from the ixgbe_attach() failure path,
 * so each teardown step is gated on its ATTACH_PROGRESS_* bit and must
 * tolerate a partially initialized soft state.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		/*
		 * NOTE(review): periodic_id is compared against NULL here
		 * but against 0 in ixgbe_attach() — harmless, but the two
		 * spellings should probably agree.
		 */
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove taskq for external PHYs
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
		ddi_taskq_destroy(ixgbe->phy_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister interrupt callback handler
	 */
	if (ixgbe->cb_hdl != NULL) {
		(void) ddi_cb_unregister(ixgbe->cb_hdl);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
| |
| /* |
| * ixgbe_register_mac - Register the driver and its function pointers with |
| * the GLD interface. |
| */ |
| static int |
| ixgbe_register_mac(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| mac_register_t *mac; |
| int status; |
| |
| if ((mac = mac_alloc(MAC_VERSION)) == NULL) |
| return (IXGBE_FAILURE); |
| |
| mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; |
| mac->m_driver = ixgbe; |
| mac->m_dip = ixgbe->dip; |
| mac->m_src_addr = hw->mac.addr; |
| mac->m_callbacks = &ixgbe_m_callbacks; |
| mac->m_min_sdu = 0; |
| mac->m_max_sdu = ixgbe->default_mtu; |
| mac->m_margin = VLAN_TAGSZ; |
| mac->m_priv_props = ixgbe_priv_props; |
| mac->m_v12n = MAC_VIRT_LEVEL1; |
| |
| status = mac_register(mac, &ixgbe->mac_hdl); |
| |
| mac_free(mac); |
| |
| return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE); |
| } |
| |
| /* |
| * ixgbe_identify_hardware - Identify the type of the chipset. |
| */ |
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): the capab pointers below reference file-static
	 * adapter_info_t structures shared by all instances of the same MAC
	 * type; the |= updates here therefore mutate state visible to every
	 * such instance, not just this one.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			/* Copper 82598s add fan-failure detection on SDP1 */
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			/* The T3 LOM variant has a temperature sensor */
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
		ixgbe->capab = &ixgbe_X550_cap;

		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
			ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

		/*
		 * Link detection on X552 SFP+ and X552/X557-AT
		 */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
			ixgbe->capab->other_intr |=
			    IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
		}
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
| |
| /* |
| * ixgbe_regs_map - Map the device registers. |
| * |
| */ |
| static int |
| ixgbe_regs_map(ixgbe_t *ixgbe) |
| { |
| dev_info_t *devinfo = ixgbe->dip; |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| struct ixgbe_osdep *osdep = &ixgbe->osdep; |
| off_t mem_size; |
| |
| /* |
| * First get the size of device registers to be mapped. |
| */ |
| if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size) |
| != DDI_SUCCESS) { |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Call ddi_regs_map_setup() to map registers |
| */ |
| if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET, |
| (caddr_t *)&hw->hw_addr, 0, |
| mem_size, &ixgbe_regs_acc_attr, |
| &osdep->reg_handle)) != DDI_SUCCESS) { |
| return (IXGBE_FAILURE); |
| } |
| |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_init_properties - Initialize driver properties. |
| */ |
/*
 * ixgbe_init_properties - Initialize driver properties.
 *
 * Thin wrapper around ixgbe_get_conf(); kept as a separate step so the
 * attach sequence reads as a list of named phases.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}
| |
| /* |
| * ixgbe_init_driver_settings - Initialize driver settings. |
| * |
| * The settings include hardware function pointers, bus information, |
| * rx/tx rings settings, link state, and any other parameters that |
| * need to be setup during driver initialization. |
| */ |
| static int |
| ixgbe_init_driver_settings(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| dev_info_t *devinfo = ixgbe->dip; |
| ixgbe_rx_ring_t *rx_ring; |
| ixgbe_rx_group_t *rx_group; |
| ixgbe_tx_ring_t *tx_ring; |
| uint32_t rx_size; |
| uint32_t tx_size; |
| uint32_t ring_per_group; |
| int i; |
| |
| /* |
| * Initialize chipset specific hardware function pointers |
| */ |
| if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) { |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Get the system page size |
| */ |
| ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1); |
| |
| /* |
| * Set rx buffer size |
| * |
| * The IP header alignment room is counted in the calculation. |
| * The rx buffer size is in unit of 1K that is required by the |
| * chipset hardware. |
| */ |
| rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM; |
| ixgbe->rx_buf_size = ((rx_size >> 10) + |
| ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; |
| |
| /* |
| * Set tx buffer size |
| */ |
| tx_size = ixgbe->max_frame_size; |
| ixgbe->tx_buf_size = ((tx_size >> 10) + |
| ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; |
| |
| /* |
| * Initialize rx/tx rings/groups parameters |
| */ |
| ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; |
| for (i = 0; i < ixgbe->num_rx_rings; i++) { |
| rx_ring = &ixgbe->rx_rings[i]; |
| rx_ring->index = i; |
| rx_ring->ixgbe = ixgbe; |
| rx_ring->group_index = i / ring_per_group; |
| rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i); |
| } |
| |
| for (i = 0; i < ixgbe->num_rx_groups; i++) { |
| rx_group = &ixgbe->rx_groups[i]; |
| rx_group->index = i; |
| rx_group->ixgbe = ixgbe; |
| } |
| |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| tx_ring = &ixgbe->tx_rings[i]; |
| tx_ring->index = i; |
| tx_ring->ixgbe = ixgbe; |
| if (ixgbe->tx_head_wb_enable) |
| tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb; |
| else |
| tx_ring->tx_recycle = ixgbe_tx_recycle_legacy; |
| |
| tx_ring->ring_size = ixgbe->tx_ring_size; |
| tx_ring->free_list_size = ixgbe->tx_ring_size + |
| (ixgbe->tx_ring_size >> 1); |
| } |
| |
| /* |
| * Initialize values of interrupt throttling rate |
| */ |
| for (i = 1; i < MAX_INTR_VECTOR; i++) |
| ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0]; |
| |
| /* |
| * The initial link state should be "unknown" |
| */ |
| ixgbe->link_state = LINK_STATE_UNKNOWN; |
| |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_init_locks - Initialize locks. |
| */ |
| static void |
| ixgbe_init_locks(ixgbe_t *ixgbe) |
| { |
| ixgbe_rx_ring_t *rx_ring; |
| ixgbe_tx_ring_t *tx_ring; |
| int i; |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) { |
| rx_ring = &ixgbe->rx_rings[i]; |
| mutex_init(&rx_ring->rx_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| } |
| |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| tx_ring = &ixgbe->tx_rings[i]; |
| mutex_init(&tx_ring->tx_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| mutex_init(&tx_ring->recycle_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| mutex_init(&tx_ring->tcb_head_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| mutex_init(&tx_ring->tcb_tail_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| } |
| |
| mutex_init(&ixgbe->gen_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| |
| mutex_init(&ixgbe->watchdog_lock, NULL, |
| MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); |
| } |
| |
| /* |
| * ixgbe_destroy_locks - Destroy locks. |
| */ |
| static void |
| ixgbe_destroy_locks(ixgbe_t *ixgbe) |
| { |
| ixgbe_rx_ring_t *rx_ring; |
| ixgbe_tx_ring_t *tx_ring; |
| int i; |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) { |
| rx_ring = &ixgbe->rx_rings[i]; |
| mutex_destroy(&rx_ring->rx_lock); |
| } |
| |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| tx_ring = &ixgbe->tx_rings[i]; |
| mutex_destroy(&tx_ring->tx_lock); |
| mutex_destroy(&tx_ring->recycle_lock); |
| mutex_destroy(&tx_ring->tcb_head_lock); |
| mutex_destroy(&tx_ring->tcb_tail_lock); |
| } |
| |
| mutex_destroy(&ixgbe->gen_lock); |
| mutex_destroy(&ixgbe->watchdog_lock); |
| } |
| |
| /* |
| * We need to try and determine which LED index in hardware corresponds to the |
| * link/activity LED. This is the one that'll be overwritten when we perform |
| * GLDv3 LED activity. |
| */ |
| static void |
| ixgbe_led_init(ixgbe_t *ixgbe) |
| { |
| uint32_t reg, i; |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| |
| reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); |
| for (i = 0; i < 4; i++) { |
| if (((reg >> IXGBE_LED_MODE_SHIFT(i)) & |
| IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) { |
| ixgbe->ixgbe_led_index = i; |
| return; |
| } |
| } |
| |
| /* |
| * If we couldn't determine this, we use the default for various MACs |
| * based on information Intel has inserted into other drivers over the |
| * years. Note, when we have support for the X553 which should add the |
| * ixgbe_x550_em_a mac type, that should be at index 0. |
| */ |
| switch (hw->mac.type) { |
| case ixgbe_mac_X550EM_x: |
| ixgbe->ixgbe_led_index = 1; |
| break; |
| default: |
| ixgbe->ixgbe_led_index = 2; |
| break; |
| } |
| } |
| |
/*
 * ixgbe_resume - DDI resume entry point.
 *
 * Re-starts the chipset if it was running at suspend time, clears the
 * SUSPENDED state flag, and wakes the MAC layer's tx rings.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	/* Only restart the hardware if it was started before suspend. */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	/* Clear SUSPENDED only after the hardware is running again. */
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	/* Wake the MAC layer's tx rings so queued traffic can flow. */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}
| |
/*
 * ixgbe_suspend - DDI suspend entry point.
 *
 * Marks the instance SUSPENDED, stops the chipset if it was running,
 * and stops the watchdog timer.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Set SUSPENDED before checking STARTED so other paths see the
	 * suspend in progress; if the device was never started there is
	 * no hardware to stop.
	 */
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
| |
| /* |
| * ixgbe_init - Initialize the device. |
| */ |
| static int |
| ixgbe_init(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| u8 pbanum[IXGBE_PBANUM_LENGTH]; |
| int rv; |
| |
| mutex_enter(&ixgbe->gen_lock); |
| |
| /* |
| * Configure/Initialize hardware |
| */ |
| rv = ixgbe_init_hw(hw); |
| if (rv != IXGBE_SUCCESS) { |
| switch (rv) { |
| |
| /* |
| * The first three errors are not prohibitive to us progressing |
| * further, and are maily advisory in nature. In the case of a |
| * SFP module not being present or not deemed supported by the |
| * common code, we adivse the operator of this fact but carry on |
| * instead of failing hard, as SFPs can be inserted or replaced |
| * while the driver is running. In the case of a unknown error, |
| * we fail-hard, logging the reason and emitting a FMA event. |
| */ |
| case IXGBE_ERR_EEPROM_VERSION: |
| ixgbe_error(ixgbe, |
| "This Intel 10Gb Ethernet device is pre-release and" |
| " contains outdated firmware. Please contact your" |
| " hardware vendor for a replacement."); |
| break; |
| case IXGBE_ERR_SFP_NOT_PRESENT: |
| ixgbe_error(ixgbe, |
| "No SFP+ module detected on this interface. Please " |
| "install a supported SFP+ module for this " |
| "interface to become operational."); |
| break; |
| case IXGBE_ERR_SFP_NOT_SUPPORTED: |
| ixgbe_error(ixgbe, |
| "Unsupported SFP+ module detected. Please replace " |
| "it with a supported SFP+ module per Intel " |
| "documentation, or bypass this check with " |
| "allow_unsupported_sfp=1 in ixgbe.conf."); |
| break; |
| default: |
| ixgbe_error(ixgbe, |
| "Failed to initialize hardware. ixgbe_init_hw " |
| "returned %d", rv); |
| ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| } |
| |
| /* |
| * Need to init eeprom before validating the checksum. |
| */ |
| if (ixgbe_init_eeprom_params(hw) < 0) { |
| ixgbe_error(ixgbe, |
| "Unable to intitialize the eeprom interface."); |
| ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* |
| * NVM validation |
| */ |
| if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { |
| /* |
| * Some PCI-E parts fail the first check due to |
| * the link being in sleep state. Call it again, |
| * if it fails a second time it's a real issue. |
| */ |
| if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { |
| ixgbe_error(ixgbe, |
| "Invalid NVM checksum. Please contact " |
| "the vendor to update the NVM."); |
| ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| } |
| |
| /* |
| * Setup default flow control thresholds - enable/disable |
| * & flow control type is controlled by ixgbe.conf |
| */ |
| hw->fc.high_water[0] = DEFAULT_FCRTH; |
| hw->fc.low_water[0] = DEFAULT_FCRTL; |
| hw->fc.pause_time = DEFAULT_FCPAUSE; |
| hw->fc.send_xon = B_TRUE; |
| |
| /* |
| * Initialize flow control |
| */ |
| (void) ixgbe_start_hw(hw); |
| |
| /* |
| * Initialize link settings |
| */ |
| (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); |
| |
| /* |
| * Initialize the chipset hardware |
| */ |
| if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); |
| goto init_fail; |
| } |
| |
| /* |
| * Read identifying information and place in devinfo. |
| */ |
| pbanum[0] = '\0'; |
| (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum)); |
| if (*pbanum != '\0') { |
| (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip, |
| "printed-board-assembly", (char *)pbanum); |
| } |
| |
| /* |
| * Determine LED index. |
| */ |
| ixgbe_led_init(ixgbe); |
| |
| if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { |
| goto init_fail; |
| } |
| |
| mutex_exit(&ixgbe->gen_lock); |
| return (IXGBE_SUCCESS); |
| |
| init_fail: |
| /* |
| * Reset PHY |
| */ |
| (void) ixgbe_reset_phy(hw); |
| |
| mutex_exit(&ixgbe->gen_lock); |
| ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * ixgbe_chip_start - Initialize and start the chipset hardware. |
| */ |
| static int |
| ixgbe_chip_start(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| int i; |
| |
| ASSERT(mutex_owned(&ixgbe->gen_lock)); |
| |
| /* |
| * Get the mac address |
| * This function should handle SPARC case correctly. |
| */ |
| if (!ixgbe_find_mac_address(ixgbe)) { |
| ixgbe_error(ixgbe, "Failed to get the mac address"); |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Validate the mac address |
| */ |
| (void) ixgbe_init_rx_addrs(hw); |
| if (!is_valid_mac_addr(hw->mac.addr)) { |
| ixgbe_error(ixgbe, "Invalid mac address"); |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Re-enable relaxed ordering for performance. It is disabled |
| * by default in the hardware init. |
| */ |
| if (ixgbe->relax_order_enable == B_TRUE) |
| ixgbe_enable_relaxed_ordering(hw); |
| |
| /* |
| * Setup adapter interrupt vectors |
| */ |
| ixgbe_setup_adapter_vector(ixgbe); |
| |
| /* |
| * Initialize unicast addresses. |
| */ |
| ixgbe_init_unicst(ixgbe); |
| |
| /* |
| * Setup and initialize the mctable structures. |
| */ |
| ixgbe_setup_multicst(ixgbe); |
| |
| /* |
| * Set interrupt throttling rate |
| */ |
| for (i = 0; i < ixgbe->intr_cnt; i++) { |
| IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]); |
| } |
| |
| /* |
| * Disable Wake-on-LAN |
| */ |
| IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); |
| |
| /* |
| * Some adapters offer Energy Efficient Ethernet (EEE) support. |
| * Due to issues with EEE in e1000g/igb, we disable this by default |
| * as a precautionary measure. |
| * |
| * Currently, the only known adapter which supports EEE in the ixgbe |
| * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the |
| * first revision of it, as well as any X550 with MAC type 6 (non-EM) |
| */ |
| (void) ixgbe_setup_eee(hw, B_FALSE); |
| |
| /* |
| * Turn on any present SFP Tx laser |
| */ |
| ixgbe_enable_tx_laser(hw); |
| |
| /* |
| * Power on the PHY |
| */ |
| (void) ixgbe_set_phy_power(hw, B_TRUE); |
| |
| /* |
| * Save the state of the PHY |
| */ |
| ixgbe_get_hw_state(ixgbe); |
| |
| /* |
| * Make sure driver has control |
| */ |
| ixgbe_get_driver_control(hw); |
| |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_chip_stop - Stop the chipset hardware |
| */ |
| static void |
| ixgbe_chip_stop(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| int rv; |
| |
| ASSERT(mutex_owned(&ixgbe->gen_lock)); |
| |
| /* |
| * Stop interupt generation and disable Tx unit |
| */ |
| hw->adapter_stopped = B_FALSE; |
| (void) ixgbe_stop_adapter(hw); |
| |
| /* |
| * Reset the chipset |
| */ |
| (void) ixgbe_reset_hw(hw); |
| |
| /* |
| * Reset PHY |
| */ |
| (void) ixgbe_reset_phy(hw); |
| |
| /* |
| * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting |
| * the PHY while doing so. Else, just power down the PHY. |
| */ |
| if (hw->phy.ops.enter_lplu != NULL) { |
| hw->phy.reset_disable = B_TRUE; |
| rv = hw->phy.ops.enter_lplu(hw); |
| if (rv != IXGBE_SUCCESS) |
| ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv); |
| hw->phy.reset_disable = B_FALSE; |
| } else { |
| (void) ixgbe_set_phy_power(hw, B_FALSE); |
| } |
| |
| /* |
| * Turn off any present SFP Tx laser |
| * Expected for health and safety reasons |
| */ |
| ixgbe_disable_tx_laser(hw); |
| |
| /* |
| * Tell firmware driver is no longer in control |
| */ |
| ixgbe_release_driver_control(hw); |
| |
| } |
| |
| /* |
| * ixgbe_reset - Reset the chipset and re-start the driver. |
| * |
| * It involves stopping and re-starting the chipset, |
| * and re-configuring the rx/tx rings. |
| */ |
| static int |
| ixgbe_reset(ixgbe_t *ixgbe) |
| { |
| int i; |
| |
| /* |
| * Disable and stop the watchdog timer |
| */ |
| ixgbe_disable_watchdog_timer(ixgbe); |
| |
| mutex_enter(&ixgbe->gen_lock); |
| |
| ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); |
| atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); |
| |
| ixgbe_stop(ixgbe, B_FALSE); |
| |
| if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { |
| mutex_exit(&ixgbe->gen_lock); |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * After resetting, need to recheck the link status. |
| */ |
| ixgbe->link_check_complete = B_FALSE; |
| ixgbe->link_check_hrtime = gethrtime() + |
| (IXGBE_LINK_UP_TIME * 100000000ULL); |
| |
| atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED); |
| |
| if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) { |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| mac_tx_ring_update(ixgbe->mac_hdl, |
| ixgbe->tx_rings[i].ring_handle); |
| } |
| } |
| |
| mutex_exit(&ixgbe->gen_lock); |
| |
| /* |
| * Enable and start the watchdog timer |
| */ |
| ixgbe_enable_watchdog_timer(ixgbe); |
| |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. |
| */ |
| static void |
| ixgbe_tx_clean(ixgbe_t *ixgbe) |
| { |
| ixgbe_tx_ring_t *tx_ring; |
| tx_control_block_t *tcb; |
| link_list_t pending_list; |
| uint32_t desc_num; |
| int i, j; |
| |
| LINK_LIST_INIT(&pending_list); |
| |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| tx_ring = &ixgbe->tx_rings[i]; |
| |
| mutex_enter(&tx_ring->recycle_lock); |
| |
| /* |
| * Clean the pending tx data - the pending packets in the |
| * work_list that have no chances to be transmitted again. |
| * |
| * We must ensure the chipset is stopped or the link is down |
| * before cleaning the transmit packets. |
| */ |
| desc_num = 0; |
| for (j = 0; j < tx_ring->ring_size; j++) { |
| tcb = tx_ring->work_list[j]; |
| if (tcb != NULL) { |
| desc_num += tcb->desc_num; |
| |
| tx_ring->work_list[j] = NULL; |
| |
| ixgbe_free_tcb(tcb); |
| |
| LIST_PUSH_TAIL(&pending_list, &tcb->link); |
| } |
| } |
| |
| if (desc_num > 0) { |
| atomic_add_32(&tx_ring->tbd_free, desc_num); |
| ASSERT(tx_ring->tbd_free == tx_ring->ring_size); |
| |
| /* |
| * Reset the head and tail pointers of the tbd ring; |
| * Reset the writeback head if it's enable. |
| */ |
| tx_ring->tbd_head = 0; |
| tx_ring->tbd_tail = 0; |
| if (ixgbe->tx_head_wb_enable) |
| *tx_ring->tbd_head_wb = 0; |
| |
| IXGBE_WRITE_REG(&ixgbe->hw, |
| IXGBE_TDH(tx_ring->index), 0); |
| IXGBE_WRITE_REG(&ixgbe->hw, |
| IXGBE_TDT(tx_ring->index), 0); |
| } |
| |
| mutex_exit(&tx_ring->recycle_lock); |
| |
| /* |
| * Add the tx control blocks in the pending list to |
| * the free list. |
| */ |
| ixgbe_put_free_list(tx_ring, &pending_list); |
| } |
| } |
| |
| /* |
| * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be |
| * transmitted. |
| */ |
| static boolean_t |
| ixgbe_tx_drain(ixgbe_t *ixgbe) |
| { |
| ixgbe_tx_ring_t *tx_ring; |
| boolean_t done; |
| int i, j; |
| |
| /* |
| * Wait for a specific time to allow pending tx packets |
| * to be transmitted. |
| * |
| * Check the counter tbd_free to see if transmission is done. |
| * No lock protection is needed here. |
| * |
| * Return B_TRUE if all pending packets have been transmitted; |
| * Otherwise return B_FALSE; |
| */ |
| for (i = 0; i < TX_DRAIN_TIME; i++) { |
| |
| done = B_TRUE; |
| for (j = 0; j < ixgbe->num_tx_rings; j++) { |
| tx_ring = &ixgbe->tx_rings[j]; |
| done = done && |
| (tx_ring->tbd_free == tx_ring->ring_size); |
| } |
| |
| if (done) |
| break; |
| |
| msec_delay(1); |
| } |
| |
| return (done); |
| } |
| |
| /* |
| * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer. |
| */ |
| static boolean_t |
| ixgbe_rx_drain(ixgbe_t *ixgbe) |
| { |
| boolean_t done = B_TRUE; |
| int i; |
| |
| /* |
| * Polling the rx free list to check if those rx buffers held by |
| * the upper layer are released. |
| * |
| * Check the counter rcb_free to see if all pending buffers are |
| * released. No lock protection is needed here. |
| * |
| * Return B_TRUE if all pending buffers have been released; |
| * Otherwise return B_FALSE; |
| */ |
| for (i = 0; i < RX_DRAIN_TIME; i++) { |
| done = (ixgbe->rcb_pending == 0); |
| |
| if (done) |
| break; |
| |
| msec_delay(1); |
| } |
| |
| return (done); |
| } |
| |
| /* |
| * ixgbe_start - Start the driver/chipset. |
| */ |
| int |
| ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| int i; |
| |
| ASSERT(mutex_owned(&ixgbe->gen_lock)); |
| |
| if (alloc_buffer) { |
| if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, |
| "Failed to allocate software receive rings"); |
| return (IXGBE_FAILURE); |
| } |
| |
| /* Allocate buffers for all the rx/tx rings */ |
| if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "Failed to allocate DMA resource"); |
| return (IXGBE_FAILURE); |
| } |
| |
| ixgbe->tx_ring_init = B_TRUE; |
| } else { |
| ixgbe->tx_ring_init = B_FALSE; |
| } |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) |
| mutex_enter(&ixgbe->rx_rings[i].rx_lock); |
| for (i = 0; i < ixgbe->num_tx_rings; i++) |
| mutex_enter(&ixgbe->tx_rings[i].tx_lock); |
| |
| /* |
| * Start the chipset hardware |
| */ |
| if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); |
| goto start_failure; |
| } |
| |
| /* |
| * Configure link now for X550 |
| * |
| * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the |
| * resting state of the adapter at a 1Gb FDX speed. Prior to the X550, |
| * the resting state of the link would be the maximum speed that |
| * autonegotiation will allow (usually 10Gb, infrastructure allowing) |
| * so we never bothered with explicitly setting the link to 10Gb as it |
| * would already be at that state on driver attach. With X550, we must |
| * trigger a re-negotiation of the link in order to switch from a LPLU |
| * 1Gb link to 10Gb (cable and link partner permitting.) |
| */ |
| if (hw->mac.type == ixgbe_mac_X550 || |
| hw->mac.type == ixgbe_mac_X550EM_x) { |
| (void) ixgbe_driver_setup_link(ixgbe, B_TRUE); |
| ixgbe_get_hw_state(ixgbe); |
| } |
| |
| if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { |
| goto start_failure; |
| } |
| |
| /* |
| * Setup the rx/tx rings |
| */ |
| ixgbe_setup_rings(ixgbe); |
| |
| /* |
| * ixgbe_start() will be called when resetting, however if reset |
| * happens, we need to clear the ERROR, STALL and OVERTEMP flags |
| * before enabling the interrupts. |
| */ |
| atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR |
| | IXGBE_STALL| IXGBE_OVERTEMP)); |
| |
| /* |
| * Enable adapter interrupts |
| * The interrupts must be enabled after the driver state is START |
| */ |
| ixgbe_enable_adapter_interrupts(ixgbe); |
| |
| for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->tx_rings[i].tx_lock); |
| for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->rx_rings[i].rx_lock); |
| |
| return (IXGBE_SUCCESS); |
| |
| start_failure: |
| for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->tx_rings[i].tx_lock); |
| for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->rx_rings[i].rx_lock); |
| |
| ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); |
| |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * ixgbe_stop - Stop the driver/chipset. |
| */ |
| void |
| ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer) |
| { |
| int i; |
| |
| ASSERT(mutex_owned(&ixgbe->gen_lock)); |
| |
| /* |
| * Disable the adapter interrupts |
| */ |
| ixgbe_disable_adapter_interrupts(ixgbe); |
| |
| /* |
| * Drain the pending tx packets |
| */ |
| (void) ixgbe_tx_drain(ixgbe); |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) |
| mutex_enter(&ixgbe->rx_rings[i].rx_lock); |
| for (i = 0; i < ixgbe->num_tx_rings; i++) |
| mutex_enter(&ixgbe->tx_rings[i].tx_lock); |
| |
| /* |
| * Stop the chipset hardware |
| */ |
| ixgbe_chip_stop(ixgbe); |
| |
| if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { |
| ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); |
| } |
| |
| /* |
| * Clean the pending tx data/resources |
| */ |
| ixgbe_tx_clean(ixgbe); |
| |
| for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->tx_rings[i].tx_lock); |
| for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) |
| mutex_exit(&ixgbe->rx_rings[i].rx_lock); |
| |
| if (ixgbe->link_state == LINK_STATE_UP) { |
| ixgbe->link_state = LINK_STATE_UNKNOWN; |
| mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); |
| } |
| |
| if (free_buffer) { |
| /* |
| * Release the DMA/memory resources of rx/tx rings |
| */ |
| ixgbe_free_dma(ixgbe); |
| ixgbe_free_rx_data(ixgbe); |
| } |
| } |
| |
| /* |
| * ixgbe_cbfunc - Driver interface for generic DDI callbacks |
| */ |
| /* ARGSUSED */ |
| static int |
| ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg, |
| void *arg1, void *arg2) |
| { |
| ixgbe_t *ixgbe = (ixgbe_t *)arg1; |
| |
| switch (cbaction) { |
| /* IRM callback */ |
| int count; |
| case DDI_CB_INTR_ADD: |
| case DDI_CB_INTR_REMOVE: |
| count = (int)(uintptr_t)cbarg; |
| ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX); |
| DTRACE_PROBE2(ixgbe__irm__callback, int, count, |
| int, ixgbe->intr_cnt); |
| if (ixgbe_intr_adjust(ixgbe, cbaction, count) != |
| DDI_SUCCESS) { |
| ixgbe_error(ixgbe, |
| "IRM CB: Failed to adjust interrupts"); |
| goto cb_fail; |
| } |
| break; |
| default: |
| IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported", |
| cbaction); |
| return (DDI_ENOTSUP); |
| } |
| return (DDI_SUCCESS); |
| cb_fail: |
| return (DDI_FAILURE); |
| } |
| |
| /* |
| * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request. |
| */ |
| static int |
| ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count) |
| { |
| int i, rc, actual; |
| |
| if (count == 0) |
| return (DDI_SUCCESS); |
| |
| if ((cbaction == DDI_CB_INTR_ADD && |
| ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) || |
| (cbaction == DDI_CB_INTR_REMOVE && |
| ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)) |
| return (DDI_FAILURE); |
| |
| if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { |
| return (DDI_FAILURE); |
| } |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) |
| mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL); |
| for (i = 0; i < ixgbe->num_tx_rings; i++) |
| mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL); |
| |
| mutex_enter(&ixgbe->gen_lock); |
| ixgbe->ixgbe_state &= ~IXGBE_STARTED; |
| ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST; |
| ixgbe->ixgbe_state |= IXGBE_SUSPENDED; |
| mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); |
| |
| ixgbe_stop(ixgbe, B_FALSE); |
| /* |
| * Disable interrupts |
| */ |
| if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { |
| rc = ixgbe_disable_intrs(ixgbe); |
| ASSERT(rc == IXGBE_SUCCESS); |
| } |
| ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR; |
| |
| /* |
| * Remove interrupt handlers |
| */ |
| if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { |
| ixgbe_rem_intr_handlers(ixgbe); |
| } |
| ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR; |
| |
| /* |
| * Clear vect_map |
| */ |
| bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); |
| switch (cbaction) { |
| case DDI_CB_INTR_ADD: |
| rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable, |
| DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual, |
| DDI_INTR_ALLOC_NORMAL); |
| if (rc != DDI_SUCCESS || actual != count) { |
| ixgbe_log(ixgbe, "Adjust interrupts failed." |
| "return: %d, irm cb size: %d, actual: %d", |
| rc, count, actual); |
| goto intr_adjust_fail; |
| } |
| ixgbe->intr_cnt += count; |
| break; |
| |
| case DDI_CB_INTR_REMOVE: |
| for (i = ixgbe->intr_cnt - count; |
| i < ixgbe->intr_cnt; i ++) { |
| rc = ddi_intr_free(ixgbe->htable[i]); |
| ixgbe->htable[i] = NULL; |
| if (rc != DDI_SUCCESS) { |
| ixgbe_log(ixgbe, "Adjust interrupts failed." |
| "return: %d, irm cb size: %d, actual: %d", |
| rc, count, actual); |
| goto intr_adjust_fail; |
| } |
| } |
| ixgbe->intr_cnt -= count; |
| break; |
| } |
| |
| /* |
| * Get priority for first vector, assume remaining are all the same |
| */ |
| rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); |
| if (rc != DDI_SUCCESS) { |
| ixgbe_log(ixgbe, |
| "Get interrupt priority failed: %d", rc); |
| goto intr_adjust_fail; |
| } |
| rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); |
| if (rc != DDI_SUCCESS) { |
| ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); |
| goto intr_adjust_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; |
| |
| /* |
| * Map rings to interrupt vectors |
| */ |
| if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, |
| "IRM CB: Failed to map interrupts to vectors"); |
| goto intr_adjust_fail; |
| } |
| |
| /* |
| * Add interrupt handlers |
| */ |
| if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); |
| goto intr_adjust_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; |
| |
| /* |
| * Now that mutex locks are initialized, and the chip is also |
| * initialized, enable interrupts. |
| */ |
| if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); |
| goto intr_adjust_fail; |
| } |
| ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; |
| if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { |
| ixgbe_error(ixgbe, "IRM CB: Failed to start"); |
| goto intr_adjust_fail; |
| } |
| ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; |
| ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; |
| ixgbe->ixgbe_state |= IXGBE_STARTED; |
| mutex_exit(&ixgbe->gen_lock); |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) { |
| mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, |
| ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); |
| } |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, |
| ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); |
| } |
| |
| /* Wakeup all Tx rings */ |
| for (i = 0; i < ixgbe->num_tx_rings; i++) { |
| mac_tx_ring_update(ixgbe->mac_hdl, |
| ixgbe->tx_rings[i].ring_handle); |
| } |
| |
| IXGBE_DEBUGLOG_3(ixgbe, |
| "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", |
| ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); |
| return (DDI_SUCCESS); |
| |
| intr_adjust_fail: |
| ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); |
| mutex_exit(&ixgbe->gen_lock); |
| return (DDI_FAILURE); |
| } |
| |
| /* |
| * ixgbe_intr_cb_register - Register interrupt callback function. |
| */ |
| static int |
| ixgbe_intr_cb_register(ixgbe_t *ixgbe) |
| { |
| if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, |
| ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { |
| return (IXGBE_FAILURE); |
| } |
| IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. |
| */ |
| static int |
| ixgbe_alloc_rings(ixgbe_t *ixgbe) |
| { |
| /* |
| * Allocate memory space for rx rings |
| */ |
| ixgbe->rx_rings = kmem_zalloc( |
| sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, |
| KM_NOSLEEP); |
| |
| if (ixgbe->rx_rings == NULL) { |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Allocate memory space for tx rings |
| */ |
| ixgbe->tx_rings = kmem_zalloc( |
| sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, |
| KM_NOSLEEP); |
| |
| if (ixgbe->tx_rings == NULL) { |
| kmem_free(ixgbe->rx_rings, |
| sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); |
| ixgbe->rx_rings = NULL; |
| return (IXGBE_FAILURE); |
| } |
| |
| /* |
| * Allocate memory space for rx ring groups |
| */ |
| ixgbe->rx_groups = kmem_zalloc( |
| sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, |
| KM_NOSLEEP); |
| |
| if (ixgbe->rx_groups == NULL) { |
| kmem_free(ixgbe->rx_rings, |
| sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); |
| kmem_free(ixgbe->tx_rings, |
| sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); |
| ixgbe->rx_rings = NULL; |
| ixgbe->tx_rings = NULL; |
| return (IXGBE_FAILURE); |
| } |
| |
| return (IXGBE_SUCCESS); |
| } |
| |
| /* |
| * ixgbe_free_rings - Free the memory space of rx/tx rings. |
| */ |
| static void |
| ixgbe_free_rings(ixgbe_t *ixgbe) |
| { |
| if (ixgbe->rx_rings != NULL) { |
| kmem_free(ixgbe->rx_rings, |
| sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); |
| ixgbe->rx_rings = NULL; |
| } |
| |
| if (ixgbe->tx_rings != NULL) { |
| kmem_free(ixgbe->tx_rings, |
| sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); |
| ixgbe->tx_rings = NULL; |
| } |
| |
| if (ixgbe->rx_groups != NULL) { |
| kmem_free(ixgbe->rx_groups, |
| sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); |
| ixgbe->rx_groups = NULL; |
| } |
| } |
| |
| static int |
| ixgbe_alloc_rx_data(ixgbe_t *ixgbe) |
| { |
| ixgbe_rx_ring_t *rx_ring; |
| int i; |
| |
| for (i = 0; i < ixgbe->num_rx_rings; i++) { |
| rx_ring = &ixgbe->rx_rings[i]; |
| if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) |
| goto alloc_rx_rings_failure; |
| } |
| return (IXGBE_SUCCESS); |
| |
| alloc_rx_rings_failure: |
| ixgbe_free_rx_data(ixgbe); |
| return (IXGBE_FAILURE); |
| } |
| |
/*
 * ixgbe_free_rx_data - Free the software receive data of all rx rings.
 *
 * Rings with buffers still loaned to the upper layer (rcb_pending != 0)
 * are only marked IXGBE_RX_STOPPED; their data is freed later, when the
 * last loaned buffer is returned.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		/* rx_pending_lock protects rx_data and rcb_pending */
		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			/* Free immediately only if no buffers are loaned out */
			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
| |
| /* |
| * ixgbe_setup_rings - Setup rx/tx rings. |
| */ |
| static void |
| ixgbe_setup_rings(ixgbe_t *ixgbe) |
| { |
| /* |
| * Setup the rx/tx rings, including the following: |
| * |
| * 1. Setup the descriptor ring and the control block buffers; |
| * 2. Initialize necessary registers for receive/transmit; |
| * 3. Initialize software pointers/parameters for receive/transmit; |
| */ |
| ixgbe_setup_rx(ixgbe); |
| |
| ixgbe_setup_tx(ixgbe); |
| } |
| |
| static void |
| ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) |
| { |
| ixgbe_t *ixgbe = rx_ring->ixgbe; |
| ixgbe_rx_data_t *rx_data = rx_ring->rx_data; |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| rx_control_block_t *rcb; |
| union ixgbe_adv_rx_desc *rbd; |
| uint32_t size; |
| uint32_t buf_low; |
| uint32_t buf_high; |
| uint32_t reg_val; |
| int i; |
| |
| ASSERT(mutex_owned(&rx_ring->rx_lock)); |
| ASSERT(mutex_owned(&ixgbe->gen_lock)); |
| |
| for (i = 0; i < ixgbe->rx_ring_size; i++) { |
| rcb = rx_data->work_list[i]; |
| rbd = &rx_data->rbd_ring[i]; |
| |
| rbd->read.pkt_addr = rcb->rx_buf.dma_address; |
| rbd->read.hdr_addr = NULL; |
| } |
| |
| /* |
| * Initialize the length register |
| */ |
| size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); |
| IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); |
| |
| /* |
| * Initialize the base address registers |
| */ |
| buf_low = (uint32_t)rx_data->rbd_area.dma_address; |
| buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); |
| IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); |
| IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); |
| |
| /* |
| * Setup head & tail pointers |
| */ |
| IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), |
| rx_data->ring_size - 1); |
| IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); |
| |
| rx_data->rbd_next = 0; |
| rx_data->lro_first = 0; |
| |
| /* |
| * Setup the Receive Descriptor Control Register (RXDCTL) |
| * PTHRESH=32 descriptors (half the internal cache) |
| * HTHRESH=0 descriptors (to minimize latency on fetch) |
| * WTHRESH defaults to 1 (writeback each descriptor) |
| */ |
| reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); |
| reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ |
| |
| /* Not a valid value for 82599, X540 or X550 */ |
| if (hw->mac.type == ixgbe_mac_82598EB) { |
| reg_val |= 0x0020; /* pthresh */ |
| } |
| IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); |
| |
| if (hw->mac.type == ixgbe_mac_82599EB || |
| hw->mac.type == ixgbe_mac_X540 || |
| hw->mac.type == ixgbe_mac_X550 || |
| hw->mac.type == ixgbe_mac_X550EM_x) { |
| reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
| reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); |
| IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); |
| } |
| |
| /* |
| * Setup the Split and Replication Receive Control Register. |
| * Set the rx buffer size and the advanced descriptor type. |
| */ |
| reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | |
| IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
| reg_val |= IXGBE_SRRCTL_DROP_EN; |
| IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); |
| } |
| |
/*
 * ixgbe_setup_rx - Global receive-side initialization.
 *
 * With rx disabled, programs PSRTYPE, FCTRL, RXCSUM, the classification
 * mode (RSS/VMDq/VMDq+RSS), enables the receive unit, then configures
 * every rx ring, the per-ring statistics mapping, max frame size, jumbo
 * frame enable and (optionally) RSC.  Register ordering here matters:
 * rings are set up only after RXCTRL is written (see comment below).
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	uint32_t ring_mapping;
	uint32_t i, index;
	uint32_t psrtype_rss_bit;

	/*
	 * Ensure that Rx is disabled while setting up
	 * the Rx unit and Rx descriptor ring(s)
	 */
	ixgbe_disable_rx(hw);

	/*
	 * PSRTYPE must be configured for 82599.  Without VMDq only
	 * PSRTYPE(0) is programmed; with VMDq one register per group is
	 * written.  The high bits (0x80000000 / 0x40000000 / 0x20000000)
	 * select how RSS queue bits apply per pool -- NOTE(review):
	 * exact encoding per Intel datasheet, confirm against 82599 spec.
	 */
	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
		reg_val |= IXGBE_PSRTYPE_L2HDR;
		reg_val |= 0x80000000;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
	} else {
		/* Fewer RSS bits per pool when more than 32 groups exist */
		if (ixgbe->num_rx_groups > 32) {
			psrtype_rss_bit = 0x20000000;
		} else {
			psrtype_rss_bit = 0x40000000;
		}
		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
			reg_val |= IXGBE_PSRTYPE_L2HDR;
			reg_val |= psrtype_rss_bit;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
		}
	}

	/*
	 * Set filter control in FCTRL to determine types of packets are passed
	 * up to the driver.
	 * - Pass broadcast packets.
	 * - Do not pass flow control pause frames (82598-specific)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* Broadcast Accept Mode */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg_val |= IXGBE_FCTRL_DPF;	/* Discard Pause Frames */
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Hardware checksum settings: enable IP header checksum offload
	 * when rx checksum offload is configured.
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		reg_val |= IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup VMDq and RSS for multiple receive queues
	 */
	switch (ixgbe->classify_mode) {
	case IXGBE_CLASSIFY_RSS:
		/*
		 * One group, only RSS is needed when more than
		 * one ring enabled.
		 */
		ixgbe_setup_rss(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ:
		/*
		 * Multiple groups, each group has one ring,
		 * only VMDq is needed.
		 */
		ixgbe_setup_vmdq(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ_RSS:
		/*
		 * Multiple groups and multiple rings, both
		 * VMDq and RSS are needed.
		 */
		ixgbe_setup_vmdq_rss(ixgbe);
		break;

	default:
		break;
	}

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
	 * 82598 is the only adapter which defines this RXCTRL option.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		reg_val |= IXGBE_RXCTRL_DMBYPS;	/* descriptor monitor bypass */
	reg_val |= IXGBE_RXCTRL_RXEN;
	(void) ixgbe_enable_rx_dma(hw, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.  Each RQSMR register
	 * packs four 8-bit map fields; a ring's hardware queue index
	 * selects the register (index >> 2) and the byte within it
	 * (index & 0x3).  (The initial zero below is immediately
	 * replaced by the read-modify-write inside the loop.)
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		index = ixgbe->rx_rings[i].hw_index;
		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
	}

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
	reg_val &= ~IXGBE_MHADD_MFS_MASK;
	reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit: set only when the configured MTU
	 * exceeds the standard Ethernet MTU, cleared otherwise.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ixgbe->default_mtu > ETHERMTU)
		reg_val |= IXGBE_HLREG0_JUMBOEN;
	else
		reg_val &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * Setup RSC (receive-side coalescing, i.e. hardware LRO) for
	 * multiple receive queues when LRO is enabled.
	 */
	if (ixgbe->lro_enable) {
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			/*
			 * Make sure rx_buf_size * MAXDESC not greater
			 * than 65535.
			 * Intel recommends 4 for MAXDESC field value.
			 */
			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
			reg_val |= IXGBE_RSCCTL_RSCEN;
			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
			else
				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
		}

		/* Disable RSC for ACK packets */
		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);

		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= IXGBE_RDRXCTL_RSCACKC;
		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;

		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}
}
| |
/*
 * ixgbe_setup_tx_ring - Program a single tx ring into the hardware.
 *
 * Writes the ring length, base address and head/tail registers for
 * tx_ring->index, optionally enables head write-back, and resets the
 * software descriptor/control-block state.
 *
 * Caller must hold both tx_ring->tx_lock and ixgbe->gen_lock.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers (empty ring: head == tail == 0)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		/* DMA address of the write-back word, just past the ring */
		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 * (82598 uses a different DCA_TXCTRL register layout).
		 */

		reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		if (hw->mac.type == ixgbe_mac_82598EB) {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
		} else {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
		}
	} else {
		/* No write-back word; recycling must read TDH instead */
		tx_ring->tbd_head_wb = NULL;
	}

	/* Reset software descriptor state: empty ring, all tbds free */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * The control-block free list is reset only on full (re)init;
	 * otherwise outstanding tcbs are preserved.
	 */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
| |
/*
 * ixgbe_setup_tx - Global transmit-side initialization.
 *
 * Sets up every tx ring, the per-ring statistics mapping, CRC/padding,
 * the tx DMA engine (82599/X540/X550) and finally enables each tx
 * queue.  Ordering matters: on 82599-class parts, TXDCTL.ENABLE must be
 * set only after DMATXCTL.TE.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.  Each TQSMR/TQSM
	 * register packs four 8-bit map fields, so the accumulated
	 * value is flushed every fourth ring; the register name differs
	 * between the 82598 and the later MACs.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
				break;

			default:
				break;
			}

			ring_mapping = 0;
		}
	}
	/* Flush the final, partially filled mapping register (if any) */
	if (i & 0x3) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
			break;

		default:
			break;
		}
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599, X540 and X550 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);

		/*
		 * Disable arbiter to set MTQC (64 queues, one packet
		 * buffer), then re-enable it.
		 */
		reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		reg_val |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		reg_val &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
| |
| /* |
| * ixgbe_setup_rss - Setup receive-side scaling feature. |
| */ |
| static void |
| ixgbe_setup_rss(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| uint32_t mrqc; |
| |
| /* |
| * Initialize RETA/ERETA table |
| */ |
| ixgbe_setup_rss_table(ixgbe); |
| |
| /* |
| * Enable RSS & perform hash on these packet types |
| */ |
| mrqc = IXGBE_MRQC_RSSEN | |
| IXGBE_MRQC_RSS_FIELD_IPV4 | |
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV4_UDP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX | |
| IXGBE_MRQC_RSS_FIELD_IPV6 | |
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_UDP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; |
| IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
| } |
| |
| /* |
| * ixgbe_setup_vmdq - Setup MAC classification feature |
| */ |
| static void |
| ixgbe_setup_vmdq(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| uint32_t vmdctl, i, vtctl; |
| |
| /* |
| * Setup the VMDq Control register, enable VMDq based on |
| * packet destination MAC address: |
| */ |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| /* |
| * VMDq Enable = 1; |
| * VMDq Filter = 0; MAC filtering |
| * Default VMDq output index = 0; |
| */ |
| vmdctl = IXGBE_VMD_CTL_VMDQ_EN; |
| IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); |
| break; |
| |
| case ixgbe_mac_82599EB: |
| case ixgbe_mac_X540: |
| case ixgbe_mac_X550: |
| case ixgbe_mac_X550EM_x: |
| /* |
| * Enable VMDq-only. |
| */ |
| vmdctl = IXGBE_MRQC_VMDQEN; |
| IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); |
| |
| for (i = 0; i < hw->mac.num_rar_entries; i++) { |
| IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); |
| } |
| |
| /* |
| * Enable Virtualization and Replication. |
| */ |
| vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; |
| IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); |
| |
| /* |
| * Enable receiving packets to all VFs |
| */ |
| IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); |
| IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); |
| break; |
| |
| default: |
| break; |
| } |
| } |
| |
| /* |
| * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. |
| */ |
| static void |
| ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) |
| { |
| struct ixgbe_hw *hw = &ixgbe->hw; |
| uint32_t i, mrqc; |
| uint32_t vtctl, vmdctl; |
| |
| /* |
| * Initialize RETA/ERETA table |
| */ |
| ixgbe_setup_rss_table(ixgbe); |
| |
| /* |
| * Enable and setup RSS and VMDq |
| */ |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| /* |
| * Enable RSS & Setup RSS Hash functions |
| */ |
| mrqc = IXGBE_MRQC_RSSEN | |
| IXGBE_MRQC_RSS_FIELD_IPV4 | |
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV4_UDP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX | |
| IXGBE_MRQC_RSS_FIELD_IPV6 | |
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_UDP | |
| IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; |
| IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
| |
| <