| /* |
| * sfe_util.c: general ethernet mac driver framework version 2.6 |
| * |
| * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * 1. Redistributions of source code must retain the above copyright notice, |
| * this list of conditions and the following disclaimer. |
| * |
| * 2. Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * 3. Neither the name of the author nor the names of its contributors may be |
| * used to endorse or promote products derived from this software without |
| * specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS |
| * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| * DAMAGE. |
| */ |
| |
| /* |
| * Copyright 2010 Sun Microsystems, Inc. All rights reserved. |
| * Use is subject to license terms. |
| */ |
| |
| /* |
| * System Header files. |
| */ |
| #include <sys/types.h> |
| #include <sys/conf.h> |
| #include <sys/debug.h> |
| #include <sys/kmem.h> |
| #include <sys/vtrace.h> |
| #include <sys/ethernet.h> |
| #include <sys/modctl.h> |
| #include <sys/errno.h> |
| #include <sys/ddi.h> |
| #include <sys/sunddi.h> |
| #include <sys/stream.h> /* required for MBLK* */ |
| #include <sys/strsun.h> /* required for mionack() */ |
| #include <sys/byteorder.h> |
| #include <sys/sysmacros.h> |
| #include <sys/pci.h> |
| #include <inet/common.h> |
| #include <inet/led.h> |
| #include <inet/mi.h> |
| #include <inet/nd.h> |
| #include <sys/crc32.h> |
| |
| #include <sys/note.h> |
| |
| #include "sfe_mii.h" |
| #include "sfe_util.h" |
| |
| |
| |
| extern char ident[]; |
| |
| /* Debugging support */ |
| #ifdef GEM_DEBUG_LEVEL |
| static int gem_debug = GEM_DEBUG_LEVEL; |
| #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args |
| #else |
| #define DPRINTF(n, args) |
| #undef ASSERT |
| #define ASSERT(x) |
| #endif |
| |
| #define IOC_LINESIZE 0x40 /* Is it right for amd64? */ |
| |
| /* |
| * Useful macros and typedefs |
| */ |
| #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1)) |
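/*
* ROUNDUP() rounds x up to the next multiple of a, where a must be a
* power of two; e.g. ROUNDUP(100, IOC_LINESIZE) == 128.
*/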
| |
| #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1]) |
| #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2) |
| |
| #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9]) |
| #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6]) |
| |
| |
| #ifndef INT32_MAX |
| #define INT32_MAX 0x7fffffff |
| #endif |
| |
| #define VTAG_OFF (ETHERADDRL*2) |
| #ifndef VTAG_SIZE |
| #define VTAG_SIZE 4 |
| #endif |
| #ifndef VTAG_TPID |
| #define VTAG_TPID 0x8100U |
| #endif |
| |
| #define GET_TXBUF(dp, sn) \ |
| &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)] |
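/*
* Tx buffers are addressed by monotonically increasing sequence
* numbers; GET_TXBUF() maps a sequence number to its tx_buf[] entry,
* offset by tx_slots_base (see gem_init_tx_ring(), which rebases the
* sequence numbers).
*/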
| |
| #define TXFLAG_VTAG(flag) \ |
| (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT) |
| |
| #define MAXPKTBUF(dp) \ |
| ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL) |
| |
| #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */ |
| #define BOOLEAN(x) ((x) != 0) |
| |
| /* |
* Macros to distinguish chip generations.
| */ |
| |
| /* |
| * Private functions |
| */ |
| static void gem_mii_start(struct gem_dev *); |
| static void gem_mii_stop(struct gem_dev *); |
| |
| /* local buffer management */ |
| static void gem_nd_setup(struct gem_dev *dp); |
| static void gem_nd_cleanup(struct gem_dev *dp); |
| static int gem_alloc_memory(struct gem_dev *); |
| static void gem_free_memory(struct gem_dev *); |
| static void gem_init_rx_ring(struct gem_dev *); |
| static void gem_init_tx_ring(struct gem_dev *); |
| __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *); |
| |
| static void gem_tx_timeout(struct gem_dev *); |
| static void gem_mii_link_watcher(struct gem_dev *dp); |
| static int gem_mac_init(struct gem_dev *dp); |
| static int gem_mac_start(struct gem_dev *dp); |
| static int gem_mac_stop(struct gem_dev *dp, uint_t flags); |
| static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp); |
| |
| static struct ether_addr gem_etherbroadcastaddr = { |
| 0xff, 0xff, 0xff, 0xff, 0xff, 0xff |
| }; |
| |
| int gem_speed_value[] = {10, 100, 1000}; |
| |
| /* ============================================================== */ |
| /* |
| * Misc runtime routines |
| */ |
| /* ============================================================== */ |
| /* |
| * Ether CRC calculation according to 21143 data sheet |
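*
* These helpers are typically used by chip-specific drivers to compute
* multicast hash filter indexes: gem_ether_crc_le() uses the reflected
* (little-endian) bit order via the kernel crc32_table, while
* gem_ether_crc_be() computes the CRC bit-by-bit in big-endian order.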
| */ |
| uint32_t |
| gem_ether_crc_le(const uint8_t *addr, int len) |
| { |
| uint32_t crc; |
| |
CRC32(crc, addr, len, 0xffffffffU, crc32_table);
| return (crc); |
| } |
| |
| uint32_t |
| gem_ether_crc_be(const uint8_t *addr, int len) |
| { |
| int idx; |
| int bit; |
| uint_t data; |
| uint32_t crc; |
| #define CRC32_POLY_BE 0x04c11db7 |
| |
| crc = 0xffffffff; |
| for (idx = 0; idx < len; idx++) { |
| for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { |
| crc = (crc << 1) |
| ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0); |
| } |
| } |
| return (crc); |
| #undef CRC32_POLY_BE |
| } |
| |
| int |
| gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val) |
| { |
| char propname[32]; |
| |
| (void) sprintf(propname, prop_template, dp->name); |
| |
| return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip, |
| DDI_PROP_DONTPASS, propname, def_val)); |
| } |
| |
| static int |
| gem_population(uint32_t x) |
| { |
| int i; |
| int cnt; |
| |
| cnt = 0; |
| for (i = 0; i < 32; i++) { |
| if (x & (1 << i)) { |
| cnt++; |
| } |
| } |
| return (cnt); |
| } |
| |
| #ifdef GEM_DEBUG_LEVEL |
| #ifdef GEM_DEBUG_VLAN |
| static void |
| gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp, |
| boolean_t check_cksum) |
| { |
| char msg[180]; |
| uint8_t buf[18+20+20]; |
| uint8_t *p; |
| size_t offset; |
| uint_t ethertype; |
| uint_t proto; |
| uint_t ipproto = 0; |
| uint_t iplen; |
| uint_t iphlen; |
| uint_t tcplen; |
| uint_t udplen; |
| uint_t cksum; |
| int rest; |
| int len; |
| char *bp; |
| mblk_t *tp; |
| extern uint_t ip_cksum(mblk_t *, int, uint32_t); |
| |
| msg[0] = 0; |
| bp = msg; |
| |
| rest = sizeof (buf); |
| offset = 0; |
| for (tp = mp; tp; tp = tp->b_cont) { |
| len = tp->b_wptr - tp->b_rptr; |
| len = min(rest, len); |
| bcopy(tp->b_rptr, &buf[offset], len); |
| rest -= len; |
| offset += len; |
| if (rest == 0) { |
| break; |
| } |
| } |
| |
| offset = 0; |
| p = &buf[offset]; |
| |
| /* ethernet address */ |
| sprintf(bp, |
| "ether: %02x:%02x:%02x:%02x:%02x:%02x" |
| " -> %02x:%02x:%02x:%02x:%02x:%02x", |
| p[6], p[7], p[8], p[9], p[10], p[11], |
| p[0], p[1], p[2], p[3], p[4], p[5]); |
| bp = &msg[strlen(msg)]; |
| |
/* vlan tag and ethertype */
| ethertype = GET_ETHERTYPE(p); |
| if (ethertype == VTAG_TPID) { |
| sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14])); |
| bp = &msg[strlen(msg)]; |
| |
| offset += VTAG_SIZE; |
| p = &buf[offset]; |
| ethertype = GET_ETHERTYPE(p); |
| } |
| sprintf(bp, " type:%04x", ethertype); |
| bp = &msg[strlen(msg)]; |
| |
| /* ethernet packet length */ |
| sprintf(bp, " mblklen:%d", msgdsize(mp)); |
| bp = &msg[strlen(msg)]; |
| if (mp->b_cont) { |
| sprintf(bp, "("); |
| bp = &msg[strlen(msg)]; |
| for (tp = mp; tp; tp = tp->b_cont) { |
| if (tp == mp) { |
| sprintf(bp, "%d", tp->b_wptr - tp->b_rptr); |
| } else { |
| sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr); |
| } |
| bp = &msg[strlen(msg)]; |
| } |
| sprintf(bp, ")"); |
| bp = &msg[strlen(msg)]; |
| } |
| |
| if (ethertype != ETHERTYPE_IP) { |
| goto x; |
| } |
| |
| /* ip address */ |
| offset += sizeof (struct ether_header); |
| p = &buf[offset]; |
| ipproto = p[9]; |
| iplen = GET_NET16(&p[2]); |
| sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d", |
| p[12], p[13], p[14], p[15], |
| p[16], p[17], p[18], p[19], |
| ipproto, iplen); |
| bp = (void *)&msg[strlen(msg)]; |
| |
| iphlen = (p[0] & 0xf) * 4; |
| |
/* checksum for the pseudo header */
| cksum = *(uint16_t *)&p[12]; |
| cksum += *(uint16_t *)&p[14]; |
| cksum += *(uint16_t *)&p[16]; |
| cksum += *(uint16_t *)&p[18]; |
| cksum += BE_16(ipproto); |
| |
| /* tcp or udp protocol header */ |
| offset += iphlen; |
| p = &buf[offset]; |
| if (ipproto == IPPROTO_TCP) { |
| tcplen = iplen - iphlen; |
| sprintf(bp, ", tcp: len:%d cksum:%x", |
| tcplen, GET_NET16(&p[16])); |
| bp = (void *)&msg[strlen(msg)]; |
| |
| if (check_cksum) { |
| cksum += BE_16(tcplen); |
| cksum = (uint16_t)ip_cksum(mp, offset, cksum); |
| sprintf(bp, " (%s)", |
| (cksum == 0 || cksum == 0xffff) ? "ok" : "ng"); |
| bp = (void *)&msg[strlen(msg)]; |
| } |
| } else if (ipproto == IPPROTO_UDP) { |
| udplen = GET_NET16(&p[4]); |
| sprintf(bp, ", udp: len:%d cksum:%x", |
| udplen, GET_NET16(&p[6])); |
| bp = (void *)&msg[strlen(msg)]; |
| |
| if (GET_NET16(&p[6]) && check_cksum) { |
| cksum += *(uint16_t *)&p[4]; |
| cksum = (uint16_t)ip_cksum(mp, offset, cksum); |
| sprintf(bp, " (%s)", |
| (cksum == 0 || cksum == 0xffff) ? "ok" : "ng"); |
| bp = (void *)&msg[strlen(msg)]; |
| } |
| } |
| x: |
| cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg); |
| } |
| #endif /* GEM_DEBUG_VLAN */ |
| #endif /* GEM_DEBUG_LEVEL */ |
| |
| /* ============================================================== */ |
| /* |
| * IO cache flush |
| */ |
| /* ============================================================== */ |
| __INLINE__ void |
| gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) |
| { |
| int n; |
| int m; |
| int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift; |
| |
| /* sync active descriptors */ |
| if (rx_desc_unit_shift < 0 || nslot == 0) { |
| /* no rx descriptor ring */ |
| return; |
| } |
| |
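/*
* The descriptor area is a ring: when [head, head + nslot) wraps past
* the end of the ring, sync it as two ranges, the wrapped portion at
* offset 0 first and then the portion from head to the ring end.
*/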
| n = dp->gc.gc_rx_ring_size - head; |
| if ((m = nslot - n) > 0) { |
| (void) ddi_dma_sync(dp->desc_dma_handle, |
| (off_t)0, |
| (size_t)(m << rx_desc_unit_shift), |
| how); |
| nslot = n; |
| } |
| |
| (void) ddi_dma_sync(dp->desc_dma_handle, |
| (off_t)(head << rx_desc_unit_shift), |
| (size_t)(nslot << rx_desc_unit_shift), |
| how); |
| } |
| |
| __INLINE__ void |
| gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how) |
| { |
| int n; |
| int m; |
| int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift; |
| |
| /* sync active descriptors */ |
| if (tx_desc_unit_shift < 0 || nslot == 0) { |
| /* no tx descriptor ring */ |
| return; |
| } |
| |
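/*
* The tx descriptors follow the rx descriptors within the same DMA
* allocation, so their byte offset in the region is
* (tx_ring_dma - rx_ring_dma); as with the rx ring, a wrapping range
* is synced in two pieces.
*/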
| n = dp->gc.gc_tx_ring_size - head; |
| if ((m = nslot - n) > 0) { |
| (void) ddi_dma_sync(dp->desc_dma_handle, |
| (off_t)(dp->tx_ring_dma - dp->rx_ring_dma), |
| (size_t)(m << tx_desc_unit_shift), |
| how); |
| nslot = n; |
| } |
| |
| (void) ddi_dma_sync(dp->desc_dma_handle, |
| (off_t)((head << tx_desc_unit_shift) |
| + (dp->tx_ring_dma - dp->rx_ring_dma)), |
| (size_t)(nslot << tx_desc_unit_shift), |
| how); |
| } |
| |
| static void |
| gem_rx_start_default(struct gem_dev *dp, int head, int nslot) |
| { |
| gem_rx_desc_dma_sync(dp, |
| SLOT(head, dp->gc.gc_rx_ring_size), nslot, |
| DDI_DMA_SYNC_FORDEV); |
| } |
| |
| /* ============================================================== */ |
| /* |
| * Buffer management |
| */ |
| /* ============================================================== */ |
| static void |
| gem_dump_txbuf(struct gem_dev *dp, int level, const char *title) |
| { |
| cmn_err(level, |
| "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), " |
| "tx_softq: %d[%d] %d[%d] (+%d), " |
| "tx_free: %d[%d] %d[%d] (+%d), " |
| "tx_desc: %d[%d] %d[%d] (+%d), " |
| "intr: %d[%d] (+%d), ", |
| dp->name, title, |
| dp->tx_active_head, |
| SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size), |
| dp->tx_active_tail, |
| SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size), |
| dp->tx_active_tail - dp->tx_active_head, |
| dp->tx_softq_head, |
| SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size), |
| dp->tx_softq_tail, |
| SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size), |
| dp->tx_softq_tail - dp->tx_softq_head, |
| dp->tx_free_head, |
| SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size), |
| dp->tx_free_tail, |
| SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size), |
| dp->tx_free_tail - dp->tx_free_head, |
| dp->tx_desc_head, |
| SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size), |
| dp->tx_desc_tail, |
| SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size), |
| dp->tx_desc_tail - dp->tx_desc_head, |
| dp->tx_desc_intr, |
| SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size), |
| dp->tx_desc_intr - dp->tx_desc_head); |
| } |
| |
| static void |
| gem_free_rxbuf(struct rxbuf *rbp) |
| { |
| struct gem_dev *dp; |
| |
| dp = rbp->rxb_devp; |
| ASSERT(mutex_owned(&dp->intrlock)); |
| rbp->rxb_next = dp->rx_buf_freelist; |
| dp->rx_buf_freelist = rbp; |
| dp->rx_buf_freecnt++; |
| } |
| |
| /* |
* gem_get_rxbuf: supply a receive buffer which has been mapped into
| * DMA space. |
| */ |
| struct rxbuf * |
| gem_get_rxbuf(struct gem_dev *dp, int cansleep) |
| { |
| struct rxbuf *rbp; |
| uint_t count = 0; |
| int i; |
| int err; |
| |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d", |
| dp->rx_buf_freecnt)); |
| /* |
| * Get rx buffer management structure |
| */ |
| rbp = dp->rx_buf_freelist; |
| if (rbp) { |
| /* get one from the recycle list */ |
| ASSERT(dp->rx_buf_freecnt > 0); |
| |
| dp->rx_buf_freelist = rbp->rxb_next; |
| dp->rx_buf_freecnt--; |
| rbp->rxb_next = NULL; |
| return (rbp); |
| } |
| |
| /* |
| * Allocate a rx buffer management structure |
| */ |
| rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP); |
| if (rbp == NULL) { |
| /* no memory */ |
| return (NULL); |
| } |
| |
| /* |
| * Prepare a back pointer to the device structure which will be |
* referred to when freeing the buffer later.
| */ |
| rbp->rxb_devp = dp; |
| |
| /* allocate a dma handle for rx data buffer */ |
| if ((err = ddi_dma_alloc_handle(dp->dip, |
| &dp->gc.gc_dma_attr_rxbuf, |
| (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT), |
| NULL, &rbp->rxb_dh)) != DDI_SUCCESS) { |
| |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d", |
| dp->name, __func__, err); |
| |
| kmem_free(rbp, sizeof (struct rxbuf)); |
| return (NULL); |
| } |
| |
| /* allocate a bounce buffer for rx */ |
| if ((err = ddi_dma_mem_alloc(rbp->rxb_dh, |
| ROUNDUP(dp->rx_buf_len, IOC_LINESIZE), |
| &dp->gc.gc_buf_attr, |
| /* |
| * if the nic requires a header at the top of receive buffers, |
| * it may access the rx buffer randomly. |
| */ |
| (dp->gc.gc_rx_header_len > 0) |
| ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING, |
| cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, |
| NULL, |
| &rbp->rxb_buf, &rbp->rxb_buf_len, |
| &rbp->rxb_bah)) != DDI_SUCCESS) { |
| |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_mem_alloc: failed, err=%d", |
| dp->name, __func__, err); |
| |
| ddi_dma_free_handle(&rbp->rxb_dh); |
| kmem_free(rbp, sizeof (struct rxbuf)); |
| return (NULL); |
| } |
| |
/* Map the bounce buffer into DMA space */
| if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh, |
| NULL, rbp->rxb_buf, dp->rx_buf_len, |
| ((dp->gc.gc_rx_header_len > 0) |
| ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT) |
| :(DDI_DMA_READ | DDI_DMA_STREAMING)), |
| cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, |
| NULL, |
| rbp->rxb_dmacookie, |
| &count)) != DDI_DMA_MAPPED) { |
| |
| ASSERT(err != DDI_DMA_INUSE); |
DPRINTF(0, (CE_WARN,
"!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
dp->name, __func__, err));
| |
| /* |
| * we failed to allocate a dma resource |
| * for the rx bounce buffer. |
| */ |
| ddi_dma_mem_free(&rbp->rxb_bah); |
| ddi_dma_free_handle(&rbp->rxb_dh); |
| kmem_free(rbp, sizeof (struct rxbuf)); |
| return (NULL); |
| } |
| |
/* collect the rest of the DMA cookies */
| for (i = 1; i < count; i++) { |
| ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]); |
| } |
| rbp->rxb_nfrags = count; |
| |
| /* Now we successfully prepared an rx buffer */ |
| dp->rx_buf_allocated++; |
| |
| return (rbp); |
| } |
| |
| /* ============================================================== */ |
| /* |
| * memory resource management |
| */ |
| /* ============================================================== */ |
| static int |
| gem_alloc_memory(struct gem_dev *dp) |
| { |
| caddr_t ring; |
| caddr_t buf; |
| size_t req_size; |
| size_t ring_len; |
| size_t buf_len; |
| ddi_dma_cookie_t ring_cookie; |
| ddi_dma_cookie_t buf_cookie; |
| uint_t count; |
| int i; |
| int err; |
| struct txbuf *tbp; |
| int tx_buf_len; |
| ddi_dma_attr_t dma_attr_txbounce; |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); |
| |
| dp->desc_dma_handle = NULL; |
| req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size; |
| |
| if (req_size > 0) { |
| /* |
* Allocate RX/TX descriptors and an io area.
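* They are carved from a single physically contiguous DMA region
* laid out as [rx descriptors][tx descriptors][io area]; the base
* addresses computed below and the offset arithmetic in
* gem_tx_desc_dma_sync() rely on this layout.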
| */ |
| if ((err = ddi_dma_alloc_handle(dp->dip, |
| &dp->gc.gc_dma_attr_desc, |
| DDI_DMA_SLEEP, NULL, |
| &dp->desc_dma_handle)) != DDI_SUCCESS) { |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_alloc_handle failed: %d", |
| dp->name, __func__, err); |
| return (ENOMEM); |
| } |
| |
| if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle, |
| req_size, &dp->gc.gc_desc_attr, |
| DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, |
| &ring, &ring_len, |
| &dp->desc_acc_handle)) != DDI_SUCCESS) { |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_mem_alloc failed: " |
| "ret %d, request size: %d", |
| dp->name, __func__, err, (int)req_size); |
| ddi_dma_free_handle(&dp->desc_dma_handle); |
| return (ENOMEM); |
| } |
| |
| if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle, |
| NULL, ring, ring_len, |
| DDI_DMA_RDWR | DDI_DMA_CONSISTENT, |
| DDI_DMA_SLEEP, NULL, |
| &ring_cookie, &count)) != DDI_SUCCESS) { |
| ASSERT(err != DDI_DMA_INUSE); |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_addr_bind_handle failed: %d", |
| dp->name, __func__, err); |
| ddi_dma_mem_free(&dp->desc_acc_handle); |
| ddi_dma_free_handle(&dp->desc_dma_handle); |
| return (ENOMEM); |
| } |
| ASSERT(count == 1); |
| |
| /* set base of rx descriptor ring */ |
| dp->rx_ring = ring; |
| dp->rx_ring_dma = ring_cookie.dmac_laddress; |
| |
| /* set base of tx descriptor ring */ |
| dp->tx_ring = dp->rx_ring + dp->rx_desc_size; |
| dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size; |
| |
| /* set base of io area */ |
| dp->io_area = dp->tx_ring + dp->tx_desc_size; |
| dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size; |
| } |
| |
| /* |
| * Prepare DMA resources for tx packets |
| */ |
| ASSERT(dp->gc.gc_tx_buf_size > 0); |
| |
| /* Special dma attribute for tx bounce buffers */ |
| dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf; |
| dma_attr_txbounce.dma_attr_sgllen = 1; |
| dma_attr_txbounce.dma_attr_align = |
| max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE); |
| |
/* Each tx bounce buffer must be large enough for a maximum-sized tx packet. */
| tx_buf_len = MAXPKTBUF(dp); |
| tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE); |
| |
| ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL); |
| |
| for (i = 0, tbp = dp->tx_buf; |
| i < dp->gc.gc_tx_buf_size; i++, tbp++) { |
| |
| /* setup bounce buffers for tx packets */ |
| if ((err = ddi_dma_alloc_handle(dp->dip, |
| &dma_attr_txbounce, |
| DDI_DMA_SLEEP, NULL, |
| &tbp->txb_bdh)) != DDI_SUCCESS) { |
| |
cmn_err(CE_WARN,
"!%s: %s: ddi_dma_alloc_handle for bounce buffer failed:"
" err=%d, i=%d",
| dp->name, __func__, err, i); |
| goto err_alloc_dh; |
| } |
| |
| if ((err = ddi_dma_mem_alloc(tbp->txb_bdh, |
| tx_buf_len, |
| &dp->gc.gc_buf_attr, |
| DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, |
| &buf, &buf_len, |
| &tbp->txb_bah)) != DDI_SUCCESS) { |
cmn_err(CE_WARN,
"!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
"ret %d, request size %d",
| dp->name, __func__, err, tx_buf_len); |
| ddi_dma_free_handle(&tbp->txb_bdh); |
| goto err_alloc_dh; |
| } |
| |
| if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh, |
| NULL, buf, buf_len, |
| DDI_DMA_WRITE | DDI_DMA_STREAMING, |
| DDI_DMA_SLEEP, NULL, |
| &buf_cookie, &count)) != DDI_SUCCESS) { |
| ASSERT(err != DDI_DMA_INUSE); |
| cmn_err(CE_WARN, |
| "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d", |
| dp->name, __func__, err); |
| ddi_dma_mem_free(&tbp->txb_bah); |
| ddi_dma_free_handle(&tbp->txb_bdh); |
| goto err_alloc_dh; |
| } |
| ASSERT(count == 1); |
| tbp->txb_buf = buf; |
| tbp->txb_buf_dma = buf_cookie.dmac_laddress; |
| } |
| |
| return (0); |
| |
| err_alloc_dh: |
| if (dp->gc.gc_tx_buf_size > 0) { |
| while (i-- > 0) { |
| (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh); |
| ddi_dma_mem_free(&dp->tx_buf[i].txb_bah); |
| ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh); |
| } |
| } |
| |
| if (dp->desc_dma_handle) { |
| (void) ddi_dma_unbind_handle(dp->desc_dma_handle); |
| ddi_dma_mem_free(&dp->desc_acc_handle); |
| ddi_dma_free_handle(&dp->desc_dma_handle); |
| dp->desc_dma_handle = NULL; |
| } |
| |
| return (ENOMEM); |
| } |
| |
| static void |
| gem_free_memory(struct gem_dev *dp) |
| { |
| int i; |
| struct rxbuf *rbp; |
| struct txbuf *tbp; |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); |
| |
/* Free RX/TX descriptor rings and the io area */
| if (dp->desc_dma_handle) { |
| (void) ddi_dma_unbind_handle(dp->desc_dma_handle); |
| ddi_dma_mem_free(&dp->desc_acc_handle); |
| ddi_dma_free_handle(&dp->desc_dma_handle); |
| dp->desc_dma_handle = NULL; |
| } |
| |
| /* Free dma handles for Tx */ |
| for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) { |
| /* Free bounce buffer associated to each txbuf */ |
| (void) ddi_dma_unbind_handle(tbp->txb_bdh); |
| ddi_dma_mem_free(&tbp->txb_bah); |
| ddi_dma_free_handle(&tbp->txb_bdh); |
| } |
| |
| /* Free rx buffer */ |
| while ((rbp = dp->rx_buf_freelist) != NULL) { |
| |
| ASSERT(dp->rx_buf_freecnt > 0); |
| |
| dp->rx_buf_freelist = rbp->rxb_next; |
| dp->rx_buf_freecnt--; |
| |
| /* release DMA mapping */ |
| ASSERT(rbp->rxb_dh != NULL); |
| |
/* free the dma handle for the rx bounce buffer; */
/* it always has an active dma mapping */
| ASSERT(rbp->rxb_nfrags > 0); |
| (void) ddi_dma_unbind_handle(rbp->rxb_dh); |
| |
| /* free the associated bounce buffer and dma handle */ |
| ASSERT(rbp->rxb_bah != NULL); |
| ddi_dma_mem_free(&rbp->rxb_bah); |
| /* free the associated dma handle */ |
| ddi_dma_free_handle(&rbp->rxb_dh); |
| |
| /* free the base memory of rx buffer management */ |
| kmem_free(rbp, sizeof (struct rxbuf)); |
| } |
| } |
| |
| /* ============================================================== */ |
| /* |
| * Rx/Tx descriptor slot management |
| */ |
| /* ============================================================== */ |
| /* |
| * Initialize an empty rx ring. |
| */ |
| static void |
| gem_init_rx_ring(struct gem_dev *dp) |
| { |
| int i; |
| int rx_ring_size = dp->gc.gc_rx_ring_size; |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d", |
| dp->name, __func__, |
| rx_ring_size, dp->gc.gc_rx_buf_max)); |
| |
| /* make a physical chain of rx descriptors */ |
| for (i = 0; i < rx_ring_size; i++) { |
| (*dp->gc.gc_rx_desc_init)(dp, i); |
| } |
| gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); |
| |
| dp->rx_active_head = (seqnum_t)0; |
| dp->rx_active_tail = (seqnum_t)0; |
| |
| ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL); |
| ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL); |
| } |
| |
| /* |
| * Prepare rx buffers and put them into the rx buffer/descriptor ring. |
| */ |
| static void |
| gem_prepare_rx_buf(struct gem_dev *dp) |
| { |
| int i; |
| int nrbuf; |
| struct rxbuf *rbp; |
| |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| /* Now we have no active buffers in rx ring */ |
| |
| nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max); |
| for (i = 0; i < nrbuf; i++) { |
| if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) { |
| break; |
| } |
| gem_append_rxbuf(dp, rbp); |
| } |
| |
| gem_rx_desc_dma_sync(dp, |
| 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV); |
| } |
| |
| /* |
| * Reclaim active rx buffers in rx buffer ring. |
| */ |
| static void |
| gem_clean_rx_buf(struct gem_dev *dp) |
| { |
| int i; |
| struct rxbuf *rbp; |
| int rx_ring_size = dp->gc.gc_rx_ring_size; |
| #ifdef GEM_DEBUG_LEVEL |
| int total; |
| #endif |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free", |
| dp->name, __func__, dp->rx_buf_freecnt)); |
| /* |
| * clean up HW descriptors |
| */ |
| for (i = 0; i < rx_ring_size; i++) { |
| (*dp->gc.gc_rx_desc_clean)(dp, i); |
| } |
| gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV); |
| |
| #ifdef GEM_DEBUG_LEVEL |
| total = 0; |
| #endif |
| /* |
| * Reclaim allocated rx buffers |
| */ |
| while ((rbp = dp->rx_buf_head) != NULL) { |
| #ifdef GEM_DEBUG_LEVEL |
| total++; |
| #endif |
| /* remove the first one from rx buffer list */ |
| dp->rx_buf_head = rbp->rxb_next; |
| |
| /* recycle the rxbuf */ |
| gem_free_rxbuf(rbp); |
| } |
| dp->rx_buf_tail = (struct rxbuf *)NULL; |
| |
DPRINTF(2, (CE_CONT,
"!%s: %s: %d buffers freed, total: %d free",
dp->name, __func__, total, dp->rx_buf_freecnt));
| } |
| |
| /* |
| * Initialize an empty transmit buffer/descriptor ring |
| */ |
| static void |
| gem_init_tx_ring(struct gem_dev *dp) |
| { |
| int i; |
| int tx_buf_size = dp->gc.gc_tx_buf_size; |
| int tx_ring_size = dp->gc.gc_tx_ring_size; |
| |
| DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d", |
| dp->name, __func__, |
| dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size)); |
| |
| ASSERT(!dp->mac_active); |
| |
| /* initialize active list and free list */ |
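/*
* Rebase the tx sequence numbers so that tx_softq_head becomes 0;
* tx_slots_base records the physical slot that sequence number 0
* now maps to (see GET_TXBUF()), so pending softq entries keep
* pointing at the same physical tx buffers.
*/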
| dp->tx_slots_base = |
| SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size); |
| dp->tx_softq_tail -= dp->tx_softq_head; |
| dp->tx_softq_head = (seqnum_t)0; |
| |
| dp->tx_active_head = dp->tx_softq_head; |
| dp->tx_active_tail = dp->tx_softq_head; |
| |
| dp->tx_free_head = dp->tx_softq_tail; |
| dp->tx_free_tail = dp->gc.gc_tx_buf_limit; |
| |
| dp->tx_desc_head = (seqnum_t)0; |
| dp->tx_desc_tail = (seqnum_t)0; |
| dp->tx_desc_intr = (seqnum_t)0; |
| |
| for (i = 0; i < tx_ring_size; i++) { |
| (*dp->gc.gc_tx_desc_init)(dp, i); |
| } |
| gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); |
| } |
| |
| __INLINE__ |
| static void |
| gem_txbuf_free_dma_resources(struct txbuf *tbp) |
| { |
| if (tbp->txb_mp) { |
| freemsg(tbp->txb_mp); |
| tbp->txb_mp = NULL; |
| } |
| tbp->txb_nfrags = 0; |
| tbp->txb_flag = 0; |
| } |
| #pragma inline(gem_txbuf_free_dma_resources) |
| |
| /* |
| * reclaim active tx buffers and reset positions in tx rings. |
| */ |
| static void |
| gem_clean_tx_buf(struct gem_dev *dp) |
| { |
| int i; |
| seqnum_t head; |
| seqnum_t tail; |
| seqnum_t sn; |
| struct txbuf *tbp; |
| int tx_ring_size = dp->gc.gc_tx_ring_size; |
| #ifdef GEM_DEBUG_LEVEL |
| int err; |
| #endif |
| |
| ASSERT(!dp->mac_active); |
| ASSERT(dp->tx_busy == 0); |
| ASSERT(dp->tx_softq_tail == dp->tx_free_head); |
| |
| /* |
| * clean up all HW descriptors |
| */ |
| for (i = 0; i < tx_ring_size; i++) { |
| (*dp->gc.gc_tx_desc_clean)(dp, i); |
| } |
| gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV); |
| |
| /* dequeue all active and loaded buffers */ |
| head = dp->tx_active_head; |
| tail = dp->tx_softq_tail; |
| |
| ASSERT(dp->tx_free_head - head >= 0); |
| tbp = GET_TXBUF(dp, head); |
| for (sn = head; sn != tail; sn++) { |
| gem_txbuf_free_dma_resources(tbp); |
| ASSERT(tbp->txb_mp == NULL); |
| dp->stats.errxmt++; |
| tbp = tbp->txb_next; |
| } |
| |
| #ifdef GEM_DEBUG_LEVEL |
/* ensure that no dma resources for tx are still in use */
| err = 0; |
| while (sn != head + dp->gc.gc_tx_buf_size) { |
| if (tbp->txb_mp || tbp->txb_nfrags) { |
| DPRINTF(0, (CE_CONT, |
| "%s: %s: sn:%d[%d] mp:%p nfrags:%d", |
| dp->name, __func__, |
| sn, SLOT(sn, dp->gc.gc_tx_buf_size), |
| tbp->txb_mp, tbp->txb_nfrags)); |
| err = 1; |
| } |
| sn++; |
| tbp = tbp->txb_next; |
| } |
| |
| if (err) { |
| gem_dump_txbuf(dp, CE_WARN, |
| "gem_clean_tx_buf: tbp->txb_mp != NULL"); |
| } |
| #endif |
| /* recycle buffers, now no active tx buffers in the ring */ |
| dp->tx_free_tail += tail - head; |
| ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit); |
| |
| /* fix positions in tx buffer rings */ |
| dp->tx_active_head = dp->tx_free_head; |
| dp->tx_active_tail = dp->tx_free_head; |
| dp->tx_softq_head = dp->tx_free_head; |
| dp->tx_softq_tail = dp->tx_free_head; |
| } |
| |
| /* |
| * Reclaim transmitted buffers from tx buffer/descriptor ring. |
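*
* The work is done in two phases: the descriptor status scan and the
* head/tail bookkeeping run under xmitlock, while the per-buffer dma
* and mblk resources are released with the lock dropped.  The
* tx_reclaim_busy counter lets the last of possibly concurrent
* reclaimers advance tx_free_tail safely.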
| */ |
| __INLINE__ int |
| gem_reclaim_txbuf(struct gem_dev *dp) |
| { |
| struct txbuf *tbp; |
| uint_t txstat; |
| int err = GEM_SUCCESS; |
| seqnum_t head; |
| seqnum_t tail; |
| seqnum_t sn; |
| seqnum_t desc_head; |
| int tx_ring_size = dp->gc.gc_tx_ring_size; |
| uint_t (*tx_desc_stat)(struct gem_dev *dp, |
| int slot, int ndesc) = dp->gc.gc_tx_desc_stat; |
| clock_t now; |
| |
| now = ddi_get_lbolt(); |
| if (now == (clock_t)0) { |
| /* make non-zero timestamp */ |
| now--; |
| } |
| |
| mutex_enter(&dp->xmitlock); |
| |
| head = dp->tx_active_head; |
| tail = dp->tx_active_tail; |
| |
| #if GEM_DEBUG_LEVEL > 2 |
| if (head != tail) { |
| cmn_err(CE_CONT, "!%s: %s: " |
| "testing active_head:%d[%d], active_tail:%d[%d]", |
| dp->name, __func__, |
| head, SLOT(head, dp->gc.gc_tx_buf_size), |
| tail, SLOT(tail, dp->gc.gc_tx_buf_size)); |
| } |
| #endif |
| #ifdef DEBUG |
| if (dp->tx_reclaim_busy == 0) { |
| /* check tx buffer management consistency */ |
| ASSERT(dp->tx_free_tail - dp->tx_active_head |
| == dp->gc.gc_tx_buf_limit); |
| /* EMPTY */ |
| } |
| #endif |
| dp->tx_reclaim_busy++; |
| |
| /* sync all active HW descriptors */ |
| gem_tx_desc_dma_sync(dp, |
| SLOT(dp->tx_desc_head, tx_ring_size), |
| dp->tx_desc_tail - dp->tx_desc_head, |
| DDI_DMA_SYNC_FORKERNEL); |
| |
| tbp = GET_TXBUF(dp, head); |
| desc_head = dp->tx_desc_head; |
| for (sn = head; sn != tail; |
| dp->tx_active_head = (++sn), tbp = tbp->txb_next) { |
| int ndescs; |
| |
| ASSERT(tbp->txb_desc == desc_head); |
| |
| ndescs = tbp->txb_ndescs; |
| if (ndescs == 0) { |
| /* skip errored descriptors */ |
| continue; |
| } |
| txstat = (*tx_desc_stat)(dp, |
| SLOT(tbp->txb_desc, tx_ring_size), ndescs); |
| |
| if (txstat == 0) { |
| /* not transmitted yet */ |
| break; |
| } |
| |
| if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) { |
| dp->tx_blocked = now; |
| } |
| |
| ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR)); |
| |
| if (txstat & GEM_TX_ERR) { |
| err = GEM_FAILURE; |
| cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]", |
| dp->name, sn, SLOT(sn, tx_ring_size)); |
| } |
| #if GEM_DEBUG_LEVEL > 4 |
| if (now - tbp->txb_stime >= 50) { |
| cmn_err(CE_WARN, "!%s: tx delay while %d mS", |
| dp->name, (now - tbp->txb_stime)*10); |
| } |
| #endif |
| /* free transmitted descriptors */ |
| desc_head += ndescs; |
| } |
| |
| if (dp->tx_desc_head != desc_head) { |
| /* we have reclaimed one or more tx buffers */ |
| dp->tx_desc_head = desc_head; |
| |
| /* If we passed the next interrupt position, update it */ |
| if (desc_head - dp->tx_desc_intr > 0) { |
| dp->tx_desc_intr = desc_head; |
| } |
| } |
| mutex_exit(&dp->xmitlock); |
| |
| /* free dma mapping resources associated with transmitted tx buffers */ |
| tbp = GET_TXBUF(dp, head); |
| tail = sn; |
| #if GEM_DEBUG_LEVEL > 2 |
| if (head != tail) { |
| cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]", |
| __func__, |
| head, SLOT(head, dp->gc.gc_tx_buf_size), |
| tail, SLOT(tail, dp->gc.gc_tx_buf_size)); |
| } |
| #endif |
| for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) { |
| gem_txbuf_free_dma_resources(tbp); |
| } |
| |
| /* recycle the tx buffers */ |
| mutex_enter(&dp->xmitlock); |
| if (--dp->tx_reclaim_busy == 0) { |
/* we are the last thread that can update the free tail */
#if GEM_DEBUG_LEVEL > 4
/* check that all resources have been deallocated */
sn = dp->tx_free_tail;
tbp = GET_TXBUF(dp, sn);
| while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) { |
| if (tbp->txb_nfrags) { |
| /* in use */ |
| break; |
| } |
| ASSERT(tbp->txb_mp == NULL); |
| tbp = tbp->txb_next; |
| sn++; |
| } |
| ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn); |
| #endif |
| dp->tx_free_tail = |
| dp->tx_active_head + dp->gc.gc_tx_buf_limit; |
| } |
| if (!dp->mac_active) { |
| /* someone may be waiting for me. */ |
| cv_broadcast(&dp->tx_drain_cv); |
| } |
| #if GEM_DEBUG_LEVEL > 2 |
| cmn_err(CE_CONT, "!%s: %s: called, " |
| "free_head:%d free_tail:%d(+%d) added:%d", |
| dp->name, __func__, |
| dp->tx_free_head, dp->tx_free_tail, |
| dp->tx_free_tail - dp->tx_free_head, tail - head); |
| #endif |
| mutex_exit(&dp->xmitlock); |
| |
| return (err); |
| } |
| #pragma inline(gem_reclaim_txbuf) |
| |
| |
| /* |
* Make tx descriptors in an out-of-order manner.
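*
* Write a tx descriptor for every buffer in [start_slot, end_slot).
* Callers fill their reserved ranges outside xmitlock, so several
* ranges may be written concurrently (hence "out-of-order");
* gem_tx_start_unit() later hands them to the hardware in sequence
* number order once all writers have finished.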
| */ |
| static void |
| gem_tx_load_descs_oo(struct gem_dev *dp, |
| seqnum_t start_slot, seqnum_t end_slot, uint64_t flags) |
| { |
| seqnum_t sn; |
| struct txbuf *tbp; |
| int tx_ring_size = dp->gc.gc_tx_ring_size; |
| int (*tx_desc_write) |
| (struct gem_dev *dp, int slot, |
| ddi_dma_cookie_t *dmacookie, |
| int frags, uint64_t flag) = dp->gc.gc_tx_desc_write; |
| clock_t now = ddi_get_lbolt(); |
| |
| sn = start_slot; |
| tbp = GET_TXBUF(dp, sn); |
| do { |
| #if GEM_DEBUG_LEVEL > 1 |
| if (dp->tx_cnt < 100) { |
| dp->tx_cnt++; |
| flags |= GEM_TXFLAG_INTR; |
| } |
| #endif |
| /* write a tx descriptor */ |
| tbp->txb_desc = sn; |
| tbp->txb_ndescs = (*tx_desc_write)(dp, |
| SLOT(sn, tx_ring_size), |
| tbp->txb_dmacookie, |
| tbp->txb_nfrags, flags | tbp->txb_flag); |
| tbp->txb_stime = now; |
| ASSERT(tbp->txb_ndescs == 1); |
| |
| flags = 0; |
| sn++; |
| tbp = tbp->txb_next; |
| } while (sn != end_slot); |
| } |
| |
| __INLINE__ |
| static size_t |
| gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp) |
| { |
| size_t min_pkt; |
| caddr_t bp; |
| size_t off; |
| mblk_t *tp; |
| size_t len; |
| uint64_t flag; |
| |
| ASSERT(tbp->txb_mp == NULL); |
| |
| /* we use bounce buffer for the packet */ |
| min_pkt = ETHERMIN; |
| bp = tbp->txb_buf; |
| off = 0; |
| tp = mp; |
| |
| flag = tbp->txb_flag; |
| if (flag & GEM_TXFLAG_SWVTAG) { |
| /* need to increase min packet size */ |
| min_pkt += VTAG_SIZE; |
| ASSERT((flag & GEM_TXFLAG_VTAG) == 0); |
| } |
| |
/* copy the whole payload into the bounce buffer */
| for (; tp; tp = tp->b_cont) { |
| if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) { |
| bcopy(tp->b_rptr, &bp[off], len); |
| off += len; |
| } |
| } |
| |
| if (off < min_pkt && |
| (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) { |
| /* |
| * Extend the packet to minimum packet size explicitly. |
* For software vlan packets, we shouldn't use the tx autopad
* function because the nic may not be aware of vlan;
* we must keep 46 octets of payload even when a vlan tag is used.
| */ |
| bzero(&bp[off], min_pkt - off); |
| off = min_pkt; |
| } |
| |
| (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV); |
| |
| tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma; |
| tbp->txb_dmacookie[0].dmac_size = off; |
| |
| DPRINTF(2, (CE_CONT, |
| "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d", |
| dp->name, __func__, |
| tbp->txb_dmacookie[0].dmac_laddress, |
| tbp->txb_dmacookie[0].dmac_size, |
| (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT, |
| min_pkt)); |
| |
| /* save misc info */ |
| tbp->txb_mp = mp; |
| tbp->txb_nfrags = 1; |
| #ifdef DEBUG_MULTIFRAGS |
| if (dp->gc.gc_tx_max_frags >= 3 && |
| tbp->txb_dmacookie[0].dmac_size > 16*3) { |
| tbp->txb_dmacookie[1].dmac_laddress = |
| tbp->txb_dmacookie[0].dmac_laddress + 16; |
| tbp->txb_dmacookie[2].dmac_laddress = |
| tbp->txb_dmacookie[1].dmac_laddress + 16; |
| |
| tbp->txb_dmacookie[2].dmac_size = |
| tbp->txb_dmacookie[0].dmac_size - 16*2; |
| tbp->txb_dmacookie[1].dmac_size = 16; |
| tbp->txb_dmacookie[0].dmac_size = 16; |
| tbp->txb_nfrags = 3; |
| } |
| #endif |
| return (off); |
| } |
| #pragma inline(gem_setup_txbuf_copy) |
| |
| __INLINE__ |
| static void |
| gem_tx_start_unit(struct gem_dev *dp) |
| { |
| seqnum_t head; |
| seqnum_t tail; |
| struct txbuf *tbp_head; |
| struct txbuf *tbp_tail; |
| |
| /* update HW descriptors from soft queue */ |
| ASSERT(mutex_owned(&dp->xmitlock)); |
| ASSERT(dp->tx_softq_head == dp->tx_active_tail); |
| |
| head = dp->tx_softq_head; |
| tail = dp->tx_softq_tail; |
| |
| DPRINTF(1, (CE_CONT, |
| "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]", |
| dp->name, __func__, head, tail, tail - head, |
| dp->tx_desc_head, dp->tx_desc_tail, |
| dp->tx_desc_tail - dp->tx_desc_head)); |
| |
| ASSERT(tail - head > 0); |
| |
| dp->tx_desc_tail = tail; |
| |
| tbp_head = GET_TXBUF(dp, head); |
| tbp_tail = GET_TXBUF(dp, tail - 1); |
| |
| ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail); |
| |
| dp->gc.gc_tx_start(dp, |
| SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size), |
| tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc); |
| |
| /* advance softq head and active tail */ |
| dp->tx_softq_head = dp->tx_active_tail = tail; |
| } |
| #pragma inline(gem_tx_start_unit) |
| |
| #ifdef GEM_DEBUG_LEVEL |
| static int gem_send_cnt[10]; |
| #endif |
| #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE) |
| #define EHLEN (sizeof (struct ether_header)) |
| /* |
* check the ether packet type and build per-packet tx flags (vlan tag handling)
| */ |
| static uint64_t |
| gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp) |
| { |
| mblk_t *tp; |
| ssize_t len; |
| uint_t vtag; |
| int off; |
| uint64_t flag; |
| |
| flag = 0ULL; |
| |
| /* |
* prepare a contiguous copy of the packet header for protocol analysis
| */ |
| if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) { |
/* the header spans mblks; gather it into the work buffer */
| for (tp = mp, off = 0; |
| tp && (off < PKT_MIN_SIZE); |
| tp = tp->b_cont, off += len) { |
| len = (long)tp->b_wptr - (long)tp->b_rptr; |
| len = min(len, PKT_MIN_SIZE - off); |
| bcopy(tp->b_rptr, &bp[off], len); |
| } |
| } else { |
/* the header is contiguous; use the mblk in place */
| bp = mp->b_rptr; |
| } |
| |
| /* process vlan tag for GLD v3 */ |
| if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) { |
| if (dp->misc_flag & GEM_VLAN_HARD) { |
| vtag = GET_NET16(&bp[VTAG_OFF + 2]); |
| ASSERT(vtag); |
| flag |= vtag << GEM_TXFLAG_VTAG_SHIFT; |
| } else { |
| flag |= GEM_TXFLAG_SWVTAG; |
| } |
| } |
| return (flag); |
| } |
| #undef EHLEN |
| #undef PKT_MIN_SIZE |
| /* |
* gem_send_common is an exported function because hardware-dependent
* routines may use it for sending control frames, such as setup frames
* for the 2114x chipset.
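*
* It reserves free tx buffers under xmitlock, copies the payloads into
* bounce buffers with the lock dropped, then writes the descriptors
* and, when the last concurrent sender finishes, kicks the hardware.
* Packets that could not be queued are returned to the caller.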
| */ |
| mblk_t * |
| gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags) |
| { |
| int nmblk; |
| int avail; |
| mblk_t *tp; |
| mblk_t *mp; |
| int i; |
| struct txbuf *tbp; |
| seqnum_t head; |
| uint64_t load_flags; |
| uint64_t len_total = 0; |
| uint32_t bcast = 0; |
| uint32_t mcast = 0; |
| |
| ASSERT(mp_head != NULL); |
| |
| mp = mp_head; |
| nmblk = 1; |
| while ((mp = mp->b_next) != NULL) { |
| nmblk++; |
| } |
| #ifdef GEM_DEBUG_LEVEL |
| gem_send_cnt[0]++; |
| gem_send_cnt[min(nmblk, 9)]++; |
| #endif |
| /* |
* Acquire resources
| */ |
| mutex_enter(&dp->xmitlock); |
| if (dp->mac_suspended) { |
| mutex_exit(&dp->xmitlock); |
| mp = mp_head; |
| while (mp) { |
| tp = mp->b_next; |
| freemsg(mp); |
| mp = tp; |
| } |
| return (NULL); |
| } |
| |
| if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { |
| /* don't send data packets while mac isn't active */ |
| /* XXX - should we discard packets? */ |
| mutex_exit(&dp->xmitlock); |
| return (mp_head); |
| } |
| |
| /* allocate free slots */ |
| head = dp->tx_free_head; |
| avail = dp->tx_free_tail - head; |
| |
| DPRINTF(2, (CE_CONT, |
| "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d", |
| dp->name, __func__, |
| dp->tx_free_head, dp->tx_free_tail, avail, nmblk)); |
| |
| avail = min(avail, dp->tx_max_packets); |
| |
| if (nmblk > avail) { |
| if (avail == 0) { |
| /* no resources; short cut */ |
| DPRINTF(2, (CE_CONT, "!%s: no resources", __func__)); |
| dp->tx_max_packets = max(dp->tx_max_packets - 1, 1); |
| goto done; |
| } |
| nmblk = avail; |
| } |
| |
| dp->tx_free_head = head + nmblk; |
| load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0; |
| |
/* update the last interrupt position if tx buffers are exhausted. */
| if (nmblk == avail) { |
| tbp = GET_TXBUF(dp, head + avail - 1); |
| tbp->txb_flag = GEM_TXFLAG_INTR; |
| dp->tx_desc_intr = head + avail; |
| } |
| mutex_exit(&dp->xmitlock); |
| |
| tbp = GET_TXBUF(dp, head); |
| |
| for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) { |
| uint8_t *bp; |
| uint64_t txflag; |
| |
| /* remove one from the mblk list */ |
| ASSERT(mp_head != NULL); |
| mp = mp_head; |
| mp_head = mp_head->b_next; |
| mp->b_next = NULL; |
| |
| /* statistics for non-unicast packets */ |
| bp = mp->b_rptr; |
| if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) { |
| if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet, |
| ETHERADDRL) == 0) { |
| bcast++; |
| } else { |
| mcast++; |
| } |
| } |
| |
| /* save misc info */ |
| txflag = tbp->txb_flag; |
| txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT; |
| txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf); |
| tbp->txb_flag = txflag; |
| |
| len_total += gem_setup_txbuf_copy(dp, mp, tbp); |
| } |
| |
| (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags); |
| |
| /* Append the tbp at the tail of the active tx buffer list */ |
| mutex_enter(&dp->xmitlock); |
| |
| if ((--dp->tx_busy) == 0) { |
/* extend the tail of softq, as new packets are ready. */
| dp->tx_softq_tail = dp->tx_free_head; |
| |
| if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) { |
| /* |
* The device status has changed while we were
* preparing tx buffers.
* As we are the last one to make tx non-busy,
* wake up anyone who may be waiting for us.
| */ |
| cv_broadcast(&dp->tx_drain_cv); |
| } else { |
| ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0); |
| gem_tx_start_unit(dp); |
| } |
| } |
| dp->stats.obytes += len_total; |
| dp->stats.opackets += nmblk; |
| dp->stats.obcast += bcast; |
| dp->stats.omcast += mcast; |
| done: |
| mutex_exit(&dp->xmitlock); |
| |
| return (mp_head); |
| } |
| |
| /* ========================================================== */ |
| /* |
| * error detection and restart routines |
| */ |
| /* ========================================================== */ |
| int |
| gem_restart_nic(struct gem_dev *dp, uint_t flags) |
| { |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); |
| #ifdef GEM_DEBUG_LEVEL |
| #if GEM_DEBUG_LEVEL > 1 |
| gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic"); |
| #endif |
| #endif |
| |
| if (dp->mac_suspended) { |
| /* should we return GEM_FAILURE ? */ |
| return (GEM_FAILURE); |
| } |
| |
| /* |
| * We should avoid calling any routines except xxx_chip_reset |
| * when we are resuming the system. |
| */ |
| if (dp->mac_active) { |
| if (flags & GEM_RESTART_KEEP_BUF) { |
| /* stop rx gracefully */ |
| dp->rxmode &= ~RXMODE_ENABLE; |
| (void) (*dp->gc.gc_set_rx_filter)(dp); |
| } |
| (void) gem_mac_stop(dp, flags); |
| } |
| |
| /* reset the chip. */ |
| if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) { |
| cmn_err(CE_WARN, "%s: %s: failed to reset chip", |
| dp->name, __func__); |
| goto err; |
| } |
| |
| if (gem_mac_init(dp) != GEM_SUCCESS) { |
| goto err; |
| } |
| |
/* set up the media mode if the link has been up */
| if (dp->mii_state == MII_STATE_LINKUP) { |
| if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) { |
| goto err; |
| } |
| } |
| |
| /* setup mac address and enable rx filter */ |
| dp->rxmode |= RXMODE_ENABLE; |
| if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) { |
| goto err; |
| } |
| |
| /* |
| * XXX - a panic happened because of linkdown. |
| * We must check mii_state here, because the link can be down just |
* before the restart event happens. If the link is down now,
* gem_mac_start() will be called from gem_mii_link_check() when
* the link comes up later.
| */ |
| if (dp->mii_state == MII_STATE_LINKUP) { |
| /* restart the nic */ |
| ASSERT(!dp->mac_active); |
| (void) gem_mac_start(dp); |
| } |
| return (GEM_SUCCESS); |
| err: |
| return (GEM_FAILURE); |
| } |
| |
| |
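/*
* gem_tx_timeout: periodic tx watchdog, rescheduled via timeout(9F).
* It reclaims transmitted buffers and, when the oldest pending tx
* buffer or a blocked tx stream has been stalled longer than the
* configured limits, dumps the tx state and restarts the nic,
* kicking the mac layer (mac_tx_update) when tx can make progress again.
*/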
| static void |
| gem_tx_timeout(struct gem_dev *dp) |
| { |
| clock_t now; |
| boolean_t tx_sched; |
| struct txbuf *tbp; |
| |
| mutex_enter(&dp->intrlock); |
| |
| tx_sched = B_FALSE; |
| now = ddi_get_lbolt(); |
| |
| mutex_enter(&dp->xmitlock); |
| if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) { |
| mutex_exit(&dp->xmitlock); |
| goto schedule_next; |
| } |
| mutex_exit(&dp->xmitlock); |
| |
/* reclaim transmitted buffers to check whether the transmitter hangs. */
| if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { |
| /* tx error happened, reset transmitter in the chip */ |
| (void) gem_restart_nic(dp, 0); |
| tx_sched = B_TRUE; |
| dp->tx_blocked = (clock_t)0; |
| |
| goto schedule_next; |
| } |
| |
| mutex_enter(&dp->xmitlock); |
| /* check if the transmitter thread is stuck */ |
| if (dp->tx_active_head == dp->tx_active_tail) { |
| /* no tx buffer is loaded to the nic */ |
| if (dp->tx_blocked && |
| now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) { |
| gem_dump_txbuf(dp, CE_WARN, |
| "gem_tx_timeout: tx blocked"); |
| tx_sched = B_TRUE; |
| dp->tx_blocked = (clock_t)0; |
| } |
| mutex_exit(&dp->xmitlock); |
| goto schedule_next; |
| } |
| |
| tbp = GET_TXBUF(dp, dp->tx_active_head); |
| if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) { |
| mutex_exit(&dp->xmitlock); |
| goto schedule_next; |
| } |
| mutex_exit(&dp->xmitlock); |
| |
| gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout"); |
| |
/* discard untransmitted packets and restart tx. */
| (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT); |
| tx_sched = B_TRUE; |
| dp->tx_blocked = (clock_t)0; |
| |
| schedule_next: |
| mutex_exit(&dp->intrlock); |
| |
| /* restart the downstream if needed */ |
| if (tx_sched) { |
| mac_tx_update(dp->mh); |
| } |
| |
| DPRINTF(4, (CE_CONT, |
| "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d", |
| dp->name, BOOLEAN(dp->tx_blocked), |
| dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr)); |
| dp->timeout_id = |
| timeout((void (*)(void *))gem_tx_timeout, |
| (void *)dp, dp->gc.gc_tx_timeout_interval); |
| } |
| |
| /* ================================================================== */ |
| /* |
| * Interrupt handler |
| */ |
| /* ================================================================== */ |
| __INLINE__ |
| static void |
| gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head) |
| { |
| struct rxbuf *rbp; |
| seqnum_t tail; |
| int rx_ring_size = dp->gc.gc_rx_ring_size; |
| |
| ASSERT(rbp_head != NULL); |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d", |
| dp->name, __func__, dp->rx_active_head, dp->rx_active_tail)); |
| |
| /* |
| * Add new buffers into active rx buffer list |
| */ |
| if (dp->rx_buf_head == NULL) { |
| dp->rx_buf_head = rbp_head; |
| ASSERT(dp->rx_buf_tail == NULL); |
| } else { |
| dp->rx_buf_tail->rxb_next = rbp_head; |
| } |
| |
| tail = dp->rx_active_tail; |
| for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) { |
/* keep track of the tail of the active rx buffer list */
| dp->rx_buf_tail = rbp; |
| |
| dp->gc.gc_rx_desc_write(dp, |
| SLOT(tail, rx_ring_size), |
| rbp->rxb_dmacookie, |
| rbp->rxb_nfrags); |
| |
| dp->rx_active_tail = tail = tail + 1; |
| } |
| } |
| #pragma inline(gem_append_rxbuf) |
| |
| mblk_t * |
| gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len) |
| { |
| int rx_header_len = dp->gc.gc_rx_header_len; |
| uint8_t *bp; |
| mblk_t *mp; |
| |
| /* allocate a new mblk */ |
if ((mp = allocb(len + VTAG_SIZE, BPRI_MED)) != NULL) {
| ASSERT(mp->b_next == NULL); |
| ASSERT(mp->b_cont == NULL); |
| |
| mp->b_rptr += VTAG_SIZE; |
| bp = mp->b_rptr; |
| mp->b_wptr = bp + len; |
| |
| /* |
* sync the whole payload area (everything past the rx header) to
* invalidate all of the corresponding stale entries in the iocache.
| */ |
| (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len, |
| 0, DDI_DMA_SYNC_FORKERNEL); |
| |
| bcopy(rbp->rxb_buf + rx_header_len, bp, len); |
| } |
| return (mp); |
| } |
| |
| #ifdef GEM_DEBUG_LEVEL |
| uint_t gem_rx_pkts[17]; |
| #endif |
| |
| |
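/*
* gem_receive: harvest received packets from the rx ring.
* Completed buffers are turned into mblks via the gc_get_packet method
* (by default a copy into a freshly allocated mblk), the ring is
* refilled with the recycled rx buffers, and the packets are passed up
* through mac_rx() with intrlock temporarily dropped.
* Returns the number of rx buffers processed.
*/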
| int |
| gem_receive(struct gem_dev *dp) |
| { |
| uint64_t len_total = 0; |
| struct rxbuf *rbp; |
| mblk_t *mp; |
| int cnt = 0; |
| uint64_t rxstat; |
| struct rxbuf *newbufs; |
| struct rxbuf **newbufs_tailp; |
| mblk_t *rx_head; |
| mblk_t **rx_tailp; |
| int rx_ring_size = dp->gc.gc_rx_ring_size; |
| seqnum_t active_head; |
| uint64_t (*rx_desc_stat)(struct gem_dev *dp, |
| int slot, int ndesc); |
| int ethermin = ETHERMIN; |
| int ethermax = dp->mtu + sizeof (struct ether_header); |
| int rx_header_len = dp->gc.gc_rx_header_len; |
| |
| ASSERT(mutex_owned(&dp->intrlock)); |
| |
| DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p", |
| dp->name, dp->rx_buf_head)); |
| |
| rx_desc_stat = dp->gc.gc_rx_desc_stat; |
| newbufs_tailp = &newbufs; |
| rx_tailp = &rx_head; |
| for (active_head = dp->rx_active_head; |
| (rbp = dp->rx_buf_head) != NULL; active_head++) { |
| int len; |
| if (cnt == 0) { |
| cnt = max(dp->poll_pkt_delay*2, 10); |
| cnt = min(cnt, |
| dp->rx_active_tail - active_head); |
| gem_rx_desc_dma_sync(dp, |
| SLOT(active_head, rx_ring_size), |
| cnt, |
| DDI_DMA_SYNC_FORKERNEL); |
| } |
| |
| if (rx_header_len > 0) { |
| (void) ddi_dma_sync(rbp->rxb_dh, 0, |
| rx_header_len, DDI_DMA_SYNC_FORKERNEL); |
| } |
| |
| if (((rxstat = (*rx_desc_stat)(dp, |
| SLOT(active_head, rx_ring_size), |
| rbp->rxb_nfrags)) |
| & (GEM_RX_DONE | GEM_RX_ERR)) == 0) { |
| /* not received yet */ |
| break; |
| } |
| |
| /* Remove the head of the rx buffer list */ |
| dp->rx_buf_head = rbp->rxb_next; |
| cnt--; |
| |
| |
| if (rxstat & GEM_RX_ERR) { |
| goto next; |
| } |
| |
| len = rxstat & GEM_RX_LEN; |
| DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x", |
| dp->name, __func__, rxstat, len)); |
| |
| /* |
| * Copy the packet |
| */ |
| if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) { |
| /* no memory, discard the packet */ |
| dp->stats.norcvbuf++; |
| goto next; |
| } |
| |
| /* |
| * Process VLAN tag |
| */ |
| ethermin = ETHERMIN; |
| ethermax = dp->mtu + sizeof (struct ether_header); |
| if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) { |
| ethermax += VTAG_SIZE; |
| } |
| |
| /* check packet size */ |
| if (len < ethermin) { |
| dp->stats.errrcv++; |
| dp->stats.runt++; |
| freemsg(mp); |
| goto next; |
| } |
| |
| if (len > ethermax) { |
| dp->stats.errrcv++; |
| dp->stats.frame_too_long++; |
| freemsg(mp); |
| goto next; |
| } |
| |
| len_total += len; |
| |
| #ifdef GEM_DEBUG_VLAN |
| if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) { |
| gem_dump_packet(dp, (char *)__func__, mp, B_TRUE); |
| } |
| #endif |
/* append the received packet to the temporary rx packet list */
| *rx_tailp = mp; |
| rx_tailp = &mp->b_next; |
| |
| if (mp->b_rptr[0] & 1) { |
| if (bcmp(mp->b_rptr, |
| gem_etherbroadcastaddr.ether_addr_octet, |
| ETHERADDRL) == 0) { |
| dp->stats.rbcast++; |
| } else { |
| dp->stats.rmcast++; |
| } |
| } |
| next: |
| ASSERT(rbp != NULL); |
| |
/* append the used rx buffer to the temporary list for re-arming */
| *newbufs_tailp = rbp; |
| newbufs_tailp = &rbp->rxb_next; |
| } |
| |
| /* advance rx_active_head */ |
| if ((cnt = active_head - dp->rx_active_head) > 0) { |
| dp->stats.rbytes += len_total; |
| dp->stats.rpackets += cnt; |
| } |
| dp->rx_active_head = active_head; |
| |
/* terminate the working lists */
| *newbufs_tailp = NULL; |
| *rx_tailp = NULL; |
| |
| if (dp->rx_buf_head == NULL) { |
| dp->rx_buf_tail = NULL; |
| } |
| |
| DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p", |
| dp->name, __func__, cnt, rx_head)); |
| |
| if (newbufs) { |
| /* |
* refill the rx ring with the recycled buffers
| */ |
| seqnum_t head; |
| |
| /* save current tail */ |
| head = dp->rx_active_tail; |
| gem_append_rxbuf(dp, newbufs); |
| |
/* call the hw-dependent rx start routine, if any. */
| dp->gc.gc_rx_start(dp, |
| SLOT(head, rx_ring_size), dp->rx_active_tail - head); |
| } |
| |
| if (rx_head) { |
| /* |
| * send up received packets |
| */ |
| mutex_exit(&dp->intrlock); |
| mac_rx(dp->mh, NULL, rx_head); |
| mutex_enter(&dp->intrlock); |
| } |
| |
| #ifdef GEM_DEBUG_LEVEL |
| gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++; |
| #endif |
| return (cnt); |
| } |
| |
| boolean_t |
| gem_tx_done(struct gem_dev *dp) |
| { |
| boolean_t tx_sched = B_FALSE; |
| |
| if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) { |
| (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); |
| DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d", |
| dp->name, dp->tx_active_head, dp->tx_active_tail)); |
| tx_sched = B_TRUE; |
| goto x; |
| } |
| |
| mutex_enter(&dp->xmitlock); |
| |
| /* XXX - we must not have any packets in soft queue */ |
| ASSERT(dp->tx_softq_head == dp->tx_softq_tail); |
| /* |
* If we will not get another chance to reclaim free tx buffers and
* the tx side is blocked, it is worth rescheduling the downstream,
* i.e. the tx side.
| */ |
| ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0); |
| if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) { |
| /* |
| * As no further tx-done interrupts are scheduled, this |
* is the last chance to kick the tx side, which may be
* blocked now; otherwise the tx side would never work again.
| */ |
| tx_sched = B_TRUE; |
| dp->tx_blocked = (clock_t)0; |
| dp->tx_max_packets = |
| min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit); |
| } |
| |
| mutex_exit(&dp->xmitlock); |
| |
| DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d", |
| dp->name, __func__, BOOLEAN(dp->tx_blocked))); |
| x: |
| return (tx_sched); |
| } |
| |
| static uint_t |
| gem_intr(struct gem_dev *dp) |
| { |
| uint_t ret; |
| |
| mutex_enter(&dp->intrlock); |
| if (dp->mac_suspended) { |
| mutex_exit(&dp->intrlock); |
| return (DDI_INTR_UNCLAIMED); |
| } |
| dp->intr_busy = B_TRUE; |
| |
| ret = (*dp->gc.gc_interrupt)(dp); |
| |
| if (ret == DDI_INTR_UNCLAIMED) { |
| dp->intr_busy = B_FALSE; |
| mutex_exit(&dp->intrlock); |
| return (ret); |
| } |
| |
| if (!dp->mac_active) { |
| cv_broadcast(&dp->tx_drain_cv); |
| } |
| |
| |
| dp->stats.intr++; |
| dp->intr_busy = B_FALSE; |
| |
| mutex_exit(&dp->intrlock); |
| |
| if (ret & INTR_RESTART_TX) { |
| DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name)); |
| mac_tx_update(dp->mh); |
| ret &= ~INTR_RESTART_TX; |
| } |
| return (ret); |
| } |
| |
| static void |
| gem_intr_watcher(struct gem_dev *dp) |
| { |
| (void) gem_intr(dp); |
| |
/* schedule the next call of gem_intr_watcher */
| dp->intr_watcher_id = |
| timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1); |
| } |
| |
| /* ======================================================================== */ |
| /* |
| * MII support routines |
| */ |
| /* ======================================================================== */ |
| static void |
| gem_choose_forcedmode(struct gem_dev *dp) |
| { |
| /* choose media mode */ |
| if (dp->anadv_1000fdx || dp->anadv_1000hdx) { |
| dp->speed = GEM_SPD_1000; |
| dp->full_duplex = dp->anadv_1000fdx; |
| } else if (dp->anadv_100fdx || dp->anadv_100t4) { |
| dp->speed = GEM_SPD_100; |
| dp->full_duplex = B_TRUE; |
| } else if (dp->anadv_100hdx) { |
| dp->speed = GEM_SPD_100; |
| dp->full_duplex = B_FALSE; |
| } else { |
| dp->speed = GEM_SPD_10; |
| dp->full_duplex = dp->anadv_10fdx; |
| } |
| } |
| |
| uint16_t |
| gem_mii_read(struct gem_dev *dp, uint_t reg) |
| { |
| if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { |
| (*dp->gc.gc_mii_sync)(dp); |
| } |
| return ((*dp->gc.gc_mii_read)(dp, reg)); |
| } |
| |
| void |
| gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val) |
| { |
| if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) { |
| (*dp->gc.gc_mii_sync)(dp); |
| } |
| (*dp->gc.gc_mii_write)(dp, reg, val); |
| } |
| |
| #define fc_cap_decode(x) \ |
| ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \ |
| (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0)) |
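/*
* fc_cap_decode() maps the PAUSE/ASMPAUSE ability bits to the same
* 0..3 flow-control index used by fc_cap_encode[] and the
* gem_fc_result[][] table below: 0:none, 1:symmetric, 2:tx-only,
* 3:rx/symmetric.
*/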
| |
| int |
| gem_mii_config_default(struct gem_dev *dp) |
| { |
| uint16_t mii_stat; |
| uint16_t val; |
| static uint16_t fc_cap_encode[4] = { |
| 0, /* none */ |
| MII_ABILITY_PAUSE, /* symmetric */ |
| MII_ABILITY_ASMPAUSE, /* tx */ |
| MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */ |
| }; |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__)); |
| |
| /* |
| * Configure bits in advertisement register |
| */ |
| mii_stat = dp->mii_status; |
| |
| DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b", |
| dp->name, __func__, mii_stat, MII_STATUS_BITS)); |
| |
| if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) { |
| /* this should not happen */ |
| cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b", |
| dp->name, mii_stat, MII_STATUS_BITS); |
| return (GEM_FAILURE); |
| } |
| |
| /* Do not change the rest of the ability bits in the advert reg */ |
| val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL; |
| |
| DPRINTF(0, (CE_CONT, |
| "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d", |
| dp->name, __func__, |
| dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx, |
| dp->anadv_10fdx, dp->anadv_10hdx)); |
| |
| if (dp->anadv_100t4) { |
| val |= MII_ABILITY_100BASE_T4; |
| } |
| if (dp->anadv_100fdx) { |
| val |= MII_ABILITY_100BASE_TX_FD; |
| } |
| if (dp->anadv_100hdx) { |
| val |= MII_ABILITY_100BASE_TX; |
| } |
| if (dp->anadv_10fdx) { |
| val |= MII_ABILITY_10BASE_T_FD; |
| } |
| if (dp->anadv_10hdx) { |
| val |= MII_ABILITY_10BASE_T; |
| } |
| |
| /* set flow control capability */ |
| val |= fc_cap_encode[dp->anadv_flow_control]; |
| |
| DPRINTF(0, (CE_CONT, |
| "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d", |
| dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode, |
| dp->anadv_flow_control)); |
| |
| gem_mii_write(dp, MII_AN_ADVERT, val); |
| |
| if (mii_stat & MII_STATUS_XSTATUS) { |
| /* |
| * 1000Base-T GMII support |
| */ |
| if (!dp->anadv_autoneg) { |
| /* enable manual configuration */ |
| val = MII_1000TC_CFG_EN; |
| } else { |
| val = 0; |
| if (dp->anadv_1000fdx) { |
| val |= MII_1000TC_ADV_FULL; |
| } |
| if (dp->anadv_1000hdx) { |
| val |= MII_1000TC_ADV_HALF; |
| } |
| } |
| DPRINTF(0, (CE_CONT, |
| "!%s: %s: setting MII_1000TC reg:%b", |
| dp->name, __func__, val, MII_1000TC_BITS)); |
| |
| gem_mii_write(dp, MII_1000TC, val); |
| } |
| |
| return (GEM_SUCCESS); |
| } |
| |
| #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP) |
| #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN) |
| |
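| /* |
| * gem_fc_result[local][partner]: |
| * Flow control resolution table, indexed by fc_cap_decode() of our |
| * advertised abilities and of the link partner's abilities, roughly |
| * following the pause resolution rules of IEEE 802.3 Annex 28B. |
| * A sketch of how it is consulted (as done in gem_mii_link_check() |
| * below): |
| * |
| *	dp->flow_control = |
| *	    gem_fc_result[fc_cap_decode(advert)][fc_cap_decode(lpable)]; |
| */ |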
| static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = { |
| /* none symm tx rx/symm */ |
| /* none */ |
| {FLOW_CONTROL_NONE, |
| FLOW_CONTROL_NONE, |
| FLOW_CONTROL_NONE, |
| FLOW_CONTROL_NONE}, |
| /* sym */ |
| {FLOW_CONTROL_NONE, |
| FLOW_CONTROL_SYMMETRIC, |
| FLOW_CONTROL_NONE, |
| FLOW_CONTROL_SYMMETRIC}, |
| /* tx */ |
| {FLOW_CONTROL_NONE, |
| FLOW_CONTROL_NONE, |
| FLOW_CONTROL_NONE, |
| FLOW_CONTROL_TX_PAUSE}, |
| /* rx/symm */ |
| {FLOW_CONTROL_NONE, |
| FLOW_CONTROL_SYMMETRIC, |
| FLOW_CONTROL_RX_PAUSE, |
| FLOW_CONTROL_SYMMETRIC}, |
| }; |
| |
| static char *gem_fc_type[] = { |
| "without", |
| "with symmetric", |
| "with tx", |
| "with rx", |
| }; |
| |
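| /* |
| * gem_mii_link_check: |
| * Link watcher state machine. It drives mii_state through |
| * UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE -> MEDIA_SETUP |
| * and then alternates between LINKDOWN and LINKUP according to |
| * MII_STATUS, resolving speed, duplex and flow control from the |
| * negotiated abilities. Returns B_TRUE when the tx side should be |
| * rescheduled. |
| */ |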
| boolean_t |
| gem_mii_link_check(struct gem_dev *dp) |
| { |
| uint16_t old_mii_state; |
| boolean_t tx_sched = B_FALSE; |
| uint16_t status; |
| uint16_t advert; |
| uint16_t lpable; |
| uint16_t exp; |
| uint16_t ctl1000; |
| uint16_t stat1000; |
| uint16_t val; |
| clock_t now; |
| clock_t diff; |
| int linkdown_action; |
| boolean_t fix_phy = B_FALSE; |
| |
| now = ddi_get_lbolt(); |
| old_mii_state = dp->mii_state; |
| |
| DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d", |
| dp->name, __func__, now, dp->mii_state)); |
| |
| diff = now - dp->mii_last_check; |
| dp->mii_last_check = now; |
| |
| /* |
| * For NWAM, don't show linkdown state right |
| * after the system boots |
| */ |
| if (dp->linkup_delay > 0) { |
| if (dp->linkup_delay > diff) { |
| dp->linkup_delay -= diff; |
| } else { |
| /* link up timeout */ |
| dp->linkup_delay = -1; |
| } |
| } |
| |
| next_nowait: |
| switch (dp->mii_state) { |
| case MII_STATE_UNKNOWN: |
| /* power-up, DP83840 requires 32 sync bits */ |
| (*dp->gc.gc_mii_sync)(dp); |
| goto reset_phy; |
| |
| case MII_STATE_RESETTING: |
| dp->mii_timer -= diff; |
| if (dp->mii_timer > 0) { |
| /* don't read phy registers in resetting */ |
| dp->mii_interval = WATCH_INTERVAL_FAST; |
| goto next; |
| } |
| |
| /* Timer expired; check that the reset bit has been cleared */ |
| |
| if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) { |
| /* some phys need sync bits after reset */ |
| (*dp->gc.gc_mii_sync)(dp); |
| } |
| val = gem_mii_read(dp, MII_CONTROL); |
| if (val & MII_CONTROL_RESET) { |
| cmn_err(CE_NOTE, |
| "!%s: time:%ld resetting phy not complete." |
| " mii_control:0x%b", |
| dp->name, ddi_get_lbolt(), |
| val, MII_CONTROL_BITS); |
| } |
| |
| /* ensure neither isolated nor pwrdown nor auto-nego mode */ |
| /* XXX -- this operation is required for NS DP83840A. */ |
| gem_mii_write(dp, MII_CONTROL, 0); |
| |
| /* As the PHY reset has completed, configure the PHY registers */ |
| if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) { |
| /* we failed to configure PHY. */ |
| goto reset_phy; |
| } |
| |
| /* mii_config may disable auto-negotiation */ |
| gem_choose_forcedmode(dp); |
| |
| dp->mii_lpable = 0; |
| dp->mii_advert = 0; |
| dp->mii_exp = 0; |
| dp->mii_ctl1000 = 0; |
| dp->mii_stat1000 = 0; |
| dp->flow_control = FLOW_CONTROL_NONE; |
| |
| if (!dp->anadv_autoneg) { |
| /* skip auto-negotiation phase */ |
| dp->mii_state = MII_STATE_MEDIA_SETUP; |
| dp->mii_timer = 0; |
| dp->mii_interval = 0; |
| goto next_nowait; |
| } |
| |
| /* Issue auto-negotiation command */ |
| goto autonego; |
| |
| case MII_STATE_AUTONEGOTIATING: |
| /* |
| * Autonegotiation is in progress |
| */ |
| dp->mii_timer -= diff; |
| if (dp->mii_timer - |
| (dp->gc.gc_mii_an_timeout |
| - dp->gc.gc_mii_an_wait) > 0) { |
| /* |
| * wait for a while; auto-negotiation typically |
| * completes in 2.3 - 2.5 seconds. |
| */ |
| dp->mii_interval = WATCH_INTERVAL_FAST; |
| goto next; |
| } |
| |
| /* read PHY status */ |
| status = gem_mii_read(dp, MII_STATUS); |
| DPRINTF(4, (CE_CONT, |
| "!%s: %s: called: mii_state:%d MII_STATUS reg:%b", |
| dp->name, __func__, dp->mii_state, |
| status, MII_STATUS_BITS)); |
| |
| if (status & MII_STATUS_REMFAULT) { |
| /* |
| * The link partner told us that something went wrong. |
| * What should we do? |
| */ |
| cmn_err(CE_CONT, |
| "!%s: auto-negotiation failed: remote fault", |
| dp->name); |
| goto autonego; |
| } |
| |
| if ((status & MII_STATUS_ANDONE) == 0) { |
| if (dp->mii_timer <= 0) { |
| /* |
| * Auto-negotiation timed out; |
| * try again without resetting the PHY. |
| */ |
| if (!dp->mii_supress_msg) { |
| cmn_err(CE_WARN, |
| "!%s: auto-negotiation failed: timeout", |
| dp->name); |
| dp->mii_supress_msg = B_TRUE; |
| } |
| goto autonego; |
| } |
| /* |
| * Auto-negotiation is in progress. Wait. |
| */ |
| dp->mii_interval = dp->gc.gc_mii_an_watch_interval; |
| goto next; |
| } |
| |
| /* |
| * Auto-negotiation has completed. |
| * Assume link down and fall through. |
| */ |
| dp->mii_supress_msg = B_FALSE; |
| dp->mii_state = MII_STATE_AN_DONE; |
| DPRINTF(0, (CE_CONT, |
| "!%s: auto-negotiation completed, MII_STATUS:%b", |
| dp->name, status, MII_STATUS_BITS)); |
| |
| if (dp->gc.gc_mii_an_delay > 0) { |
| dp->mii_timer = dp->gc.gc_mii_an_delay; |
| dp->mii_interval = drv_usectohz(20*1000); |
| goto next; |
| } |
| |
| dp->mii_timer = 0; |
| diff = 0; |
| goto next_nowait; |
| |
| case MII_STATE_AN_DONE: |
| /* |
| * Auto-negotiation is done. Now we can set up the media. |
| */ |
| dp->mii_timer -= diff; |
| if (dp->mii_timer > 0) { |
| /* wait for a while */ |
| dp->mii_interval = WATCH_INTERVAL_FAST; |
| goto next; |
| } |
| |
| /* |
| * set up the result of auto-negotiation |
| */ |
| |
| /* |
| * Read registers required to determine the current |
| * duplex mode and media speed. |
| */ |
| if (dp->gc.gc_mii_an_delay > 0) { |
| /* |
| * As the link watcher context has been suspended, |
| * 'status' is invalid. We must read the status register here. |
| */ |
| status = gem_mii_read(dp, MII_STATUS); |
| } |
| advert = gem_mii_read(dp, MII_AN_ADVERT); |
| lpable = gem_mii_read(dp, MII_AN_LPABLE); |
| exp = gem_mii_read(dp, MII_AN_EXPANSION); |
| if (exp == 0xffff) { |
| /* some phys don't have exp register */ |
| exp = 0; |
| } |
| ctl1000 = 0; |
| stat1000 = 0; |
| if (dp->mii_status & MII_STATUS_XSTATUS) { |
| ctl1000 = gem_mii_read(dp, MII_1000TC); |
| stat1000 = gem_mii_read(dp, MII_1000TS); |
| } |
| dp->mii_lpable = lpable; |
| dp->mii_advert = advert; |
| dp->mii_exp = exp; |
| dp->mii_ctl1000 = ctl1000; |
| dp->mii_stat1000 = stat1000; |
| |
| cmn_err(CE_CONT, |
| "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b", |
| dp->name, |
| advert, MII_ABILITY_BITS, |
| lpable, MII_ABILITY_BITS, |
| exp, MII_AN_EXP_BITS); |
| |
| if (dp->mii_status & MII_STATUS_XSTATUS) { |
| cmn_err(CE_CONT, |
| "! MII_1000TC:%b, MII_1000TS:%b", |
| ctl1000, MII_1000TC_BITS, |
| stat1000, MII_1000TS_BITS); |
| } |
| |
| if (gem_population(lpable) <= 1 && |
| (exp & MII_AN_EXP_LPCANAN) == 0) { |
| if ((advert & MII_ABILITY_TECH) != lpable) { |
| cmn_err(CE_WARN, |
| "!%s: but the link partnar doesn't seem" |
| " to have auto-negotiation capability." |
| " please check the link configuration.", |
| dp->name); |
| } |
| /* |
| * it should be the result of parallel detection, which |
| * cannot detect the duplex mode. |
| */ |
| if (lpable & MII_ABILITY_100BASE_TX) { |
| /* |
| * we prefer full duplex mode for a 100Mbps |
| * connection, if we can. |
| */ |
| lpable |= advert & MII_ABILITY_100BASE_TX_FD; |
| } |
| |
| if ((advert & lpable) == 0 && |
| lpable & MII_ABILITY_10BASE_T) { |
| lpable |= advert & MII_ABILITY_10BASE_T_FD; |
| } |
| /* |
| * as the link partner is not capable of auto-negotiation, |
| * use a fixed mode temporarily. |
| */ |
| fix_phy = B_TRUE; |
| } else if (lpable == 0) { |
| cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name); |
| goto reset_phy; |
| } |
| /* |
| * configure current link mode according to AN priority. |
| */ |
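| /* |
| * Resolution order (highest common ability wins): |
| * 1000FDX > 1000HDX > 100FDX > 100T4 > 100HDX > 10FDX > 10HDX. |
| */ |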
| val = advert & lpable; |
| if ((ctl1000 & MII_1000TC_ADV_FULL) && |
| (stat1000 & MII_1000TS_LP_FULL)) { |
| /* 1000BaseT & full duplex */ |
| dp->speed = GEM_SPD_1000; |
| dp->full_duplex = B_TRUE; |
| } else if ((ctl1000 & MII_1000TC_ADV_HALF) && |
| (stat1000 & MII_1000TS_LP_HALF)) { |
| /* 1000BaseT & half duplex */ |
| dp->speed = GEM_SPD_1000; |
| dp->full_duplex = B_FALSE; |
| } else if (val & MII_ABILITY_100BASE_TX_FD) { |
| /* 100BaseTx & full duplex */ |
| dp->speed = GEM_SPD_100; |
| dp->full_duplex = B_TRUE; |
| } else if (val & MII_ABILITY_100BASE_T4) { |
| /* 100BaseT4 & full duplex */ |
| dp->speed = GEM_SPD_100; |
| dp->full_duplex = B_TRUE; |
| } else if (val & MII_ABILITY_100BASE_TX) { |
| /* 100BaseTx & half duplex */ |
| dp->speed = GEM_SPD_100; |
| dp->full_duplex = B_FALSE; |
| } else if (val & MII_ABILITY_10BASE_T_FD) { |
| /* 10BaseT & full duplex */ |
| dp->speed = GEM_SPD_10; |
| dp->full_duplex = B_TRUE; |
| } else if (val & MII_ABILITY_10BASE_T) { |
| /* 10BaseT & half duplex */ |
| dp->speed = GEM_SPD_10; |
| dp->full_duplex = B_FALSE; |
| } else { |
| /* |
| * It seems that the link partner doesn't have |
| * auto-negotiation capability and our PHY |
| * could not report the correct current mode. |
| * We guess the current mode from the mii_control register. |
| */ |
| val = gem_mii_read(dp, MII_CONTROL); |
| |
| /* select 100m full or 10m half */ |
| dp->speed = (val & MII_CONTROL_100MB) ? |
| GEM_SPD_100 : GEM_SPD_10; |
| dp->full_duplex = dp->speed != GEM_SPD_10; |
| fix_phy = B_TRUE; |
| |
| cmn_err(CE_NOTE, |
| "!%s: auto-negotiation done but " |
| "common ability not found.\n" |
| "PHY state: control:%b advert:%b lpable:%b\n" |
| "guessing %d Mbps %s duplex mode", |
| dp->name, |
| val, MII_CONTROL_BITS, |
| advert, MII_ABILITY_BITS, |
| lpable, MII_ABILITY_BITS, |
| gem_speed_value[dp->speed], |
| dp->full_duplex ? "full" : "half"); |
| } |
| |
| if (dp->full_duplex) { |
| dp->flow_control = |
| gem_fc_result[fc_cap_decode(advert)] |
| [fc_cap_decode(lpable)]; |
| } else { |
| dp->flow_control = FLOW_CONTROL_NONE; |
| } |
| dp->mii_state = MII_STATE_MEDIA_SETUP; |
| /* FALLTHROUGH */ |
| |
| case MII_STATE_MEDIA_SETUP: |
| dp->mii_state = MII_STATE_LINKDOWN; |
| dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; |
| DPRINTF(2, (CE_CONT, "!%s: media mode setup done", dp->name)); |
| dp->mii_supress_msg = B_FALSE; |
| |
| /* use short interval */ |
| dp->mii_interval = WATCH_INTERVAL_FAST; |
| |
| if ((!dp->anadv_autoneg) || |
| dp->gc.gc_mii_an_oneshot || fix_phy) { |
| |
| /* |
| * write specified mode to phy. |
| */ |
| val = gem_mii_read(dp, MII_CONTROL); |
| val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX | |
| MII_CONTROL_ANE | MII_CONTROL_RSAN); |
| |
| if (dp->full_duplex) { |
| val |= MII_CONTROL_FDUPLEX; |
| } |
| |
| switch (dp->speed) { |
| case GEM_SPD_1000: |
| val |= MII_CONTROL_1000MB; |
| break; |
| |
| case GEM_SPD_100: |
| val |= MII_CONTROL_100MB; |
| break; |
| |
| default: |
| cmn_err(CE_WARN, "%s: unknown speed:%d", |
| dp->name, dp->speed); |
| /* FALLTHROUGH */ |
| case GEM_SPD_10: |
| /* for GEM_SPD_10, do nothing */ |
| break; |
| } |
| |
| if (dp->mii_status & MII_STATUS_XSTATUS) { |
| gem_mii_write(dp, |
| MII_1000TC, MII_1000TC_CFG_EN); |
| } |
| gem_mii_write(dp, MII_CONTROL, val); |
| } |
| |
| if (dp->nic_state >= NIC_STATE_INITIALIZED) { |
| /* notify the MAC layer of the result of auto-negotiation */ |
| (*dp->gc.gc_set_media)(dp); |
| } |
| |
| if ((void *)dp->gc.gc_mii_tune_phy) { |
| /* for built-in sis900 */ |
| /* XXX - this code should be removed. */ |
| (*dp->gc.gc_mii_tune_phy)(dp); |
| } |
| |
| goto next_nowait; |
| |
| case MII_STATE_LINKDOWN: |
| status = gem_mii_read(dp, MII_STATUS); |
| if (status & MII_STATUS_LINKUP) { |
| /* |
| * Link going up |
| */ |
| dp->mii_state = MII_STATE_LINKUP; |
| dp->mii_supress_msg = B_FALSE; |
| |
| DPRINTF(0, (CE_CONT, |
| "!%s: link up detected: mii_stat:%b", |
| dp->name, status, MII_STATUS_BITS)); |
| |
| /* |
| * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are |
| * ignored when MII_CONTROL_ANE is set. |
| */ |
| cmn_err(CE_CONT, |
| "!%s: Link up: %d Mbps %s duplex %s flow control", |
| dp->name, |
| gem_speed_value[dp->speed], |
| dp->full_duplex ? "full" : "half", |
| gem_fc_type[dp->flow_control]); |
| |
| dp->mii_interval = dp->gc.gc_mii_link_watch_interval; |
| |
| /* XXX - we need another timer to watch statistics */ |
| if (dp->gc.gc_mii_hw_link_detection && |
| dp->nic_state == NIC_STATE_ONLINE) { |
| dp->mii_interval = 0; |
| } |
| |
| if (dp->nic_state == NIC_STATE_ONLINE) { |
| if (!dp->mac_active) { |
| (void) gem_mac_start(dp); |
| } |
| tx_sched = B_TRUE; |
| } |
| goto next; |
| } |
| |
| dp->mii_supress_msg = B_TRUE; |
| if (dp->anadv_autoneg) { |
| dp->mii_timer -= diff; |
| if (dp->mii_timer <= 0) { |
| /* |
| * link down timer expired. |
| * need to restart auto-negotiation. |
| */ |
| linkdown_action = |
| dp->gc.gc_mii_linkdown_timeout_action; |
| goto restart_autonego; |
| } |
| } |
| /* don't change mii_state */ |
| break; |
| |
| case MII_STATE_LINKUP: |
| status = gem_mii_read(dp, MII_STATUS); |
| if ((status & MII_STATUS_LINKUP) == 0) { |
| /* |
| * Link going down |
| */ |
| cmn_err(CE_NOTE, |
| "!%s: link down detected: mii_stat:%b", |
| dp->name, status, MII_STATUS_BITS); |
| |
| if (dp->nic_state == NIC_STATE_ONLINE && |
| dp->mac_active && |
| dp->gc.gc_mii_stop_mac_on_linkdown) { |
| (void) gem_mac_stop(dp, 0); |
| |
| if (dp->tx_blocked) { |
| /* drain tx */ |
| tx_sched = B_TRUE; |
| } |
| } |
| |
| if (dp->anadv_autoneg) { |
| /* need to restart auto-negotiation */ |
| linkdown_action = dp->gc.gc_mii_linkdown_action; |
| goto restart_autonego; |
| } |
| |
| dp->mii_state = MII_STATE_LINKDOWN; |
| dp->mii_timer = dp->gc.gc_mii_linkdown_timeout; |
| |
| if ((void *)dp->gc.gc_mii_tune_phy) { |
| /* for built-in sis900 */ |
| (*dp->gc.gc_mii_tune_phy)(dp); |
| } |
| dp->mii_interval = dp->gc.gc_mii_link_watch_interval; |
| goto next; |
| } |
| |
| /* don't change mii_state */ |
| if (dp->gc.gc_mii_hw_link_detection && |
| dp->nic_state == NIC_STATE_ONLINE) { |
| dp->mii_interval = 0; |
| goto next; |
| } |
| break; |
| } |
| dp->mii_interval = dp->gc.gc_mii_link_watch_interval; |
| goto next; |
| |
| /* Actions on the end of state routine */ |
| |
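| /* |
| * restart_autonego: |
| * Reached when the link goes down (or stays down too long) while |
| * auto-negotiation is enabled; the configured linkdown action |
| * decides whether to reset the PHY, restart auto-negotiation |
| * explicitly, or let the PHY restart it on its own. |
| */ |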
| restart_autonego: |
| switch (linkdown_action) { |
| case MII_ACTION_RESET: |
| if (!dp->mii_supress_msg) { |
| cmn_err(CE_CONT, "!%s: resetting PHY", dp->name); |
| } |
| dp->mii_supress_msg = B_TRUE; |
| goto reset_phy; |
| |
| case MII_ACTION_NONE: |
| dp->mii_supress_msg = B_TRUE; |
| if (dp->gc.gc_mii_an_oneshot) { |
| goto autonego; |
| } |
| /* PHY will restart autonego automatically */ |
| dp->mii_state = MII_STATE_AUTONEGOTIATING; |
| dp->mii_timer = dp->gc.gc_mii_an_timeout; |
| dp->mii_interval = dp->gc.gc_mii_an_watch_interval; |
| goto next; |
| |
| case MII_ACTION_RSA: |
| if (!dp->mii_supress_msg) { |
| cmn_err(CE_CONT, "!%s: restarting auto-negotiation", |
| dp->name); |
| } |
| dp->mii_supress_msg = B_TRUE; |
| goto autonego; |
| |
| default: |
| cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d", |
| dp->name, dp->gc.gc_mii_linkdown_action); |
| dp->mii_supress_msg = B_TRUE; |
| } |
| /* NOTREACHED */ |
| |
| reset_phy: |
|