blob: 6b5c64363c48b4c6fa75c26b1b52320276cefae3 [file] [log] [blame]
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2007, Intel Corporation
* All rights reserved.
*/
/*
* Copyright (c) 2006
* Copyright (c) 2007
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
*/
#include <sys/types.h>
#include <sys/byteorder.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsubr.h>
#include <sys/ethernet.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/mi.h>
#include <sys/note.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/modctl.h>
#include <sys/devops.h>
#include <sys/dlpi.h>
#include <sys/mac_provider.h>
#include <sys/mac_wifi.h>
#include <sys/net80211.h>
#include <sys/net80211_proto.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/pci.h>
#include "iwk_calibration.h"
#include "iwk_hw.h"
#include "iwk_eeprom.h"
#include "iwk2_var.h"
#include <inet/wifi_ioctl.h>
#ifdef DEBUG
/*
 * Debug message categories.  Each bit enables one class of IWK_DBG()
 * output; set the corresponding bits in iwk_dbg_flags to see them.
 */
#define IWK_DEBUG_80211 (1 << 0)
#define IWK_DEBUG_CMD (1 << 1)
#define IWK_DEBUG_DMA (1 << 2)
#define IWK_DEBUG_EEPROM (1 << 3)
#define IWK_DEBUG_FW (1 << 4)
#define IWK_DEBUG_HW (1 << 5)
#define IWK_DEBUG_INTR (1 << 6)
#define IWK_DEBUG_MRR (1 << 7)
#define IWK_DEBUG_PIO (1 << 8)
#define IWK_DEBUG_RX (1 << 9)
#define IWK_DEBUG_SCAN (1 << 10)
#define IWK_DEBUG_TX (1 << 11)
#define IWK_DEBUG_RATECTL (1 << 12)
#define IWK_DEBUG_RADIO (1 << 13)
#define IWK_DEBUG_RESUME (1 << 14)
#define IWK_DEBUG_CALIBRATION (1 << 15)
/* bitmask of enabled debug categories; all off by default */
uint32_t iwk_dbg_flags = 0;
/* usage: IWK_DBG((IWK_DEBUG_XXX, fmt, ...)) -- double parens required */
#define IWK_DBG(x) \
iwk_dbg x
#else
/* non-DEBUG build: debug statements compile away entirely */
#define IWK_DBG(x)
#endif
/* per-instance soft-state anchor managed by the ddi_soft_state_*() calls */
static void *iwk_soft_state_p = NULL;
/*
 * Firmware image, embedded into the driver at compile time from a hex
 * dump of the ucode file.  Section layout is described in
 * iwk_alloc_fw_dma().
 */
static uint8_t iwk_fw_bin [] = {
#include "fw-iw/iw4965.ucode.hex"
};
/*
 * The DMA attribute sets below all restrict transfers to 32-bit
 * addresses and a single segment (one cookie, contiguous memory);
 * they differ mainly in the alignment the hardware requires.
 */
/* DMA attributes for a shared page */
static ddi_dma_attr_t sh_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
0x1000, /* alignment in bytes */
0x1000, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* DMA attributes for a keep warm DRAM descriptor */
static ddi_dma_attr_t kw_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
0x1000, /* alignment in bytes */
0x1000, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* DMA attributes for a ring descriptor */
static ddi_dma_attr_t ring_desc_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
0x100, /* alignment in bytes */
0x100, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* DMA attributes for a cmd */
static ddi_dma_attr_t cmd_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
4, /* alignment in bytes */
0x100, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* DMA attributes for a rx buffer */
static ddi_dma_attr_t rx_buffer_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
0x100, /* alignment in bytes */
0x100, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum number of segments 1,
 * just the same as the rx_buffer. we may leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0xffffffffU, /* maximum DMAable byte count */
4, /* alignment in bytes */
0x100, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* DMA attributes for text and data part in the firmware */
static ddi_dma_attr_t fw_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffU, /* highest usable address */
0x7fffffff, /* maximum DMAable byte count */
0x10, /* alignment in bytes */
0x100, /* burst sizes (any?) */
1, /* minimum transfer */
0xffffffffU, /* maximum transfer */
0xffffffffU, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/* regs access attributes: device registers are little-endian */
static ddi_device_acc_attr_t iwk_reg_accattr = {
DDI_DEVICE_ATTR_V0,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC,
DDI_DEFAULT_ACC
};
/* DMA access attributes for Descriptor: little-endian, byte-swapped by DDI */
static ddi_device_acc_attr_t iwk_dma_descattr = {
DDI_DEVICE_ATTR_V0,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC,
DDI_DEFAULT_ACC
};
/* DMA access attributes: raw buffers, no endianness conversion */
static ddi_device_acc_attr_t iwk_dma_accattr = {
DDI_DEVICE_ATTR_V0,
DDI_NEVERSWAP_ACC,
DDI_STRICTORDER_ACC,
DDI_DEFAULT_ACC
};
static int iwk_ring_init(iwk_sc_t *);
static void iwk_ring_free(iwk_sc_t *);
static int iwk_alloc_shared(iwk_sc_t *);
static void iwk_free_shared(iwk_sc_t *);
static int iwk_alloc_kw(iwk_sc_t *);
static void iwk_free_kw(iwk_sc_t *);
static int iwk_alloc_fw_dma(iwk_sc_t *);
static void iwk_free_fw_dma(iwk_sc_t *);
static int iwk_alloc_rx_ring(iwk_sc_t *);
static void iwk_reset_rx_ring(iwk_sc_t *);
static void iwk_free_rx_ring(iwk_sc_t *);
static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
int, int);
static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
static void iwk_node_free(ieee80211_node_t *);
static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static void iwk_mac_access_enter(iwk_sc_t *);
static void iwk_mac_access_exit(iwk_sc_t *);
static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t);
static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
uint32_t *, int);
static int iwk_load_firmware(iwk_sc_t *);
static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
iwk_rx_data_t *);
static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
iwk_rx_data_t *);
static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
static uint_t iwk_intr(caddr_t, caddr_t);
static int iwk_eep_load(iwk_sc_t *sc);
static void iwk_get_mac_from_eep(iwk_sc_t *sc);
static int iwk_eep_sem_down(iwk_sc_t *sc);
static void iwk_eep_sem_up(iwk_sc_t *sc);
static uint_t iwk_rx_softintr(caddr_t, caddr_t);
static uint8_t iwk_rate_to_plcp(int);
static int iwk_cmd(iwk_sc_t *, int, const void *, int, int);
static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
static int iwk_hw_set_before_auth(iwk_sc_t *);
static int iwk_scan(iwk_sc_t *);
static int iwk_config(iwk_sc_t *);
static void iwk_stop_master(iwk_sc_t *);
static int iwk_power_up(iwk_sc_t *);
static int iwk_preinit(iwk_sc_t *);
static int iwk_init(iwk_sc_t *);
static void iwk_stop(iwk_sc_t *);
static void iwk_amrr_init(iwk_amrr_t *);
static void iwk_amrr_timeout(iwk_sc_t *);
static void iwk_amrr_ratectl(void *, ieee80211_node_t *);
static int32_t iwk_curr_tempera(iwk_sc_t *sc);
static int iwk_tx_power_calibration(iwk_sc_t *sc);
static inline int iwk_is_24G_band(iwk_sc_t *sc);
static inline int iwk_is_fat_channel(iwk_sc_t *sc);
static int iwk_txpower_grp(uint16_t channel);
static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
uint16_t channel,
int is_24G, int is_fat, int is_hi_chan);
static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel);
static int iwk_division(int32_t num, int32_t denom, int32_t *res);
static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
int32_t x2, int32_t y2);
static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
struct iwk_eep_calib_channel_info *chan_info);
static int32_t iwk_voltage_compensation(int32_t eep_voltage,
int32_t curr_voltage);
static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
struct iwk_tx_power_db *tp_db);
static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
static int iwk_is_associated(iwk_sc_t *sc);
static int iwk_rxgain_diff_init(iwk_sc_t *sc);
static int iwk_rxgain_diff(iwk_sc_t *sc);
static int iwk_rx_sens_init(iwk_sc_t *sc);
static int iwk_rx_sens(iwk_sc_t *sc);
static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
static void iwk_write_event_log(iwk_sc_t *);
static void iwk_write_error_log(iwk_sc_t *);
static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int iwk_quiesce(dev_info_t *dip);
/*
* GLD specific operations
*/
static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
static int iwk_m_start(void *arg);
static void iwk_m_stop(void *arg);
static int iwk_m_unicst(void *arg, const uint8_t *macaddr);
static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
static int iwk_m_promisc(void *arg, boolean_t on);
static mblk_t *iwk_m_tx(void *arg, mblk_t *mp);
static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static int iwk_m_setprop(void *arg, const char *pr_name,
mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
static int iwk_m_getprop(void *arg, const char *pr_name,
mac_prop_id_t wldp_pr_name, uint_t wldp_length, void *wldp_buf);
static void iwk_m_propinfo(void *arg, const char *pr_name,
mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
static void iwk_destroy_locks(iwk_sc_t *sc);
static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
static void iwk_thread(iwk_sc_t *sc);
static void iwk_watchdog(void *arg);
static int iwk_run_state_config_ibss(ieee80211com_t *ic);
static int iwk_run_state_config_sta(ieee80211com_t *ic);
static int iwk_fast_recover(iwk_sc_t *sc);
static int iwk_start_tx_beacon(ieee80211com_t *ic);
static int iwk_clean_add_node_ibss(struct ieee80211com *ic,
uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
/*
* Supported rates for 802.11b/g modes (in 500Kbps unit).
* 11a and 11n support will be added later.
*/
/* 11b: 1, 2, 5.5 and 11 Mbit/s (entries are in 500Kbps units) */
static const struct ieee80211_rateset iwk_rateset_11b =
{ 4, { 2, 4, 11, 22 } };
/* 11g: the 11b rates plus OFDM 6, 9, 12, 18, 24, 36, 48 and 54 Mbit/s */
static const struct ieee80211_rateset iwk_rateset_11g =
{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
/*
* For mfthread only
*/
extern pri_t minclsyspri;
#define DRV_NAME_4965 "iwk"
/*
* Module Loading Data & Entry Points
*/
/*
 * Standard DDI device operations.  getinfo/probe are nulldev; the
 * iwk_quiesce entry supports fast reboot.
 */
DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
/* module linkage: a single driver module */
static struct modldrv iwk_modldrv = {
&mod_driverops,
"Intel(R) 4965AGN driver(N)",
&iwk_devops
};
static struct modlinkage iwk_modlinkage = {
MODREV_1,
&iwk_modldrv,
NULL
};
/*
 * _init(9E): set up per-instance soft-state bookkeeping, hook the GLDv3
 * MAC framework into our dev_ops, then install the module.  On install
 * failure both steps are undone.
 */
int
_init(void)
{
	int rv;

	rv = ddi_soft_state_init(&iwk_soft_state_p,
	    sizeof (iwk_sc_t), 1);
	if (rv != DDI_SUCCESS)
		return (rv);

	mac_init_ops(&iwk_devops, DRV_NAME_4965);

	rv = mod_install(&iwk_modlinkage);
	if (rv != DDI_SUCCESS) {
		/* unwind in reverse order of setup */
		mac_fini_ops(&iwk_devops);
		ddi_soft_state_fini(&iwk_soft_state_p);
	}
	return (rv);
}
/*
 * _fini(9E): remove the module; only if removal succeeds may the MAC
 * hooks and soft-state bookkeeping be released.
 */
int
_fini(void)
{
	int rv;

	rv = mod_remove(&iwk_modlinkage);
	if (rv == DDI_SUCCESS) {
		/* module is gone; release framework and soft-state hooks */
		mac_fini_ops(&iwk_devops);
		ddi_soft_state_fini(&iwk_soft_state_p);
	}
	return (rv);
}
/*
 * _info(9E): report module information through the modlinkage.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwk_modlinkage, mip));
}
/*
* Mac Call Back entries
*/
/*
 * GLDv3 MAC callback vector.  The first word flags which optional
 * entry points (ioctl/setprop/getprop/propinfo) are present.
 * Field names below follow mac_callbacks(9S) -- confirm against the
 * mac_provider.h in use.
 */
mac_callbacks_t iwk_m_callbacks = {
MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
iwk_m_stat, /* mc_getstat */
iwk_m_start, /* mc_start */
iwk_m_stop, /* mc_stop */
iwk_m_promisc, /* mc_setpromisc */
iwk_m_multicst, /* mc_multicst */
iwk_m_unicst, /* mc_unicst */
iwk_m_tx, /* mc_tx */
NULL, /* mc_reserved */
iwk_m_ioctl, /* mc_ioctl */
NULL, /* mc_getcapab */
NULL, /* mc_open */
NULL, /* mc_close */
iwk_m_setprop, /* mc_setprop */
iwk_m_getprop, /* mc_getprop */
iwk_m_propinfo /* mc_propinfo */
};
#ifdef DEBUG
/*
 * Emit a debug message via vcmn_err(), but only when the message's
 * category bit is enabled in the global iwk_dbg_flags mask.
 */
void
iwk_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list ap;

	if ((flags & iwk_dbg_flags) == 0)
		return;

	va_start(ap, fmt);
	vcmn_err(CE_NOTE, fmt, ap);
	va_end(ap);
}
#endif
/*
* device operations
*/
/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH brings up a new instance: map the PCI config and device
 * registers, set up the single fixed interrupt, locks and condition
 * variables, allocate the shared/keep-warm/ring/firmware DMA areas,
 * initialize the net80211 layer and register with the GLDv3 MAC
 * framework.
 *
 * DDI_RESUME clears the suspend flag and re-initializes the NIC if it
 * was running at suspend time.
 *
 * BUG FIX in this revision: when mac_alloc() returned NULL, `err' was
 * still DDI_SUCCESS from the preceding ddi_intr_enable() call, so the
 * failed attach unwound its resources and then returned success.  The
 * branch now sets err = DDI_FAILURE before jumping to the unwind chain.
 */
int
iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwk_sc_t *sc;
	ieee80211com_t *ic;
	int instance, err, i;
	char strbuf[32];
	wifi_data_t wd = { 0 };
	mac_register_t *macp;

	int intr_type;
	int intr_count;
	int intr_actual;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		sc = ddi_get_soft_state(iwk_soft_state_p,
		    ddi_get_instance(dip));
		ASSERT(sc != NULL);

		mutex_enter(&sc->sc_glock);
		sc->sc_flags &= ~IWK_F_SUSPEND;
		mutex_exit(&sc->sc_glock);

		/* restart the hardware only if it was running at suspend */
		if (sc->sc_flags & IWK_F_RUNNING)
			(void) iwk_init(sc);

		mutex_enter(&sc->sc_glock);
		sc->sc_flags |= IWK_F_LAZY_RESUME;
		mutex_exit(&sc->sc_glock);

		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
		return (DDI_SUCCESS);
	default:
		err = DDI_FAILURE;
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to allocate soft state\n");
		goto attach_fail1;
	}
	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
	sc->sc_dip = dip;

	/* map PCI configuration space (register set 0) */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwk_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to map config spaces regs\n");
		goto attach_fail2;
	}
	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
	/* clear config register 0x41 -- NOTE(review): purpose not obvious */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz)
		sc->sc_clsz = 16;
	sc->sc_clsz = (sc->sc_clsz << 2);
	/* rx buffer size: room for a max-size frame, cache-line rounded */
	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to map device regs\n");
		goto attach_fail2a;
	}

	/* the driver supports exactly one fixed (INTx) interrupt */
	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "Fixed type interrupt is not supported\n");
		goto attach_fail_intr_a;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "No fixed interrupts\n");
		goto attach_fail_intr_a;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail_intr_b;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail_intr_c;
	}

	/* all driver mutexes are initialized at interrupt priority */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared page.
	 */
	err = iwk_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail3;
	}

	/*
	 * Allocate keep warm page.
	 */
	err = iwk_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail3a;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwk_preinit(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to init hardware\n");
		goto attach_fail4;
	}

	/* initialize EEPROM */
	err = iwk_eep_load(sc);	/* get hardware configurations from eeprom */
	if (err != 0) {
		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
		goto attach_fail4;
	}

	if (LE_16(sc->sc_eep_map.calib_version) < EEP_TX_POWER_VERSION_NEW) {
		cmn_err(CE_WARN, "older EEPROM detected\n");
		goto attach_fail4;
	}

	iwk_get_mac_from_eep(sc);

	err = iwk_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail4;
	}

	/* the firmware header sits at the front of the embedded image */
	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;

	err = iwk_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail5;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * generic layer
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * use software WEP and TKIP, hardware CCMP;
	 */
	ic->ic_caps |= IEEE80211_C_AES_CCM;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;
	/*
	 * support Adhoc mode
	 */
	ic->ic_caps |= IEEE80211_C_IBSS;

	/* set supported .11b and .11g rates */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;

	/* set supported .11b and .11g channels (1 through 11) */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}
	ic->ic_ibss_chan = &ic->ic_sup_channels[0];

	ic->ic_xmit = iwk_send;

	/*
	 * init Wifi layer
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Override 80211 default routines
	 */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwk_newstate;
	ic->ic_watchdog = iwk_watchdog;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = iwk_recv_mgmt;
	ic->ic_node_alloc = iwk_node_alloc;
	ic->ic_node_free = iwk_node_free;
	ic->ic_crypto.cs_key_set = iwk_key_set;
	ieee80211_media_init(ic);
	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwk_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail7;
	}

	/*
	 * Add the interrupt handler
	 */
	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail8;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail_intr_d;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	macp = mac_alloc(MAC_VERSION);
	if (macp == NULL) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_alloc()\n");
		/*
		 * err still holds DDI_SUCCESS from ddi_intr_enable();
		 * without this the failed attach would return success.
		 */
		err = DDI_FAILURE;
		goto attach_fail9;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwk_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_register()\n");
		goto attach_fail9;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI.
	 * Failure here is warned about but deliberately not fatal.
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS)
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do ddi_create_minor_node()\n");

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (sc->sc_mf_thread == NULL)
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);

	sc->sc_flags |= IWK_F_ATTACHED;

	return (DDI_SUCCESS);

	/* error unwinding: each label undoes the steps acquired before it */
attach_fail9:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail_intr_d:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail8:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail7:
	ieee80211_detach(ic);
attach_fail6:	/* NOTE(review): label currently unreferenced */
	iwk_free_fw_dma(sc);
attach_fail5:
	iwk_ring_free(sc);
attach_fail4:
	iwk_free_kw(sc);
attach_fail3a:
	iwk_free_shared(sc);
attach_fail3:
	iwk_destroy_locks(sc);
attach_fail_intr_c:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail_intr_b:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail_intr_a:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail2a:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwk_soft_state_p, instance);
attach_fail1:
	return (err);
}
/*
 * detach(9E) entry point.
 *
 * DDI_DETACH tears the instance down in roughly the reverse order of
 * iwk_attach(); DDI_SUSPEND just marks the driver suspended and stops
 * the hardware if it was running.
 */
int
iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
iwk_sc_t *sc;
int err;
sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
ASSERT(sc != NULL);
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
mutex_enter(&sc->sc_glock);
sc->sc_flags |= IWK_F_SUSPEND;
mutex_exit(&sc->sc_glock);
if (sc->sc_flags & IWK_F_RUNNING) {
iwk_stop(sc);
}
IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/* refuse to detach an instance that never finished attaching */
if (!(sc->sc_flags & IWK_F_ATTACHED))
return (DDI_FAILURE);
/* stop the MAC layer first; bail out if it cannot be disabled */
err = mac_disable(sc->sc_ic.ic_mach);
if (err != DDI_SUCCESS)
return (err);
/*
 * Destroy the mf_thread: clear its run switch, then wait (signal-
 * interruptibly) for the thread to clear sc_mf_thread on its way out.
 */
mutex_enter(&sc->sc_mt_lock);
sc->sc_mf_thread_switch = 0;
while (sc->sc_mf_thread != NULL) {
if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
break;
}
mutex_exit(&sc->sc_mt_lock);
iwk_stop(sc);
/* 500ms settle delay -- NOTE(review): rationale undocumented */
DELAY(500000);
/*
 * Unregister from the MAC layer subsystem
 */
(void) mac_unregister(sc->sc_ic.ic_mach);
/* release all DMA resources under the general lock */
mutex_enter(&sc->sc_glock);
iwk_free_fw_dma(sc);
iwk_ring_free(sc);
iwk_free_kw(sc);
iwk_free_shared(sc);
mutex_exit(&sc->sc_glock);
/* tear down interrupts in reverse order of attach */
(void) ddi_intr_disable(sc->sc_intr_htable[0]);
(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
(void) ddi_intr_free(sc->sc_intr_htable[0]);
kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
sc->sc_soft_hdl = NULL;
/*
 * detach ieee80211
 */
ieee80211_detach(&sc->sc_ic);
iwk_destroy_locks(sc);
ddi_regs_map_free(&sc->sc_handle);
ddi_regs_map_free(&sc->sc_cfg_handle);
ddi_remove_minor_node(dip, NULL);
ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
return (DDI_SUCCESS);
}
/*
* quiesce(9E) entry point.
*
* This function is called when the system is single-threaded at high
* PIL with preemption disabled. Therefore, this function must not be
* blocked.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
* DDI_FAILURE indicates an error condition and should almost never happen.
*/
/*
 * quiesce(9E): silence the device for fast reboot.  Runs single-
 * threaded at high PIL, so no locks may be taken and nothing printed.
 */
int
iwk_quiesce(dev_info_t *dip)
{
	iwk_sc_t *sc;

	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);

#ifdef DEBUG
	/* suppress all debug output; printing is illegal here */
	iwk_dbg_flags = 0;
#endif

	/*
	 * Mark the instance quiesced before stopping so that iwk_stop()
	 * can avoid actions that are unsafe at this point -- presumably
	 * it checks IWK_F_QUIESCED; confirm in iwk_stop().
	 */
	sc->sc_flags |= IWK_F_QUIESCED;
	iwk_stop(sc);

	return (DDI_SUCCESS);
}
/*
 * Tear down every mutex and condition variable created in iwk_attach().
 */
static void
iwk_destroy_locks(iwk_sc_t *sc)
{
	cv_destroy(&sc->sc_mt_cv);
	mutex_destroy(&sc->sc_mt_lock);
	cv_destroy(&sc->sc_tx_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_fw_cv);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
	/*
	 * BUG FIX: sc_ibss.node_tb_lock is initialized in iwk_attach()
	 * but was never destroyed here, leaking the mutex on detach.
	 */
	mutex_destroy(&sc->sc_ibss.node_tb_lock);
}
/*
* Allocate an area of memory and a DMA handle for accessing it
*/
/*
 * Allocate `memsize' bytes of DMA-able memory in three DDI steps:
 * allocate the DMA handle, allocate the memory, and bind the two.
 * On success dma_p is fully filled in; on any failure the partially
 * acquired resources are released and the handles in dma_p are NULLed,
 * so a later iwk_free_dma_mem() on it is safe.
 */
static int
iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
uint_t dma_flags, iwk_dma_t *dma_p)
{
caddr_t vaddr;
int err;
/*
 * Allocate handle
 */
err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
if (err != DDI_SUCCESS) {
dma_p->dma_hdl = NULL;
return (DDI_FAILURE);
}
/*
 * Allocate memory
 */
err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
if (err != DDI_SUCCESS) {
ddi_dma_free_handle(&dma_p->dma_hdl);
dma_p->dma_hdl = NULL;
dma_p->acc_hdl = NULL;
return (DDI_FAILURE);
}
/*
 * Bind the two together
 */
dma_p->mem_va = vaddr;
err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
&dma_p->cookie, &dma_p->ncookies);
if (err != DDI_DMA_MAPPED) {
ddi_dma_mem_free(&dma_p->acc_hdl);
ddi_dma_free_handle(&dma_p->dma_hdl);
dma_p->acc_hdl = NULL;
dma_p->dma_hdl = NULL;
return (DDI_FAILURE);
}
/* sentinel values; not consumed in the code visible here -- TODO confirm */
dma_p->nslots = ~0U;
dma_p->size = ~0U;
dma_p->token = ~0U;
dma_p->offset = 0;
return (DDI_SUCCESS);
}
/*
* Free one allocated area of DMAable memory
*/
/*
 * Release one area of DMA-able memory previously set up by
 * iwk_alloc_dma_mem().  Safe to call on a zeroed or already-freed
 * iwk_dma_t: every step is guarded and the handles are NULLed.
 */
static void
iwk_free_dma_mem(iwk_dma_t *dma_p)
{
	/* unbind and release the DMA handle, if one exists */
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies != 0) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	/* release the backing memory, if any */
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
*
*/
/*
 * Copy the firmware sections out of the embedded image into freshly
 * allocated DMA memory so they can be handed to the NIC.
 *
 * firmware image layout:
 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
 *
 * BUG FIX in this revision: on a partial failure the previously
 * allocated sections were leaked (the caller jumps to a label that
 * skips iwk_free_fw_dma()).  The fail path now releases whatever was
 * allocated; iwk_free_dma_mem() is a no-op on untouched entries.
 */
static int
iwk_alloc_fw_dma(iwk_sc_t *sc)
{
	int err = DDI_SUCCESS;
	iwk_dma_t *dma_p;
	char *t;

	/* the sections follow the header back to back */
	t = (char *)(sc->sc_hdr + 1);

	/* runtime text */
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	dma_p = &sc->sc_dma_fw_text;
	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " text dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
	t += LE_32(sc->sc_hdr->textsz);

	/* runtime data */
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	dma_p = &sc->sc_dma_fw_data;
	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * second copy of the data section -- apparently kept as a
	 * pristine backup; confirm usage where sc_dma_fw_data_bak is read
	 */
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	dma_p = &sc->sc_dma_fw_data_bak;
	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data backup dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
	t += LE_32(sc->sc_hdr->datasz);

	/* init text */
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	dma_p = &sc->sc_dma_fw_init_text;
	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init text dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
	t += LE_32(sc->sc_hdr->init_textsz);

	/* init data */
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	dma_p = &sc->sc_dma_fw_init_data;
	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/* remember where the boot section starts in the host image */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	if (err != DDI_SUCCESS)
		iwk_free_fw_dma(sc);	/* release partial allocations */
	return (err);
}
/*
 * Release every firmware DMA area set up by iwk_alloc_fw_dma().
 * iwk_free_dma_mem() is a no-op on entries that were never allocated.
 */
static void
iwk_free_fw_dma(iwk_sc_t *sc)
{
	iwk_dma_t *fw_segs[] = {
		&sc->sc_dma_fw_text,
		&sc->sc_dma_fw_data,
		&sc->sc_dma_fw_data_bak,
		&sc->sc_dma_fw_init_text,
		&sc->sc_dma_fw_init_data
	};
	int i;

	for (i = 0; i < sizeof (fw_segs) / sizeof (fw_segs[0]); i++)
		iwk_free_dma_mem(fw_segs[i]);
}
/*
* Allocate a shared page between host and NIC.
*/
/*
 * Allocate the page shared between host and NIC and publish its
 * virtual address in sc->sc_shared.
 */
static int
iwk_alloc_shared(iwk_sc_t *sc)
{
iwk_dma_t *dma_p;
int err = DDI_SUCCESS;
/* must be aligned on a 4K-page boundary */
err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
&sh_dma_attr, &iwk_dma_descattr,
DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
&sc->sc_dma_sh);
if (err != DDI_SUCCESS)
goto fail;
sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
dma_p = &sc->sc_dma_sh;
IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
dma_p->ncookies, dma_p->cookie.dmac_address,
dma_p->cookie.dmac_size));
return (err);
fail:
/* iwk_free_shared() tolerates a never-completed allocation */
iwk_free_shared(sc);
return (err);
}
/* Release the host/NIC shared page allocated by iwk_alloc_shared(). */
static void
iwk_free_shared(iwk_sc_t *sc)
{
iwk_free_dma_mem(&sc->sc_dma_sh);
}
/*
* Allocate a keep warm page.
*/
/*
 * Allocate the "keep warm" page (IWK_KW_SIZE bytes of DMA memory)
 * into sc->sc_dma_kw.
 */
static int
iwk_alloc_kw(iwk_sc_t *sc)
{
	int rv;
	iwk_dma_t *dma_p;

	/* must be aligned on a 4K-page boundary */
	rv = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
	    &kw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (rv != DDI_SUCCESS) {
		/* iwk_free_kw() tolerates a never-completed allocation */
		iwk_free_kw(sc);
		return (rv);
	}

	dma_p = &sc->sc_dma_kw;
	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (rv);
}
/* Release the keep warm page allocated by iwk_alloc_kw(). */
static void
iwk_free_kw(iwk_sc_t *sc)
{
iwk_free_dma_mem(&sc->sc_dma_kw);
}
/*
 * Allocate the Rx descriptor ring and its data buffers.
 *
 * The descriptor ring is an array of RX_QUEUE_SIZE 32-bit entries shared
 * with the hardware; each entry holds the (shifted) DMA address of one
 * receive buffer.  On any failure everything allocated so far is torn
 * down via iwk_free_rx_ring().  Returns DDI_SUCCESS or a DDI error.
 */
static int
iwk_alloc_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring;
	iwk_rx_data_t *data;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/* descriptor array itself: device-visible, coherent mapping */
	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwk_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
		goto fail;
	}
	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		/* streaming mapping: hardware writes, CPU reads */
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
			    "failed\n", i);
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

	dma_p = &ring->data[0].dma_data;
	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* hand the filled descriptor array to the device */
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwk_free_rx_ring(sc);
	return (err);
}
/*
 * Stop the Rx DMA channel and reset the software ring index.
 * Disables channel 0, then polls the Rx status register (up to ~2s)
 * for the idle indication (bit 24) before giving up with a debug note.
 */
static void
iwk_reset_rx_ring(iwk_sc_t *sc)
{
	int n;

	iwk_mac_access_enter(sc);
	/* writing 0 to the channel config register stops Rx DMA */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		/* bit 24: channel idle — TODO confirm against 4965 specs */
		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
			break;
		DELAY(1000);
	}
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
	iwk_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
/*
 * Tear down the Rx ring: sync and free each receive buffer, then the
 * descriptor array.  Buffers never allocated (NULL handle) are skipped
 * for the sync but still passed to iwk_free_dma_mem(), which is a no-op
 * for them.
 */
static void
iwk_free_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring = &sc->sc_rxq;
	int idx;

	for (idx = 0; idx < RX_QUEUE_SIZE; idx++) {
		if (ring->data[idx].dma_data.dma_hdl != NULL) {
			IWK_DMA_SYNC(ring->data[idx].dma_data,
			    DDI_DMA_SYNC_FORCPU);
		}
		iwk_free_dma_mem(&ring->data[idx].dma_data);
	}

	if (ring->dma_desc.dma_hdl != NULL) {
		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	}
	iwk_free_dma_mem(&ring->dma_desc);
}
/*
 * Allocate one Tx ring: the hardware descriptor array, the command
 * array, and per-slot data buffers.
 *
 * 'slots' is recorded as the ring window; 'qid' is the hardware queue
 * number.  Returns DDI_SUCCESS, or a DDI error after releasing
 * everything allocated so far.
 *
 * Fixes over the previous revision:
 *  - a kmem_zalloc() failure used to "goto fail" with err still
 *    DDI_SUCCESS, so callers saw success for a failed allocation;
 *  - the fail path used to kmem_free(ring->data) and then call
 *    iwk_free_tx_ring(), which frees ring->data again — a
 *    use-after-free/double-free.  iwk_free_tx_ring() alone performs
 *    the complete cleanup.
 */
static int
iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
    int slots, int qid)
{
	iwk_tx_data_t *data;
	iwk_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwk_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;

	/* hardware TFD descriptor array (device-visible, coherent) */
	err = iwk_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
	    &ring_desc_dma_attr, &iwk_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
		    "failed\n", qid);
		goto fail;
	}
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/* per-slot command blocks, one contiguous DMA region */
	err = iwk_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
	    &cmd_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
		    "failed\n", qid);
		goto fail;
	}
	dma_p = &ring->dma_cmd;
	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx buffers.
	 */
	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (ring->data == NULL) {
		cmn_err(CE_WARN, "could not allocate tx data slots\n");
		/* was missing: without this the caller saw DDI_SUCCESS */
		err = DDI_FAILURE;
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "dma alloc tx ring "
			    "buf[%d] failed\n", i);
			goto fail;
		}
		/* point each slot at its descriptor/command pair */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h + i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
	dma_p = &ring->data[0].dma_data;
	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	/*
	 * iwk_free_tx_ring() releases the descriptor array, the command
	 * array, every per-slot buffer and ring->data itself.  Do NOT
	 * also kmem_free(ring->data) here (double free).
	 */
	iwk_free_tx_ring(sc, ring);
	return (err);
}
/*
 * Quiesce one Tx DMA channel and reset the ring's software state.
 * Disables the channel, polls (up to ~2ms) for the channel-idle bit,
 * syncs every data buffer back to the device, and clears the queued
 * count and cursor.
 */
static void
iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	iwk_tx_data_t *data;
	int i, n;

	iwk_mac_access_enter(sc);

	/* writing 0 to the channel config register stops Tx DMA */
	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
			break;
		DELAY(10);
	}
	if (n == 200) {
		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
		    ring->qid));
	}
	iwk_mac_access_exit(sc);

	for (i = 0; i < ring->count; i++) {
		data = &ring->data[i];
		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	}

	ring->queued = 0;
	ring->cur = 0;
}
/*ARGSUSED*/
/*
 * Release everything iwk_alloc_tx_ring() allocated for one ring:
 * the descriptor array, the command array, each per-slot DMA buffer,
 * and the ring->data slot array itself.  Safe on partially-allocated
 * rings (NULL handles/pointers are skipped).
 */
static void
iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	int idx;

	if (ring->dma_desc.dma_hdl != NULL) {
		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	}
	iwk_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL) {
		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	}
	iwk_free_dma_mem(&ring->dma_cmd);

	if (ring->data == NULL)
		return;

	for (idx = 0; idx < ring->count; idx++) {
		if (ring->data[idx].dma_data.dma_hdl) {
			IWK_DMA_SYNC(ring->data[idx].dma_data,
			    DDI_DMA_SYNC_FORDEV);
		}
		iwk_free_dma_mem(&ring->data[idx].dma_data);
	}
	kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
}
/*
 * Allocate all Tx rings (data queues plus the command queue) and the
 * Rx ring.  Returns the first DDI error encountered; on failure the
 * caller is expected to run iwk_ring_free() for cleanup, as before.
 */
static int
iwk_ring_init(iwk_sc_t *sc)
{
	int qid;
	int err;

	/* data queues first; the command queue gets its own slot count */
	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
		if (qid == IWK_CMD_QUEUE_NUM)
			continue;
		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[qid],
		    TFD_TX_CMD_SLOTS, qid);
		if (err != DDI_SUCCESS)
			return (err);
	}

	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
	if (err != DDI_SUCCESS)
		return (err);

	return (iwk_alloc_rx_ring(sc));
}
/*
 * Free the Rx ring and every Tx ring (highest queue id first, matching
 * the previous teardown order).
 */
static void
iwk_ring_free(iwk_sc_t *sc)
{
	int qid;

	iwk_free_rx_ring(sc);
	for (qid = IWK_NUM_QUEUES - 1; qid >= 0; qid--)
		iwk_free_tx_ring(sc, &sc->sc_txq[qid]);
}
/* ARGSUSED */
/*
 * net80211 node allocation hook: allocate an AMRR-extended node and
 * initialize its rate-control state.  KM_SLEEP means the allocation
 * blocks rather than fails; the NULL check is kept for safety.
 */
static ieee80211_node_t *
iwk_node_alloc(ieee80211com_t *ic)
{
	iwk_amrr_t *node_amrr;

	node_amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
	if (node_amrr != NULL)
		iwk_amrr_init(node_amrr);
	return (&node_amrr->in);
}
/*
 * net80211 node free hook: run the generic cleanup, release any
 * captured WPA IE, then free the AMRR-sized container the node was
 * allocated in (see iwk_node_alloc()).
 */
static void
iwk_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic;

	ic = in->in_ic;
	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (iwk_amrr_t));
}
/*ARGSUSED*/
/*
 * net80211 state-machine hook (replaces sc_newstate, which it chains
 * to).  Programs the firmware for the transition ostate -> nstate:
 * RXON configuration, broadcast/station table entries, Tx power
 * calibration, LED control, and (after reaching RUN) receiver
 * sensitivity/gain calibration.
 *
 * Locking: sc_glock is held across firmware commands and dropped
 * before calling back into net80211 (sc_newstate); the SCAN->SCAN
 * path temporarily drops/retakes it in exactly that order.
 */
static int
iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	ieee80211_node_t *in = ic->ic_bss;
	enum ieee80211_state ostate = ic->ic_state;
	int i, err = IWK_SUCCESS;

	mutex_enter(&sc->sc_glock);
	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
		{
			iwk_add_sta_t node;
			sc->sc_flags |= IWK_F_SCANNING;
			sc->sc_scan_pending = 0;
			iwk_set_led(sc, 2, 10, 2);
			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);
			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
			    "flags %x filter_flags %x\n", sc->sc_config.chan,
			    sc->sc_config.flags, sc->sc_config.filter_flags));
			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwk_rxon_cmd_t), 1);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN,
				    "could not clear association\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
			node.id = IWK_BROADCAST_ID;
			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "could not add "
				    "broadcast node\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		}
		case IEEE80211_S_AUTH:
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_RUN:
			sc->sc_flags |= IWK_F_SCANNING;
			sc->sc_scan_pending = 0;
			iwk_set_led(sc, 2, 10, 2);
			/* FALLTHRU */
		case IEEE80211_S_SCAN:
			/*
			 * drop the lock while net80211 advances to the
			 * next channel, then retake it for the firmware
			 * scan command
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
				cmn_err(CE_WARN,
				    "could not initiate scan\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;
	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			sc->sc_flags &= ~IWK_F_SCANNING;
		}
		/* reset state to handle reassociations correctly */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
			cmn_err(CE_WARN, "could not setup firmware for "
			    "authentication\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;
	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			sc->sc_flags &= ~IWK_F_SCANNING;
		}
		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			/* let LED blink when monitoring */
			iwk_set_led(sc, 2, 10, 10);
			break;
		}
		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
		/* IBSS mode */
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * clean all nodes in ibss node table
			 * in order to be consistent with hardware
			 */
			err = iwk_run_state_config_ibss(ic);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "iwk_newstate(): "
				    "failed to update configuration "
				    "in IBSS mode\n");
				mutex_exit(&sc->sc_glock);
				return (err);
			}
		}
		/* none IBSS mode */
		if (ic->ic_opmode != IEEE80211_M_IBSS) {
			/* update adapter's configuration */
			err = iwk_run_state_config_sta(ic);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "iwk_newstate(): "
				    "failed to update configuration "
				    "in none IBSS mode\n");
				mutex_exit(&sc->sc_glock);
				return (err);
			}
		}
		/* obtain current temperature of chipset */
		sc->sc_tempera = iwk_curr_tempera(sc);
		/*
		 * make Tx power calibration to determine
		 * the gains of DSP and radio
		 */
		err = iwk_tx_power_calibration(sc);
		if (err) {
			cmn_err(CE_WARN, "iwk_newstate(): "
			    "failed to set tx power table\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * allocate and transmit beacon frames
			 */
			err = iwk_start_tx_beacon(ic);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "iwk_newstate(): "
				    "can't transmit beacon frames\n");
				mutex_exit(&sc->sc_glock);
				return (err);
			}
		}
		/* start automatic rate control */
		mutex_enter(&sc->sc_mt_lock);
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
			/* set rate to some reasonable initial value */
			i = in->in_rates.ir_nrates - 1;
			/* 72 == 36 Mb/s in half-Mb/s units */
			while (i > 0 && IEEE80211_RATE(i) > 72)
				i--;
			in->in_txrate = i;
		} else {
			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
		}
		mutex_exit(&sc->sc_mt_lock);
		/* set LED on after associated */
		iwk_set_led(sc, 2, 0, 1);
		break;
	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			sc->sc_flags &= ~IWK_F_SCANNING;
		}
		/* set LED off after init */
		iwk_set_led(sc, 2, 1, 0);
		break;
	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			sc->sc_flags &= ~IWK_F_SCANNING;
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* chain to the generic net80211 state handler */
	err = sc->sc_newstate(ic, nstate, arg);

	if (nstate == IEEE80211_S_RUN) {
		mutex_enter(&sc->sc_glock);
		/*
		 * make initialization for Receiver
		 * sensitivity calibration
		 */
		err = iwk_rx_sens_init(sc);
		if (err) {
			cmn_err(CE_WARN, "iwk_newstate(): "
			    "failed to init RX sensitivity\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		/* make initialization for Receiver gain balance */
		err = iwk_rxgain_diff_init(sc);
		if (err) {
			cmn_err(CE_WARN, "iwk_newstate(): "
			    "failed to init phy calibration\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		mutex_exit(&sc->sc_glock);
	}
	return (err);
}
/*
 * Periodic watchdog (ic_watchdog_timer callback).  Only acts while the
 * interface is mid-handshake (AUTH/ASSOC): after repeated failures it
 * resets to INIT, otherwise it re-arms the net80211 watchdog.
 * timeout_id exists only for the debug messages below.
 */
static void
iwk_watchdog(void *arg)
{
	iwk_sc_t *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
#ifdef DEBUG
	timeout_id_t timeout_id = ic->ic_watchdog_timer;
#endif

	ieee80211_stop_watchdog(ic);

	/* nothing to supervise outside the AUTH/ASSOC handshake */
	if ((ic->ic_state != IEEE80211_S_AUTH) &&
	    (ic->ic_state != IEEE80211_S_ASSOC))
		return;

	if (ic->ic_bss->in_fails > 0) {
		/* too many failures: fall back to INIT and start over */
		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
		    "node (0x%x)\n", timeout_id, &ic->ic_bss));
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	} else {
		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
		    "node (0x%x), retry (%d)\n",
		    timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
		ieee80211_watchdog(ic);
	}
}
/*ARGSUSED*/
/*
 * net80211 key-set hook.  WEP and TKIP are left to the software crypto
 * code (return 1 without touching hardware); only AES-CCMP keys are
 * programmed into the firmware station table via REPLY_ADD_STA.
 *
 * Returns 1 on success, 0 on failure (net80211 convention).
 *
 * Locking: sc_glock protects the firmware command path.  In IBSS mode
 * the glock is dropped and node_tb_lock taken to search the soft node
 * table; glock is retaken (inside node_tb_lock) around the firmware
 * command.  NOTE(review): lock order glock -> node_tb_lock vs.
 * node_tb_lock -> glock differs between paths here — verify against
 * the driver's lock-ordering rules.
 */
static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_add_sta_t node;
	int err;
	uint8_t index1;

	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
	case IEEE80211_CIPHER_TKIP:
		return (1); /* software handles these ciphers */
	case IEEE80211_CIPHER_AES_CCM:
		break;
	default:
		return (0);
	}

	/* re-enable hardware decryption for CCMP */
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_DIS_DECRYPT_MSK |
	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);

	mutex_enter(&sc->sc_glock);
	/* update ap/multicast node */
	(void) memset(&node, 0, sizeof (node));
	if (IEEE80211_IS_MULTICAST(mac)) {
		/* group key: program the broadcast station entry */
		(void) memset(node.bssid, 0xff, 6);
		node.id = IWK_BROADCAST_ID;
	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
		mutex_exit(&sc->sc_glock);
		mutex_enter(&sc->sc_ibss.node_tb_lock);
		/*
		 * search for node in ibss node table
		 */
		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
		    index1++) {
			if (sc->sc_ibss.ibss_node_tb[index1].used &&
			    IEEE80211_ADDR_EQ(sc->sc_ibss.
			    ibss_node_tb[index1].node.bssid,
			    mac)) {
				break;
			}
		}
		if (index1 >= IWK_BROADCAST_ID) {
			cmn_err(CE_WARN, "iwk_key_set(): "
			    "have no this node in hardware node table\n");
			mutex_exit(&sc->sc_ibss.node_tb_lock);
			return (0);
		} else {
			/*
			 * configure key for given node in hardware
			 */
			if (k->wk_flags & IEEE80211_KEY_XMIT) {
				sc->sc_ibss.ibss_node_tb[index1].
				    node.key_flags = 0;
				sc->sc_ibss.ibss_node_tb[index1].
				    node.keyp = k->wk_keyix;
			} else {
				/*
				 * bit 14 / keyix+4: rx-only key slot —
				 * presumably per 4965 station-key layout;
				 * TODO confirm against hardware docs
				 */
				sc->sc_ibss.ibss_node_tb[index1].
				    node.key_flags = (1 << 14);
				sc->sc_ibss.ibss_node_tb[index1].
				    node.keyp = k->wk_keyix + 4;
			}
			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
			    k->wk_key, k->wk_keylen);
			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
			sc->sc_ibss.ibss_node_tb[index1].node.key_flags =
			    LE_16(sc->sc_ibss.ibss_node_tb[index1].
			    node.key_flags);
			sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
			    STA_MODIFY_KEY_MASK;
			sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
			mutex_enter(&sc->sc_glock);
			err = iwk_cmd(sc, REPLY_ADD_STA,
			    &sc->sc_ibss.ibss_node_tb[index1].node,
			    sizeof (iwk_add_sta_t), 1);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "iwk_key_set(): "
				    "failed to update IBSS node in hardware\n");
				mutex_exit(&sc->sc_glock);
				mutex_exit(&sc->sc_ibss.node_tb_lock);
				return (0);
			}
			mutex_exit(&sc->sc_glock);
		}
		mutex_exit(&sc->sc_ibss.node_tb_lock);
		return (1);
	} else {
		/* infrastructure mode: key goes to the AP station entry */
		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
		node.id = IWK_AP_ID;
	}
	if (k->wk_flags & IEEE80211_KEY_XMIT) {
		node.key_flags = 0;
		node.keyp = k->wk_keyix;
	} else {
		node.key_flags = (1 << 14);
		node.keyp = k->wk_keyix + 4;
	}
	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
	node.key_flags = LE_16(node.key_flags);
	node.sta_mask = STA_MODIFY_KEY_MASK;
	node.control = 1;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_key_set():"
		    "failed to update ap node\n");
		mutex_exit(&sc->sc_glock);
		return (0);
	}
	mutex_exit(&sc->sc_glock);
	return (1);
}
/*
 * Begin exclusive access to the MAC: assert the access-request bit in
 * CSR_GP_CNTRL and poll (up to ~10ms) until the MAC clock is ready and
 * the device is not going to sleep.  Must be balanced with
 * iwk_mac_access_exit().
 */
static void
iwk_mac_access_enter(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	IWK_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWK_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
			break;
		DELAY(10);
	}
	/* on timeout we proceed anyway; register access may fail */
	if (n == 1000)
		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
}
/*
 * End exclusive access to the MAC: clear the access-request bit set by
 * iwk_mac_access_enter(), allowing the device to power-manage again.
 */
static void
iwk_mac_access_exit(iwk_sc_t *sc)
{
	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);

	IWK_WRITE(sc, CSR_GP_CNTRL,
	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
 * Read one 32-bit word of device SRAM via the indirect HBUS target
 * memory registers.  Caller must hold MAC access (iwk_mac_access_enter).
 */
static uint32_t
iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
{
	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
}
/*
 * Write one 32-bit word of device SRAM via the indirect HBUS target
 * memory registers.  Caller must hold MAC access.
 */
static void
iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
/*
 * Read a peripheral (PRPH) register through the indirect HBUS window.
 * The (3 << 24) field accompanies the address in the RADDR register —
 * presumably an access-size/auto-increment control; TODO confirm
 * against the 4965 register documentation.  Caller must hold MAC access.
 */
static uint32_t
iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
}
/*
 * Write a peripheral (PRPH) register through the indirect HBUS window.
 * Mirrors iwk_reg_read(); caller must hold MAC access.
 */
static void
iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
/*
 * Write 'wlen' consecutive 32-bit words to peripheral registers
 * starting at 'addr', byte-swapping each word to little-endian.
 * Caller must hold MAC access.
 */
static void
iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
    uint32_t *data, int wlen)
{
	int idx;

	for (idx = 0; idx < wlen; idx++)
		iwk_reg_write(sc, addr + 4 * idx, LE_32(data[idx]));
}
/*
 * ucode load/initialization steps:
 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
 * BSM contains a small memory that *always* stays powered up, so it can
 * retain the bootstrap program even when the card is in a power-saving
 * power-down state. The BSM loads the small program into ARC processor's
 * instruction memory when triggered by power-up.
 * 2) load Initialize image via bootstrap program.
 * The Initialize image sets up regulatory and calibration data for the
 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
 * The 4965 reply contains calibration data for temperature, voltage and tx gain
 * correction.
 *
 * Returns IWK_SUCCESS, or ETIMEDOUT if the BSM does not finish the
 * transfer within ~10ms.
 *
 * Fix over the previous revision: the timeout path returned without
 * calling iwk_mac_access_exit(), leaving the MAC-access request bit
 * asserted (unbalanced with iwk_mac_access_enter()).
 */
static int
iwk_load_firmware(iwk_sc_t *sc)
{
	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
	uint32_t size = LE_32(sc->sc_hdr->bootsz);
	int n, err = IWK_SUCCESS;

	/*
	 * The physical address bit [4-35] of the initialize uCode.
	 * In the initialize alive notify interrupt the physical address of
	 * the runtime ucode will be set for loading.
	 */
	iwk_mac_access_enter(sc);

	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
	    sc->sc_dma_fw_init_text.cookie.dmac_size);
	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
	    sc->sc_dma_fw_init_data.cookie.dmac_size);

	/* load bootstrap code into BSM memory */
	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
	    size / sizeof (uint32_t));

	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));

	/*
	 * prepare to load initialize uCode
	 */
	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* wait while the adapter is busy loading the firmware */
	for (n = 0; n < 1000; n++) {
		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
		    BSM_WR_CTRL_REG_BIT_START))
			break;
		DELAY(10);
	}
	if (n == 1000) {
		cmn_err(CE_WARN, "timeout transferring firmware\n");
		err = ETIMEDOUT;
		/* balance iwk_mac_access_enter() on the error path too */
		iwk_mac_access_exit(sc);
		return (err);
	}

	/* for future power-save mode use */
	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	iwk_mac_access_exit(sc);

	return (err);
}
/*ARGSUSED*/
/*
 * Handle one REPLY_4965_RX notification: parse the PHY result that
 * precedes the frame, derive an RSSI percentage, validate length and
 * the CRC status word that trails the frame, then copy the frame into
 * an mblk and hand it to net80211.
 */
static void
iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_ring_t *ring = &sc->sc_rxq;
	iwk_rx_phy_res_t *stat;
	ieee80211_node_t *in;
	uint32_t *tail;
	struct ieee80211_frame *wh;
	mblk_t *mp;
	uint16_t len, rssi, mrssi, agc;
	int16_t t;
	uint32_t ants, i;
	struct iwk_rx_non_cfg_phy *phyinfo;
	uint32_t crc;

	/* assuming not 11n here. cope with 11n in phase-II */
	stat = (iwk_rx_phy_res_t *)(desc + 1);
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/* swap the non-config PHY words to host order before decoding */
	for (i = 0; i < RX_RES_PHY_CNT; i++)
		stat->non_cfg_phy[i] = LE_16(stat->non_cfg_phy[i]);

	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
	mrssi = 0;
	/* take the strongest RSSI over the antennas that received */
	ants = (LE_16(stat->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) >>
	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
	for (i = 0; i < 3; i++) {
		if (ants & (1 << i))
			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
	}
	t = mrssi - agc - 44; /* t is the dBM value */
	/*
	 * convert dBm to percentage ???
	 * (empirical mapping; clamped to 1..100 below)
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
	    (75 * 75);
	if (rssi > 100)
		rssi = 100;
	if (rssi < 1)
		rssi = 1;

	/* the 32-bit status word (CRC flags) follows the frame body */
	len = LE_16(stat->byte_count);
	tail = (uint32_t *)((caddr_t)(stat + 1) + stat->cfg_phy_cnt + len);
	bcopy(tail, &crc, 4);

	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, LE_16(stat->channel),
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);

	/* snoop the association ID out of assoc-response frames */
	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}
#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_RX)
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
#endif
	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/* send the frame to the 802.11 layer */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWK_DBG((IWK_DEBUG_RX,
		    "iwk_rx_intr(): alloc rx buf failed\n"));
	}
	/* release node reference */
	ieee80211_free_node(in);
}
/*ARGSUSED*/
/*
 * Handle a REPLY_TX notification: update AMRR rate-control counters
 * and retry statistics, clear the Tx watchdog, decrement the queued
 * count, and restart the MAC layer if it was flow-controlled.
 */
static void
iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;

	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
	    " retries=%d frame_count=%x nkill=%d "
	    "rate=%x duration=%d status=%x\n",
	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
	    stat->bt_kill_count, stat->rate.r.s.rate,
	    LE_16(stat->duration), LE_32(stat->status)));

	amrr->txcnt++;
	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	sc->sc_tx_timer = 0;

	mutex_enter(&sc->sc_tx_lock);
	ring->queued--;
	if (ring->queued < 0)
		ring->queued = 0;
	/*
	 * NOTE(review): "ring->count << 3" makes this threshold eight
	 * times the ring size, so the condition is effectively always
	 * true when a reschedule is pending; ">> 3" (count/8) looks
	 * like the intent — confirm before changing.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}
	mutex_exit(&sc->sc_tx_lock);
}
/*
 * Completion handler for firmware command responses: anything arriving
 * on the command queue (qid 4) wakes the thread sleeping in iwk_cmd().
 */
static void
iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	/* only the command queue (number 4) carries command responses */
	if ((desc->hdr.qid & 7) != 4)
		return;

	mutex_enter(&sc->sc_glock);
	sc->sc_flags |= IWK_F_CMD_DONE;
	cv_signal(&sc->sc_cmd_cv);
	mutex_exit(&sc->sc_glock);

	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}
/*
 * Handle a REPLY_ALIVE notification from the microcontroller.
 *
 * For the "initialize" image this saves the calibration reply and
 * points the BSM at the runtime image so it can be loaded next.  For
 * the runtime image it saves the alive response, initializes the
 * scheduler (SCD) areas and Tx queues, then signals iwk_init() via
 * sc_fw_cv.
 *
 * Fix over the previous revision: the debug message labelled both
 * values "minor"/"major" but passed ucode_minor twice; the second
 * argument is now ucode_major.
 */
static void
iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	uint32_t base, i;
	struct iwk_alive_resp *ar =
	    (struct iwk_alive_resp *)(desc + 1);

	/* the microcontroller is ready */
	IWK_DBG((IWK_DEBUG_FW,
	    "microcode alive notification minor: %x major: %x type:"
	    " %x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));

	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
		IWK_DBG((IWK_DEBUG_FW,
		    "microcontroller initialization failed\n"));
	}
	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
		IWK_DBG((IWK_DEBUG_FW,
		    "initialization alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_init, ar,
		    sizeof (struct iwk_init_alive_resp));
		/* XXX get temperature */

		/* point the BSM at the runtime image for the next load */
		iwk_mac_access_enter(sc);
		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
		    sc->sc_dma_fw_data.cookie.dmac_size);
		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
		iwk_mac_access_exit(sc);
	} else {
		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_run, ar,
		    sizeof (struct iwk_alive_resp));

		/*
		 * Init SCD related registers to make Tx work. XXX
		 */
		iwk_mac_access_enter(sc);

		/* read sram address of data base */
		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);

		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
		    i < 128; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/*
		 * clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes
		 * NOTE(review): 'i' continues from 128, so this writes
		 * base+128..base+252 rather than base+0..; likewise the
		 * translate-table loop below never runs because 'i' is
		 * already past its bound.  Kept byte-identical — confirm
		 * against the 4965 SCD layout before changing.
		 */
		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
		    i < 256; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
			iwk_mem_write(sc, base + i, 0);

		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
		    sc->sc_dma_sh.cookie.dmac_address >> 10);
		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);

		/* initiate the tx queues */
		for (i = 0; i < IWK_NUM_QUEUES; i++) {
			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i),
			    (SCD_WIN_SIZE & 0x7f));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
			    (SCD_FRAME_LIMIT & 0x7f) << 16);
		}
		/* interrupt enable on each queue0-7 */
		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
		    (1 << IWK_NUM_QUEUES) - 1);
		/* enable each channel 0-7 */
		iwk_reg_write(sc, SCD_TXFACT,
		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
		/*
		 * queue 0-7 maps to FIFO 0-7 and
		 * all queues work under FIFO mode (none-scheduler-ack)
		 */
		for (i = 0; i < 7; i++) {
			iwk_reg_write(sc,
			    SCD_QUEUE_STATUS_BITS(i),
			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
			    SCD_QUEUE_STTS_REG_MSK);
		}
		iwk_mac_access_exit(sc);

		sc->sc_flags |= IWK_F_FW_INIT;
		cv_signal(&sc->sc_fw_cv);
	}
}
/*
 * Softint handler draining the Rx notification ring.  Walks the ring
 * from the driver cursor to the index the firmware published in the
 * shared page, dispatching each entry by type (frames, Tx status,
 * alive, card state, scan progress, statistics), then writes the
 * consumed index back to the device and re-enables interrupts.
 */
static uint_t
/* LINTED: argument unused in function: unused */
iwk_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_desc_t *desc;
	iwk_rx_data_t *data;
	uint32_t index;

	mutex_enter(&sc->sc_glock);
	/* only run when iwk_intr() actually scheduled us */
	if (sc->sc_rx_softint_pending != 1) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	mutex_exit(&sc->sc_glock);

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = sc->sc_shared->val0 & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;

		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/* a command other than a tx need to be replied */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
		    (desc->hdr.type != REPLY_TX) &&
		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
		    (desc->hdr.type != SENSITIVITY_CMD))
			iwk_cmd_intr(sc, desc);

		switch (desc->hdr.type) {
		case REPLY_4965_RX:
			iwk_rx_intr(sc, desc, data);
			break;

		case REPLY_TX:
			iwk_tx_intr(sc, desc, data);
			break;

		case REPLY_ALIVE:
			iwk_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwk_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE,
				    "iwk_rx_softintr(): "
				    "Radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				sc->sc_flags |=
				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
			}
			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwk_start_scan_t *scan =
			    (iwk_start_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
			iwk_stop_scan_t *scan =
			    (iwk_stop_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));

			/* iwk_thread() advances the scan on this counter */
			sc->sc_scan_pending++;
			break;
		}
		case STATISTICS_NOTIFICATION:
			/* handle statistics notification */
			iwk_statistics_notify(sc, desc);
			break;
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what reveived in rx queue and tell the information
	 * to the firmware.
	 */
	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	mutex_enter(&sc->sc_glock);
	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	sc->sc_rx_softint_pending = 0;
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
/*
 * Hard interrupt handler.  Reads and acknowledges CSR_INT and the FH
 * status, then: aborts to recovery on fatal firmware errors (leaving
 * interrupts masked for iwk_thread()), notes RF-kill changes, and
 * schedules the Rx softint for receive/notification traffic.
 */
static uint_t
/* LINTED: argument unused in function: unused */
iwk_intr(caddr_t arg, caddr_t unused)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	uint32_t r, rfh;

	mutex_enter(&sc->sc_glock);

	if (sc->sc_flags & IWK_F_SUSPEND) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* 0xffffffff indicates the device is gone (e.g. hot-removed) */
	r = IWK_READ(sc, CSR_INT);
	if (r == 0 || r == 0xffffffff) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));

	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* ack interrupts */
	IWK_WRITE(sc, CSR_INT, r);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (sc->sc_soft_hdl == NULL) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_CLAIMED);
	}
	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		cmn_err(CE_WARN, "fatal firmware error\n");
		mutex_exit(&sc->sc_glock);
#ifdef DEBUG
		/* dump event and error logs to dmesg */
		iwk_write_error_log(sc);
		iwk_write_event_log(sc);
#endif /* DEBUG */
		iwk_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* not capable of fast recovery */
		if (!IWK_CHK_FAST_RECOVER(sc))
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);

		/* iwk_thread() performs the actual restart */
		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
		if (tmp & (1 << 27))
			cmn_err(CE_NOTE, "RF switch: radio on\n");
	}

	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		/* defer receive processing to the softint */
		sc->sc_rx_softint_pending = 1;
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
	}

	if (r & BIT_INT_ALIVE) {
		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
	}

	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
/*
 * Map an 802.11 rate (in half-Mb/s units, e.g. 2 = 1 Mb/s, 108 =
 * 54 Mb/s) to the 4965 PLCP signal value.  Unknown rates map to 0.
 */
static uint8_t
iwk_rate_to_plcp(int rate)
{
	static const struct {
		int rate;
		uint8_t plcp;
	} rate_map[] = {
		/* CCK rates */
		{ 2, 0xa }, { 4, 0x14 }, { 11, 0x37 }, { 22, 0x6e },
		/* OFDM rates */
		{ 12, 0xd }, { 18, 0xf }, { 24, 0x5 }, { 36, 0x7 },
		{ 48, 0x9 }, { 72, 0xb }, { 96, 0x1 }, { 108, 0x3 }
	};
	size_t idx;

	for (idx = 0; idx < sizeof (rate_map) / sizeof (rate_map[0]); idx++) {
		if (rate_map[idx].rate == rate)
			return (rate_map[idx].plcp);
	}
	return (0);
}
/*
 * GLDv3 mc_tx(9E) entry point.  Walks the mblk chain, handing each
 * frame to iwk_send().  Returns NULL when the whole chain has been
 * consumed (sent or dropped), or the remainder of the chain when the
 * hardware tx ring is full so MAC will retry later.
 */
static mblk_t *
iwk_m_tx(void *arg, mblk_t *mp)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	ieee80211com_t *ic = &sc->sc_ic;
	mblk_t *next_mp;

	/*
	 * Suspended or not yet associated: frames cannot be transmitted,
	 * so drop the entire chain.
	 */
	if ((sc->sc_flags & IWK_F_SUSPEND) ||
	    (ic->ic_state != IEEE80211_S_RUN)) {
		freemsgchain(mp);
		return (NULL);
	}

	/*
	 * During a fast firmware-error recovery, keep the chain queued in
	 * MAC rather than dropping it; MAC will re-submit it later.
	 */
	if ((sc->sc_flags & IWK_F_HW_ERR_RECOVER) &&
	    IWK_CHK_FAST_RECOVER(sc)) {
		IWK_DBG((IWK_DEBUG_FW, "iwk_m_tx(): hold queue\n"));
		return (mp);
	}

	for (; mp != NULL; mp = next_mp) {
		next_mp = mp->b_next;
		mp->b_next = NULL;
		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
			/* tx ring full: relink and return the remainder */
			mp->b_next = next_mp;
			break;
		}
	}
	return (mp);
}
/* ARGSUSED */
static int
iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
iwk_sc_t *sc = (iwk_sc_t *)ic;
iwk_tx_ring_t *ring;
iwk_tx_desc_t *desc;
iwk_tx_data_t *data;
iwk_cmd_t *cmd;
iwk_tx_cmd_t *tx;
ieee80211_node_t *in;
struct ieee80211_frame *wh;
struct ieee80211_key *k = NULL;
mblk_t *m, *m0;
int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
uint16_t masks = 0;
uint8_t index, index1, index2;
ring = &sc->sc_txq[0];
data = &ring->data[ring->cur];
desc = data->desc;
cmd = data->cmd;
bzero(desc, sizeof (*desc));
bzero(cmd, sizeof (*cmd));
mutex_enter(&sc->sc_tx_lock);
if (sc->sc_flags & IWK_F_SUSPEND) {
mutex_exit(&sc->sc_tx_lock);
if ((type & IEEE80211_FC0_TYPE_MASK) !=
IEEE80211_FC0_TYPE_DATA) {
freemsg(mp);
}
err = IWK_FAIL;
goto exit;
}
if (ring->queued > ring->count - 64) {
IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
sc->sc_need_reschedule = 1;
mutex_exit(&sc->sc_tx_lock);
if ((type & IEEE80211_FC0_TYPE_MASK) !=
IEEE80211_FC0_TYPE_DATA) {
freemsg(mp);
}
sc->sc_tx_nobuf++;
err = IWK_FAIL;
goto exit;
}
mutex_exit(&sc->sc_tx_lock);
hdrlen = sizeof (struct ieee80211_frame);
m = allocb(msgdsize(mp) + 32, BPRI_MED);
if (m == NULL) { /* can not alloc buf, drop this package */
cmn_err(CE_WARN,
"iwk_send(): failed to allocate msgbuf\n");
freemsg(mp);
err = IWK_SUCCESS;
goto exit;
}
for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
mblen = MBLKL(m0);
(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
off += mblen;
}
m->b_wptr += off;
freemsg(mp);