/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright 2005-08 Adaptec, Inc.
* Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
* Copyright (c) 2000 Michael Smith
* Copyright (c) 2001 Scott Long
* Copyright (c) 2000 BSDi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"
/*
* FMA header files
*/
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing
 * in a number less than 64.
 *
 * To support cfgadm, the driver needs to conform to the SCSA framework
 * by creating devctl/scsi and driver-specific minor nodes in the SCSA
 * format, and by calling the scsi_hba_xxx() functions accordingly.
 */
#define AAC_MINOR 32
#define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
#define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
#define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
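/*
 * Illustrative note (INST_MINOR_SHIFT and TRAN_MINOR_MASK come from the
 * aac/SCSA headers): for instance 2, the driver's own "aac" ioctl node
 * gets minor INST2AAC(2) == (2 << INST_MINOR_SHIFT) | 32, while the
 * devctl/scsi nodes keep a SCSA-defined value in the low bits, so
 * AAC_SCSA_MINOR() recovers that value and AAC_IS_SCSA_NODE() tells the
 * SCSA nodes apart from the driver's private node.
 */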
#define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
#define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
#define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
#define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
#define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
#define AAC_PD(t) ((t) - AAC_MAX_LD)
#define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
&(softs)->containers[(t)].dev : \
((t) < AAC_MAX_DEV(softs)) ? \
&(softs)->nondasds[AAC_PD(t)].dev : NULL)
#define AAC_DEVCFG_BEGIN(softs, tgt) \
aac_devcfg((softs), (tgt), 1)
#define AAC_DEVCFG_END(softs, tgt) \
aac_devcfg((softs), (tgt), 0)
#define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
#define AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
if (!(cond)) { \
int count = (timeout) * 10; \
while (count) { \
drv_usecwait(100); \
if (cond) \
break; \
count--; \
} \
(timeout) = (count + 9) / 10; \
} \
}
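/*
 * AAC_BUSYWAIT spins in 100us steps until cond becomes true or timeout
 * milliseconds elapse; on return, timeout holds roughly the remaining
 * milliseconds (unchanged if cond was already true), so 0 means the
 * wait timed out.  A minimal usage sketch, with a hypothetical firmware
 * ready bit:
 *
 *	int timeout = 1000;
 *	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & fw_ready_bit, timeout);
 *	if (timeout == 0)
 *		return (AACERR);
 */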
#define AAC_SENSE_DATA_DESCR_LEN \
(sizeof (struct scsi_descr_sense_hdr) + \
sizeof (struct scsi_information_sense_descr))
#define AAC_ARQ64_LENGTH \
(sizeof (struct scsi_arq_status) + \
AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
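/*
 * Roughly, AAC_ARQ64_LENGTH is the auto request sense length needed when
 * descriptor-format sense is returned for 64-bit LBAs: the fixed-format
 * sense area (SENSE_LENGTH) embedded in scsi_arq_status is replaced by a
 * descriptor sense header plus one information sense descriptor.
 */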
/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define AAC_GETGXADDR(cmdlen, cdbp) \
((cmdlen == 6) ? GETG0ADDR(cdbp) : \
(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
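/*
 * Sketch of the extraction above: a 6-byte (Group 0) CDB carries a
 * 21-bit LBA, a 10-byte (Group 1) CDB a 32-bit LBA, and a 16-byte
 * (Group 4) CDB a 64-bit LBA assembled from GETG4ADDR (upper 32 bits)
 * and GETG4ADDRTL (lower 32 bits, cast because it is signed).
 */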
#define AAC_CDB_INQUIRY_CMDDT 0x02
#define AAC_CDB_INQUIRY_EVPD 0x01
#define AAC_VPD_PAGE_CODE 1
#define AAC_VPD_PAGE_LENGTH 3
#define AAC_VPD_PAGE_DATA 4
#define AAC_VPD_ID_CODESET 0
#define AAC_VPD_ID_TYPE 1
#define AAC_VPD_ID_LENGTH 3
#define AAC_VPD_ID_DATA 4
#define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
#define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
#define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
/* 00b - peripheral device addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
/* 01b - flat space addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
/* 10b - logical unit addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
/* Return the size of FIB with data part type data_type */
#define AAC_FIB_SIZEOF(data_type) \
(sizeof (struct aac_fib_header) + sizeof (data_type))
/* Return the container size defined in mir */
#define AAC_MIR_SIZE(softs, acc, mir) \
(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
/* The last entry of aac_cards[] is for unknown cards */
#define AAC_UNKNOWN_CARD \
(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
#define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
#define BUF_IS_READ(bp) ((bp)->b_flags & B_READ)
#define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL)
#define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC))
#define PCI_MEM_GET32(softs, off) \
ddi_get32((softs)->pci_mem_handle, \
(void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT32(softs, off, val) \
ddi_put32((softs)->pci_mem_handle, \
(void *)((softs)->pci_mem_base_vaddr + (off)), \
(uint32_t)(val))
#define PCI_MEM_GET16(softs, off) \
ddi_get16((softs)->pci_mem_handle, \
(void *)((softs)->pci_mem_base_vaddr + (off)))
#define PCI_MEM_PUT16(softs, off, val) \
ddi_put16((softs)->pci_mem_handle, \
(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
/* Write host data at valp to device mem[off] repeatedly count times */
#define PCI_MEM_REP_PUT8(softs, off, valp, count) \
ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
count, DDI_DEV_AUTOINCR)
/* Read device data at mem[off] to host addr valp repeatedly count times */
#define PCI_MEM_REP_GET8(softs, off, valp, count) \
ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
(uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
count, DDI_DEV_AUTOINCR)
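/*
 * All of the PCI_MEM_* accessors above go through pci_mem_handle and
 * pci_mem_base_vaddr, which are set up in aac_attach() via
 * ddi_regs_map_setup(); off is a byte offset into the mapped register
 * space (e.g. AAC_OIMR, AAC_ODBR, AAC_IDBR used below).
 */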
#define AAC_GET_FIELD8(acc, d, s, field) \
(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
#define AAC_GET_FIELD32(acc, d, s, field) \
(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
#define AAC_GET_FIELD64(acc, d, s, field) \
(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
#define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
(uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
#define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
(uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
#define AAC_ENABLE_INTR(softs) { \
if (softs->flags & AAC_FLAGS_NEW_COMM) \
PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
else \
PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
softs->state |= AAC_STATE_INTR; \
}
#define AAC_DISABLE_INTR(softs) { \
PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
softs->state &= ~AAC_STATE_INTR; \
}
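/*
 * AAC_OIMR is the adapter's outbound interrupt mask register:
 * AAC_ENABLE_INTR unmasks only the doorbell bits used by the selected
 * interface (new comm. vs. legacy), while AAC_DISABLE_INTR writes ~0 to
 * mask everything.  The AAC_STATE_INTR flag mirrors the current setting
 * in the softstate.
 */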
#define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask)
#define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR)
#define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val)
#define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE)
#define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val)
#define AAC_FWSTATUS_GET(softs) \
((softs)->aac_if.aif_get_fwstatus(softs))
#define AAC_MAILBOX_GET(softs, mb) \
((softs)->aac_if.aif_get_mailbox((softs), (mb)))
#define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
(arg0), (arg1), (arg2), (arg3)))
#define AAC_MGT_SLOT_NUM 2
#define AAC_THROTTLE_DRAIN -1
#define AAC_QUIESCE_TICK 1 /* 1 second */
#define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */
#define AAC_DEFAULT_TICK 10 /* 10 seconds */
#define AAC_SYNC_TICK (30*60) /* 30 minutes */
/* Poll time for aac_do_poll_io() */
#define AAC_POLL_TIME 60 /* 60 seconds */
/* IOP reset */
#define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeeded */
#define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */
#define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */
/*
* Hardware access functions
*/
static int aac_rx_get_fwstatus(struct aac_softstate *);
static int aac_rx_get_mailbox(struct aac_softstate *, int);
static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
uint32_t, uint32_t, uint32_t);
static int aac_rkt_get_fwstatus(struct aac_softstate *);
static int aac_rkt_get_mailbox(struct aac_softstate *, int);
static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
uint32_t, uint32_t, uint32_t);
/*
* SCSA function prototypes
*/
static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
static int aac_quiesce(dev_info_t *);
static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
/*
* Interrupt handler functions
*/
static int aac_query_intrs(struct aac_softstate *, int);
static int aac_add_intrs(struct aac_softstate *);
static void aac_remove_intrs(struct aac_softstate *);
static int aac_enable_intrs(struct aac_softstate *);
static int aac_disable_intrs(struct aac_softstate *);
static uint_t aac_intr_old(caddr_t);
static uint_t aac_intr_new(caddr_t);
static uint_t aac_softintr(caddr_t);
/*
* Internal functions in attach
*/
static int aac_check_card_type(struct aac_softstate *);
static int aac_check_firmware(struct aac_softstate *);
static int aac_common_attach(struct aac_softstate *);
static void aac_common_detach(struct aac_softstate *);
static int aac_probe_containers(struct aac_softstate *);
static int aac_alloc_comm_space(struct aac_softstate *);
static int aac_setup_comm_space(struct aac_softstate *);
static void aac_free_comm_space(struct aac_softstate *);
static int aac_hba_setup(struct aac_softstate *);
/*
* Sync FIB operation functions
*/
int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
uint32_t, uint32_t, uint32_t, uint32_t *);
static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
/*
* Command queue operation functions
*/
static void aac_cmd_initq(struct aac_cmd_queue *);
static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
/*
* FIB queue operation functions
*/
static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
static int aac_fib_dequeue(struct aac_softstate *, int, int *);
/*
* Slot operation functions
*/
static int aac_create_slots(struct aac_softstate *);
static void aac_destroy_slots(struct aac_softstate *);
static void aac_alloc_fibs(struct aac_softstate *);
static void aac_destroy_fibs(struct aac_softstate *);
static struct aac_slot *aac_get_slot(struct aac_softstate *);
static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
static void aac_free_fib(struct aac_slot *);
/*
* Internal functions
*/
static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
uint16_t);
static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
static void aac_start_waiting_io(struct aac_softstate *);
static void aac_drain_comp_q(struct aac_softstate *);
int aac_do_io(struct aac_softstate *, struct aac_cmd *);
static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
static int aac_send_command(struct aac_softstate *, struct aac_slot *);
static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
static int aac_dma_sync_ac(struct aac_cmd *);
static int aac_shutdown(struct aac_softstate *);
static int aac_reset_adapter(struct aac_softstate *);
static int aac_do_quiesce(struct aac_softstate *softs);
static int aac_do_unquiesce(struct aac_softstate *softs);
static void aac_unhold_bus(struct aac_softstate *, int);
static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
int, int);
/*
* Adapter Initiated FIB handling function
*/
static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t,
struct aac_fib *, int);
static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *);
/*
* Event handling related functions
*/
static void aac_timer(void *);
static void aac_event_thread(struct aac_softstate *);
static void aac_event_disp(struct aac_softstate *, int);
/*
* IOCTL interface related functions
*/
static int aac_open(dev_t *, int, int, cred_t *);
static int aac_close(dev_t, int, int, cred_t *);
static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
/*
* FMA Prototypes
*/
static void aac_fm_init(struct aac_softstate *);
static void aac_fm_fini(struct aac_softstate *);
static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
int aac_check_acc_handle(ddi_acc_handle_t);
int aac_check_dma_handle(ddi_dma_handle_t);
void aac_fm_ereport(struct aac_softstate *, char *);
/*
* Auto enumeration functions
*/
static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
void *, dev_info_t **);
static int aac_handle_dr(struct aac_softstate *, int, int, int);
extern pri_t minclsyspri;
#ifdef DEBUG
/*
* UART debug output support
*/
#define AAC_PRINT_BUFFER_SIZE 512
#define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */
#define AAC_FW_DBG_STRLEN_OFFSET 0x00
#define AAC_FW_DBG_FLAGS_OFFSET 0x04
#define AAC_FW_DBG_BLED_OFFSET 0x08
static int aac_get_fw_debug_buffer(struct aac_softstate *);
static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
static char aac_fmt[] = " %s";
static char aac_fmt_header[] = " %s.%d: %s";
static kmutex_t aac_prt_mutex;
/*
* Debug flags to be put into the softstate flags field
* when initialized
*/
uint32_t aac_debug_flags =
/* AACDB_FLAGS_KERNEL_PRINT | */
/* AACDB_FLAGS_FW_PRINT | */
/* AACDB_FLAGS_MISC | */
/* AACDB_FLAGS_FUNC1 | */
/* AACDB_FLAGS_FUNC2 | */
/* AACDB_FLAGS_SCMD | */
/* AACDB_FLAGS_AIF | */
/* AACDB_FLAGS_FIB | */
/* AACDB_FLAGS_IOCTL | */
0;
uint32_t aac_debug_fib_flags =
/* AACDB_FLAGS_FIB_RW | */
/* AACDB_FLAGS_FIB_IOCTL | */
/* AACDB_FLAGS_FIB_SRB | */
/* AACDB_FLAGS_FIB_SYNC | */
/* AACDB_FLAGS_FIB_HEADER | */
/* AACDB_FLAGS_FIB_TIMEOUT | */
0;
#endif /* DEBUG */
static struct cb_ops aac_cb_ops = {
aac_open, /* open */
aac_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
aac_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* cb_prop_op */
NULL, /* streamtab */
D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
CB_REV, /* cb_rev */
nodev, /* async I/O read entry point */
nodev /* async I/O write entry point */
};
static struct dev_ops aac_dev_ops = {
DEVO_REV,
0,
aac_getinfo,
nulldev,
nulldev,
aac_attach,
aac_detach,
aac_reset,
&aac_cb_ops,
NULL,
NULL,
aac_quiesce,
};
static struct modldrv aac_modldrv = {
&mod_driverops,
"AAC Driver " AAC_DRIVER_VERSION,
&aac_dev_ops,
};
static struct modlinkage aac_modlinkage = {
MODREV_1,
&aac_modldrv,
NULL
};
static struct aac_softstate *aac_softstatep;
/*
* Supported card list
 * ordered by vendor id, subvendor id, subdevice id, and device id
*/
static struct aac_card_type aac_cards[] = {
{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Si"},
{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Si"},
{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
"Dell", "PERC 3/Di"},
{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
"Dell", "PERC 320/DC"},
{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
"Adaptec", "2200S"},
{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
"Adaptec", "2120S"},
{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
"Adaptec", "2200S"},
{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "ICP", "9024RO"},
{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "ICP", "9014RO"},
{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "ICP", "9047MA"},
{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "ICP", "9087MA"},
{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "ICP", "9085LI"},
{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "ICP", "5085BR"},
{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
0, AAC_TYPE_SATA, "ICP", "9067MA"},
{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
};
/*
* Hardware access functions for i960 based cards
*/
static struct aac_interface aac_rx_interface = {
aac_rx_get_fwstatus,
aac_rx_get_mailbox,
aac_rx_set_mailbox
};
/*
* Hardware access functions for Rocket based cards
*/
static struct aac_interface aac_rkt_interface = {
aac_rkt_get_fwstatus,
aac_rkt_get_mailbox,
aac_rkt_set_mailbox
};
ddi_device_acc_attr_t aac_acc_attr = {
DDI_DEVICE_ATTR_V1,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC,
DDI_DEFAULT_ACC
};
static struct {
int size;
int notify;
} aac_qinfo[] = {
{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
{AAC_HOST_HIGH_CMD_ENTRIES, 0},
{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
{AAC_HOST_HIGH_RESP_ENTRIES, 0},
{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};
/*
* Default aac dma attributes
*/
static ddi_dma_attr_t aac_dma_attr = {
DMA_ATTR_V0,
0, /* lowest usable address */
0xffffffffull, /* high DMA address range */
0xffffffffull, /* DMA counter register */
AAC_DMA_ALIGN, /* DMA address alignment */
1, /* DMA burstsizes */
1, /* min effective DMA size */
0xffffffffull, /* max DMA xfer size */
0xffffffffull, /* segment boundary */
1, /* s/g list length */
AAC_BLK_SIZE, /* granularity of device */
0 /* DMA transfer flags */
};
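/*
 * Per-instance copies of these attributes are made in aac_attach():
 * buf_dma_attr (data buffers) and addr_dma_attr (control structures),
 * with the latter relaxing dma_attr_granular to 1.  When buf breakup is
 * enabled, the "dma-max" property may further limit the per-transfer
 * size tracked in softs->dma_max.
 */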
static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */
static uint32_t aac_timebase = 0; /* internal timer in seconds */
/*
* Warlock directives
*
* Different variables with the same types have to be protected by the
* same mutex; otherwise, warlock will complain with "variables don't
* seem to be protected consistently". For example,
 * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and protected
 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
 * declare them as protected explicitly at aac_cmd_dequeue().
*/
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
mode_format mode_geometry mode_header aac_cmd))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
aac_sge))
_NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
aac_sg_table aac_srb))
_NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
_NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
_NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
int
_init(void)
{
int rval = 0;
#ifdef DEBUG
mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
#endif
DBCALLED(NULL, 1);
if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
sizeof (struct aac_softstate), 0)) != 0)
goto error;
if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
ddi_soft_state_fini((void *)&aac_softstatep);
goto error;
}
if ((rval = mod_install(&aac_modlinkage)) != 0) {
ddi_soft_state_fini((void *)&aac_softstatep);
scsi_hba_fini(&aac_modlinkage);
goto error;
}
return (rval);
error:
AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
#ifdef DEBUG
mutex_destroy(&aac_prt_mutex);
#endif
return (rval);
}
int
_info(struct modinfo *modinfop)
{
DBCALLED(NULL, 1);
return (mod_info(&aac_modlinkage, modinfop));
}
/*
 * An HBA driver cannot be unloaded without a reboot,
 * so this function is of little practical use.
*/
int
_fini(void)
{
int rval;
DBCALLED(NULL, 1);
if ((rval = mod_remove(&aac_modlinkage)) != 0)
goto error;
scsi_hba_fini(&aac_modlinkage);
ddi_soft_state_fini((void *)&aac_softstatep);
#ifdef DEBUG
mutex_destroy(&aac_prt_mutex);
#endif
return (0);
error:
AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
return (rval);
}
static int
aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
int instance, i;
struct aac_softstate *softs = NULL;
int attach_state = 0;
char *data;
DBCALLED(NULL, 1);
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
instance = ddi_get_instance(dip);
/* Get soft state */
if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
goto error;
}
softs = ddi_get_soft_state(aac_softstatep, instance);
attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
softs->instance = instance;
softs->devinfo_p = dip;
softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
softs->addr_dma_attr.dma_attr_granular = 1;
softs->acc_attr = aac_acc_attr;
softs->reg_attr = aac_acc_attr;
softs->card = AAC_UNKNOWN_CARD;
#ifdef DEBUG
softs->debug_flags = aac_debug_flags;
softs->debug_fib_flags = aac_debug_fib_flags;
#endif
/* Initialize FMA */
aac_fm_init(softs);
/* Check the card type */
if (aac_check_card_type(softs) == AACERR) {
AACDB_PRINT(softs, CE_WARN, "Card not supported");
goto error;
}
/* We have found the right card and everything is OK */
attach_state |= AAC_ATTACH_CARD_DETECTED;
/* Map PCI mem space */
if (ddi_regs_map_setup(dip, 1,
(caddr_t *)&softs->pci_mem_base_vaddr, 0,
softs->map_size_min, &softs->reg_attr,
&softs->pci_mem_handle) != DDI_SUCCESS)
goto error;
softs->map_size = softs->map_size_min;
attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
AAC_DISABLE_INTR(softs);
/* Init mutexes and condvars */
mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(softs->intr_pri));
mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(softs->intr_pri));
mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(softs->intr_pri));
mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(softs->intr_pri));
mutex_init(&softs->aifq_mutex, NULL,
MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
cv_init(&softs->event, NULL, CV_DRIVER, NULL);
cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL);
cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL);
cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL);
attach_state |= AAC_ATTACH_KMUTEX_INITED;
/* Init the cmd queues */
for (i = 0; i < AAC_CMDQ_NUM; i++)
aac_cmd_initq(&softs->q_wait[i]);
aac_cmd_initq(&softs->q_busy);
aac_cmd_initq(&softs->q_comp);
/* Check for legacy device naming support */
softs->legacy = 1; /* default to use legacy name */
if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
"legacy-name-enable", &data) == DDI_SUCCESS)) {
if (strcmp(data, "no") == 0) {
AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
softs->legacy = 0;
}
ddi_prop_free(data);
}
/*
 * Everything has been set up so far;
 * now do the common attach work.
*/
mutex_enter(&softs->io_lock);
if (aac_common_attach(softs) == AACERR) {
mutex_exit(&softs->io_lock);
goto error;
}
mutex_exit(&softs->io_lock);
attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
/* Check for buf breakup support */
if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
"breakup-enable", &data) == DDI_SUCCESS)) {
if (strcmp(data, "yes") == 0) {
AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
softs->flags |= AAC_FLAGS_BRKUP;
}
ddi_prop_free(data);
}
softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
if (softs->flags & AAC_FLAGS_BRKUP) {
softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
}
if (aac_hba_setup(softs) != AACOK)
goto error;
attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
/* Create devctl/scsi nodes for cfgadm */
if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
goto error;
}
attach_state |= AAC_ATTACH_CREATE_DEVCTL;
if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
goto error;
}
attach_state |= AAC_ATTACH_CREATE_SCSI;
/* Create aac node for app. to issue ioctls */
if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
DDI_PSEUDO, 0) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
goto error;
}
/* Common attach is OK, so we are attached! */
softs->state |= AAC_STATE_RUN;
/* Create event thread */
softs->fibctx_p = &softs->aifctx;
if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread,
softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) {
AACDB_PRINT(softs, CE_WARN, "aif thread create failed");
softs->state &= ~AAC_STATE_RUN;
goto error;
}
aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
/* Create a thread for command timeout */
softs->timeout_id = timeout(aac_timer, (void *)softs,
(aac_tick * drv_usectohz(1000000)));
/* All done, report the device and finish attach */
ddi_report_dev(dip);
AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
return (DDI_SUCCESS);
error:
if (attach_state & AAC_ATTACH_CREATE_SCSI)
ddi_remove_minor_node(dip, "scsi");
if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
ddi_remove_minor_node(dip, "devctl");
if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
aac_common_detach(softs);
if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
(void) scsi_hba_detach(dip);
scsi_hba_tran_free(AAC_DIP2TRAN(dip));
}
if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
mutex_destroy(&softs->io_lock);
mutex_destroy(&softs->q_comp_mutex);
mutex_destroy(&softs->time_mutex);
mutex_destroy(&softs->ev_lock);
mutex_destroy(&softs->aifq_mutex);
cv_destroy(&softs->event);
cv_destroy(&softs->sync_fib_cv);
cv_destroy(&softs->drain_cv);
cv_destroy(&softs->event_wait_cv);
cv_destroy(&softs->event_disp_cv);
cv_destroy(&softs->aifq_cv);
}
if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
ddi_regs_map_free(&softs->pci_mem_handle);
aac_fm_fini(softs);
if (attach_state & AAC_ATTACH_CARD_DETECTED)
softs->card = AACERR;
if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
ddi_soft_state_free(aac_softstatep, instance);
return (DDI_FAILURE);
}
static int
aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
DBCALLED(softs, 1);
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
mutex_enter(&softs->io_lock);
AAC_DISABLE_INTR(softs);
softs->state = AAC_STATE_STOPPED;
ddi_remove_minor_node(dip, "aac");
ddi_remove_minor_node(dip, "scsi");
ddi_remove_minor_node(dip, "devctl");
mutex_exit(&softs->io_lock);
aac_common_detach(softs);
mutex_enter(&softs->io_lock);
(void) scsi_hba_detach(dip);
scsi_hba_tran_free(tran);
mutex_exit(&softs->io_lock);
/* Stop timer */
mutex_enter(&softs->time_mutex);
if (softs->timeout_id) {
timeout_id_t tid = softs->timeout_id;
softs->timeout_id = 0;
mutex_exit(&softs->time_mutex);
(void) untimeout(tid);
mutex_enter(&softs->time_mutex);
}
mutex_exit(&softs->time_mutex);
/* Destroy event thread */
mutex_enter(&softs->ev_lock);
cv_signal(&softs->event_disp_cv);
cv_wait(&softs->event_wait_cv, &softs->ev_lock);
mutex_exit(&softs->ev_lock);
cv_destroy(&softs->aifq_cv);
cv_destroy(&softs->event_disp_cv);
cv_destroy(&softs->event_wait_cv);
cv_destroy(&softs->drain_cv);
cv_destroy(&softs->sync_fib_cv);
cv_destroy(&softs->event);
mutex_destroy(&softs->aifq_mutex);
mutex_destroy(&softs->ev_lock);
mutex_destroy(&softs->time_mutex);
mutex_destroy(&softs->q_comp_mutex);
mutex_destroy(&softs->io_lock);
ddi_regs_map_free(&softs->pci_mem_handle);
aac_fm_fini(softs);
softs->hwif = AAC_HWIF_UNKNOWN;
softs->card = AAC_UNKNOWN_CARD;
ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
DBCALLED(softs, 1);
mutex_enter(&softs->io_lock);
AAC_DISABLE_INTR(softs);
(void) aac_shutdown(softs);
mutex_exit(&softs->io_lock);
return (DDI_SUCCESS);
}
/*
* quiesce(9E) entry point.
*
* This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
* DDI_FAILURE indicates an error condition and should almost never happen.
*/
static int
aac_quiesce(dev_info_t *dip)
{
struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
if (softs == NULL)
return (DDI_FAILURE);
_NOTE(ASSUMING_PROTECTED(softs->state))
AAC_DISABLE_INTR(softs);
return (DDI_SUCCESS);
}
/* ARGSUSED */
static int
aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
void **result)
{
int error = DDI_SUCCESS;
switch (infocmd) {
case DDI_INFO_DEVT2INSTANCE:
*result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
break;
default:
error = DDI_FAILURE;
}
return (error);
}
/*
* Bring the controller down to a dormant state and detach all child devices.
* This function is called before detach or system shutdown.
* Note: we can assume that the q_wait on the controller is empty, as we
* won't allow shutdown if any device is open.
*/
static int
aac_shutdown(struct aac_softstate *softs)
{
ddi_acc_handle_t acc;
struct aac_close_command *cc;
int rval;
(void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
acc = softs->sync_ac.slotp->fib_acc_handle;
cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
ddi_put32(acc, &cc->Command, VM_CloseAll);
ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
/* Flush all caches, set FW to write through mode */
rval = aac_sync_fib(softs, ContainerCommand,
AAC_FIB_SIZEOF(struct aac_close_command));
aac_sync_fib_slot_release(softs, &softs->sync_ac);
AACDB_PRINT(softs, CE_NOTE,
"shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
return (rval);
}
static uint_t
aac_softintr(caddr_t arg)
{
struct aac_softstate *softs = (void *)arg;
if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
aac_drain_comp_q(softs);
}
return (DDI_INTR_CLAIMED);
}
/*
* Setup auto sense data for pkt
*/
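/*
 * If the supplied information value (e.g. the failing LBA passed in by
 * aac_set_arq_data_hwerr() below) fits in 32 bits, fixed-format sense is
 * built with the value in the 4-byte information field; otherwise
 * descriptor-format sense is built, with an information sense descriptor
 * carrying the full 64-bit value.
 */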
static void
aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
uchar_t add_code, uchar_t qual_code, uint64_t info)
{
struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
pkt->pkt_state |= STATE_ARQ_DONE;
*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_state =
STATE_GOT_BUS |
STATE_GOT_TARGET |
STATE_SENT_CMD |
STATE_XFERRED_DATA;
arqstat->sts_rqpkt_statistics = 0;
if (info <= 0xfffffffful) {
arqstat->sts_sensedata.es_valid = 1;
arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
arqstat->sts_sensedata.es_key = key;
arqstat->sts_sensedata.es_add_code = add_code;
arqstat->sts_sensedata.es_qual_code = qual_code;
arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
arqstat->sts_sensedata.es_info_4 = info & 0xFF;
} else { /* 64-bit LBA */
struct scsi_descr_sense_hdr *dsp;
struct scsi_information_sense_descr *isd;
dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
dsp->ds_class = CLASS_EXTENDED_SENSE;
dsp->ds_code = CODE_FMT_DESCR_CURRENT;
dsp->ds_key = key;
dsp->ds_add_code = add_code;
dsp->ds_qual_code = qual_code;
dsp->ds_addl_sense_length =
sizeof (struct scsi_information_sense_descr);
isd = (struct scsi_information_sense_descr *)(dsp+1);
isd->isd_descr_type = DESCR_INFORMATION;
isd->isd_valid = 1;
isd->isd_information[0] = (info >> 56) & 0xFF;
isd->isd_information[1] = (info >> 48) & 0xFF;
isd->isd_information[2] = (info >> 40) & 0xFF;
isd->isd_information[3] = (info >> 32) & 0xFF;
isd->isd_information[4] = (info >> 24) & 0xFF;
isd->isd_information[5] = (info >> 16) & 0xFF;
isd->isd_information[6] = (info >> 8) & 0xFF;
isd->isd_information[7] = (info) & 0xFF;
}
}
/*
* Setup auto sense data for HARDWARE ERROR
*/
static void
aac_set_arq_data_hwerr(struct aac_cmd *acp)
{
union scsi_cdb *cdbp;
uint64_t err_blkno;
cdbp = (void *)acp->pkt->pkt_cdbp;
err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
}
/*
 * Send a command to the adapter via the New Comm. interface
 */
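/*
 * Rough outline of the inbound queue handshake below: read a free entry
 * offset from the AAC_IQUE register (0xffffffff means none is available,
 * so retry once), write the FIB's 64-bit physical address and size into
 * that entry in mapped PCI memory, then write the offset back to
 * AAC_IQUE to hand the FIB to the firmware.
 */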
static int
aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
{
uint32_t index, device;
index = PCI_MEM_GET32(softs, AAC_IQUE);
if (index == 0xffffffffUL) {
index = PCI_MEM_GET32(softs, AAC_IQUE);
if (index == 0xffffffffUL)
return (AACERR);
}
device = index;
PCI_MEM_PUT32(softs, device,
(uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
device += 4;
PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
device += 4;
PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
PCI_MEM_PUT32(softs, AAC_IQUE, index);
return (AACOK);
}
static void
aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
{
struct aac_device *dvp = acp->dvp;
int q = AAC_CMDQ(acp);
if (acp->slotp) { /* outstanding cmd */
if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
aac_release_slot(softs, acp->slotp);
acp->slotp = NULL;
}
if (dvp) {
dvp->ncmds[q]--;
if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
aac_set_throttle(softs, dvp, q,
softs->total_slots);
/*
* Setup auto sense data for UNIT ATTENTION
* Each lun should generate a unit attention
* condition when reset.
* Phys. drives are treated as logical ones
* during error recovery.
*/
if (dvp->type == AAC_DEV_LD) {
struct aac_container *ctp =
(struct aac_container *)dvp;
if (ctp->reset == 0)
goto noreset;
AACDB_PRINT(softs, CE_NOTE,
"Unit attention: reset");
ctp->reset = 0;
aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
0x29, 0x02, 0);
}
}
noreset:
softs->bus_ncmds[q]--;
aac_cmd_delete(&softs->q_busy, acp);
} else { /* cmd in waiting queue */
aac_cmd_delete(&softs->q_wait[q], acp);
}
if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
mutex_enter(&softs->q_comp_mutex);
aac_cmd_enqueue(&softs->q_comp, acp);
mutex_exit(&softs->q_comp_mutex);
} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
cv_broadcast(&softs->event);
}
}
static void
aac_handle_io(struct aac_softstate *softs, int index)
{
struct aac_slot *slotp;
struct aac_cmd *acp;
uint32_t fast;
fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
index >>= 2;
/* Make sure firmware reported index is valid */
ASSERT(index >= 0 && index < softs->total_slots);
slotp = &softs->io_slot[index];
ASSERT(slotp->index == index);
acp = slotp->acp;
if (acp == NULL || acp->slotp != slotp) {
cmn_err(CE_WARN,
"Firmware error: invalid slot index received from FW");
return;
}
acp->flags |= AAC_CMD_CMPLT;
(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
/*
 * For fast response IO, the firmware does not return any FIB
* data, so we need to fill in the FIB status and state so that
* FIB users can handle it correctly.
*/
if (fast) {
uint32_t state;
state = ddi_get32(slotp->fib_acc_handle,
&slotp->fibp->Header.XferState);
/*
* Update state for CPU not for device, no DMA sync
* needed
*/
ddi_put32(slotp->fib_acc_handle,
&slotp->fibp->Header.XferState,
state | AAC_FIBSTATE_DONEADAP);
ddi_put32(slotp->fib_acc_handle,
(void *)&slotp->fibp->data[0], ST_OK);
}
/* Handle completed ac */
acp->ac_comp(softs, acp);
} else {
ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
acp->flags |= AAC_CMD_ERR;
if (acp->pkt) {
acp->pkt->pkt_reason = CMD_TRAN_ERR;
acp->pkt->pkt_statistics = 0;
}
}
aac_end_io(softs, acp);
}
/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different mechanism for interrupts: there
 * are no explicit message queues, and the driver only needs to access the
 * mapped PCI mem space to find the completed FIB or AIF.
 */
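/*
 * Sketch of the outbound queue decoding below: an index of 0xffffffff
 * means the queue is empty, 0xfffffffe is a hint that the controller
 * wants more work, an index with the AIF bit (AAC_SENDERADDR_MASK_AIF)
 * set points at an adapter-initiated FIB in mapped PCI memory, and
 * anything else identifies a completed I/O; aac_handle_io() recovers the
 * slot number by shifting the index right by 2 and checks
 * AAC_SENDERADDR_MASK_FAST_RESPONSE to see whether the firmware skipped
 * returning FIB data.
 */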
static int
aac_process_intr_new(struct aac_softstate *softs)
{
uint32_t index;
index = AAC_OUTB_GET(softs);
if (index == 0xfffffffful)
index = AAC_OUTB_GET(softs);
if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
return (0);
}
if (index != 0xfffffffful) {
do {
if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
aac_handle_io(softs, index);
} else if (index != 0xfffffffeul) {
struct aac_fib *fibp; /* FIB in AIF queue */
uint16_t fib_size;
/*
* 0xfffffffe means that the controller wants
* more work, ignore it for now. Otherwise,
* AIF received.
*/
index &= ~2;
fibp = (struct aac_fib *)(softs-> \
pci_mem_base_vaddr + index);
fib_size = PCI_MEM_GET16(softs, index + \
offsetof(struct aac_fib, Header.Size));
aac_save_aif(softs, softs->pci_mem_handle,
fibp, fib_size);
/*
* AIF memory is owned by the adapter, so let it
* know that we are done with it.
*/
AAC_OUTB_SET(softs, index);
AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
}
index = AAC_OUTB_GET(softs);
} while (index != 0xfffffffful);
/*
 * Process waiting cmds before starting new ones to
 * ensure earlier IOs are serviced first.
*/
aac_start_waiting_io(softs);
return (AAC_DB_COMMAND_READY);
} else {
return (0);
}
}
static uint_t
aac_intr_new(caddr_t arg)
{
struct aac_softstate *softs = (void *)arg;
uint_t rval;
mutex_enter(&softs->io_lock);
if (aac_process_intr_new(softs))
rval = DDI_INTR_CLAIMED;
else
rval = DDI_INTR_UNCLAIMED;
mutex_exit(&softs->io_lock);
aac_drain_comp_q(softs);
return (rval);
}
/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to, and get completed
 * FIBs from, the adapter. The driver and adapter maintain the queues in
 * a producer/consumer manner. The driver has to query the queues to find
 * completed FIBs.
 */
static int
aac_process_intr_old(struct aac_softstate *softs)
{
uint16_t status;
status = AAC_STATUS_GET(softs);
if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
return (DDI_INTR_UNCLAIMED);
}
if (status & AAC_DB_RESPONSE_READY) {
int slot_idx;
/* ACK the intr */
AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
(void) AAC_STATUS_GET(softs);
while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
&slot_idx) == AACOK)
aac_handle_io(softs, slot_idx);
/*
 * Process waiting cmds before starting new ones to
 * ensure earlier IOs are serviced first.
*/
aac_start_waiting_io(softs);
return (AAC_DB_RESPONSE_READY);
} else if (status & AAC_DB_COMMAND_READY) {
int aif_idx;
AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
(void) AAC_STATUS_GET(softs);
if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
AACOK) {
ddi_acc_handle_t acc = softs->comm_space_acc_handle;
struct aac_fib *fibp; /* FIB in communication space */
uint16_t fib_size;
uint32_t fib_xfer_state;
uint32_t addr, size;
ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
#define AAC_SYNC_AIF(softs, aif_idx, type) \
{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
offsetof(struct aac_comm_space, \
adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
(type)); }
/* Copy AIF from adapter to the empty AIF slot */
AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
fibp = &softs->comm_space->adapter_fibs[aif_idx];
fib_size = ddi_get16(acc, &fibp->Header.Size);
aac_save_aif(softs, acc, fibp, fib_size);
/* Complete AIF back to adapter with good status */
fib_xfer_state = LE_32(fibp->Header.XferState);
if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
ddi_put32(acc, &fibp->Header.XferState,
fib_xfer_state | AAC_FIBSTATE_DONEHOST);
ddi_put32(acc, (void *)&fibp->data[0], ST_OK);
if (fib_size > AAC_FIB_SIZE)
ddi_put16(acc, &fibp->Header.Size,
AAC_FIB_SIZE);
AAC_SYNC_AIF(softs, aif_idx,
DDI_DMA_SYNC_FORDEV);
}
/* Put the AIF response on the response queue */
addr = ddi_get32(acc,
&softs->comm_space->adapter_fibs[aif_idx]. \
Header.SenderFibAddress);
size = (uint32_t)ddi_get16(acc,
&softs->comm_space->adapter_fibs[aif_idx]. \
Header.Size);
ddi_put32(acc,
&softs->comm_space->adapter_fibs[aif_idx]. \
Header.ReceiverFibAddress, addr);
if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
addr, size) == AACERR)
cmn_err(CE_NOTE, "!AIF ack failed");
}
return (AAC_DB_COMMAND_READY);
} else if (status & AAC_DB_PRINTF_READY) {
/* ACK the intr */
AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
(void) AAC_STATUS_GET(softs);
(void) ddi_dma_sync(softs->comm_space_dma_handle,
offsetof(struct aac_comm_space, adapter_print_buf),
AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
DDI_SUCCESS)
cmn_err(CE_NOTE, "MSG From Adapter: %s",
softs->comm_space->adapter_print_buf);
else
ddi_fm_service_impact(softs->devinfo_p,
DDI_SERVICE_UNAFFECTED);
AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
return (AAC_DB_PRINTF_READY);
} else if (status & AAC_DB_COMMAND_NOT_FULL) {
/*
* Without these two condition statements, the OS could hang
 * after a while, especially if there are a lot of AIFs to
 * handle, for instance if a drive is pulled from an array
* under heavy load.
*/
AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
return (AAC_DB_COMMAND_NOT_FULL);
} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
return (AAC_DB_RESPONSE_NOT_FULL);
} else {
return (0);
}
}
static uint_t
aac_intr_old(caddr_t arg)
{
struct aac_softstate *softs = (void *)arg;
int rval;
mutex_enter(&softs->io_lock);
if (aac_process_intr_old(softs))
rval = DDI_INTR_CLAIMED;
else
rval = DDI_INTR_UNCLAIMED;
mutex_exit(&softs->io_lock);
aac_drain_comp_q(softs);
return (rval);
}
/*
* Query FIXED or MSI interrupts
*/
static int
aac_query_intrs(struct aac_softstate *softs, int intr_type)
{
dev_info_t *dip = softs->devinfo_p;
int avail, actual, count;
int i, flag, ret;
AACDB_PRINT(softs, CE_NOTE,
"aac_query_intrs:interrupt type 0x%x", intr_type);
/* Get number of interrupts */
ret = ddi_intr_get_nintrs(dip, intr_type, &count);
if ((ret != DDI_SUCCESS) || (count == 0)) {
AACDB_PRINT(softs, CE_WARN,
"ddi_intr_get_nintrs() failed, ret %d count %d",
ret, count);
return (DDI_FAILURE);
}
/* Get number of available interrupts */
ret = ddi_intr_get_navail(dip, intr_type, &avail);
if ((ret != DDI_SUCCESS) || (avail == 0)) {
AACDB_PRINT(softs, CE_WARN,
"ddi_intr_get_navail() failed, ret %d avail %d",
ret, avail);
return (DDI_FAILURE);
}
AACDB_PRINT(softs, CE_NOTE,
"ddi_intr_get_nvail returned %d, navail() returned %d",
count, avail);
/* Allocate an array of interrupt handles */
softs->intr_size = count * sizeof (ddi_intr_handle_t);
softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
if (intr_type == DDI_INTR_TYPE_MSI) {
count = 1; /* only one vector is needed for now */
flag = DDI_INTR_ALLOC_STRICT;
} else { /* must be DDI_INTR_TYPE_FIXED */
flag = DDI_INTR_ALLOC_NORMAL;
}
/* Call ddi_intr_alloc() */
ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
count, &actual, flag);
if ((ret != DDI_SUCCESS) || (actual == 0)) {
AACDB_PRINT(softs, CE_WARN,
"ddi_intr_alloc() failed, ret = %d", ret);
actual = 0;
goto error;
}
if (actual < count) {
AACDB_PRINT(softs, CE_NOTE,
"Requested: %d, Received: %d", count, actual);
goto error;
}
softs->intr_cnt = actual;
/* Get priority for first msi, assume remaining are all the same */
if ((ret = ddi_intr_get_pri(softs->htable[0],
&softs->intr_pri)) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"ddi_intr_get_pri() failed, ret = %d", ret);
goto error;
}
/* Test for high level mutex */
if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
AACDB_PRINT(softs, CE_WARN,
"aac_query_intrs: Hi level interrupt not supported");
goto error;
}
return (DDI_SUCCESS);
error:
/* Free already allocated intr */
for (i = 0; i < actual; i++)
(void) ddi_intr_free(softs->htable[i]);
kmem_free(softs->htable, softs->intr_size);
return (DDI_FAILURE);
}
/*
* Register FIXED or MSI interrupts, and enable them
*/
static int
aac_add_intrs(struct aac_softstate *softs)
{
int i, ret;
int actual;
ddi_intr_handler_t *aac_intr;
actual = softs->intr_cnt;
aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
aac_intr_new : aac_intr_old);
/* Call ddi_intr_add_handler() */
for (i = 0; i < actual; i++) {
if ((ret = ddi_intr_add_handler(softs->htable[i],
aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
cmn_err(CE_WARN,
"ddi_intr_add_handler() failed ret = %d", ret);
/* Free already allocated intr */
for (i = 0; i < actual; i++)
(void) ddi_intr_free(softs->htable[i]);
kmem_free(softs->htable, softs->intr_size);
return (DDI_FAILURE);
}
}
if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
!= DDI_SUCCESS) {
cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
/* Free already allocated intr */
for (i = 0; i < actual; i++)
(void) ddi_intr_free(softs->htable[i]);
kmem_free(softs->htable, softs->intr_size);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Unregister FIXED or MSI interrupts
*/
static void
aac_remove_intrs(struct aac_softstate *softs)
{
int i;
/* Disable all interrupts */
(void) aac_disable_intrs(softs);
/* Call ddi_intr_remove_handler() */
for (i = 0; i < softs->intr_cnt; i++) {
(void) ddi_intr_remove_handler(softs->htable[i]);
(void) ddi_intr_free(softs->htable[i]);
}
kmem_free(softs->htable, softs->intr_size);
}
static int
aac_enable_intrs(struct aac_softstate *softs)
{
int rval = AACOK;
if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
/* for MSI block enable */
if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
DDI_SUCCESS)
rval = AACERR;
} else {
int i;
/* Call ddi_intr_enable() for legacy/MSI non block enable */
for (i = 0; i < softs->intr_cnt; i++) {
if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
rval = AACERR;
}
}
return (rval);
}
static int
aac_disable_intrs(struct aac_softstate *softs)
{
int rval = AACOK;
if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
/* Call ddi_intr_block_disable() */
if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
DDI_SUCCESS)
rval = AACERR;
} else {
int i;
for (i = 0; i < softs->intr_cnt; i++) {
if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
rval = AACERR;
}
}
return (rval);
}
/*
* Set pkt_reason and OR in pkt_statistics flag
*/
static void
aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
uchar_t reason, uint_t stat)
{
#ifndef __lock_lint
_NOTE(ARGUNUSED(softs))
#endif
if (acp->pkt->pkt_reason == CMD_CMPLT)
acp->pkt->pkt_reason = reason;
acp->pkt->pkt_statistics |= stat;
}
/*
* Handle a finished pkt of soft SCMD
*/
static void
aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
{
ASSERT(acp->pkt);
acp->flags |= AAC_CMD_CMPLT;
acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
STATE_SENT_CMD | STATE_GOT_STATUS;
if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
acp->pkt->pkt_resid = 0;
/* AAC_CMD_NO_INTR means no complete callback */
if (!(acp->flags & AAC_CMD_NO_INTR)) {
mutex_enter(&softs->q_comp_mutex);
aac_cmd_enqueue(&softs->q_comp, acp);
mutex_exit(&softs->q_comp_mutex);
ddi_trigger_softintr(softs->softint_id);
}
}
/*
* Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
*/
/*
* Handle completed logical device IO command
*/
/*ARGSUSED*/
static void
aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
struct aac_slot *slotp = acp->slotp;
struct aac_blockread_response *resp;
uint32_t status;
ASSERT(!(acp->flags & AAC_CMD_SYNC));
ASSERT(!(acp->flags & AAC_CMD_NO_CB));
acp->pkt->pkt_state |= STATE_GOT_STATUS;
/*
 * block_read/write have similar response headers, so the blockread
 * response is used for both.
*/
resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
if (status == ST_OK) {
acp->pkt->pkt_resid = 0;
acp->pkt->pkt_state |= STATE_XFERRED_DATA;
} else {
aac_set_arq_data_hwerr(acp);
}
}
/*
* Handle completed phys. device IO command
*/
static void
aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
struct aac_fib *fibp = acp->slotp->fibp;
struct scsi_pkt *pkt = acp->pkt;
struct aac_srb_reply *resp;
uint32_t resp_status;
ASSERT(!(acp->flags & AAC_CMD_SYNC));
ASSERT(!(acp->flags & AAC_CMD_NO_CB));
resp = (struct aac_srb_reply *)&fibp->data[0];
resp_status = ddi_get32(acc, &resp->status);
/* First check FIB status */
if (resp_status == ST_OK) {
uint32_t scsi_status;
uint32_t srb_status;
uint32_t data_xfer_length;
scsi_status = ddi_get32(acc, &resp->scsi_status);
srb_status = ddi_get32(acc, &resp->srb_status);
data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
*pkt->pkt_scbp = (uint8_t)scsi_status;
pkt->pkt_state |= STATE_GOT_STATUS;
if (scsi_status == STATUS_GOOD) {
uchar_t cmd = ((union scsi_cdb *)(void *)
(pkt->pkt_cdbp))->scc_cmd;
/* Next check SRB status */
switch (srb_status & 0x3f) {
case SRB_STATUS_DATA_OVERRUN:
AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
"scmd=%d, xfer=%d, buflen=%d",
(uint32_t)cmd, data_xfer_length,
acp->bcount);
switch (cmd) {
case SCMD_READ:
case SCMD_WRITE:
case SCMD_READ_G1:
case SCMD_WRITE_G1:
case SCMD_READ_G4:
case SCMD_WRITE_G4:
case SCMD_READ_G5:
case SCMD_WRITE_G5:
aac_set_pkt_reason(softs, acp,
CMD_DATA_OVR, 0);
break;
}
/*FALLTHRU*/
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
/*
* pkt_resid should only be calculated if the
* status is ERROR_RECOVERY/PENDING/SUCCESS/
* OVERRUN/UNDERRUN
*/
if (data_xfer_length) {
pkt->pkt_state |= STATE_XFERRED_DATA;
pkt->pkt_resid = acp->bcount - \
data_xfer_length;
ASSERT(pkt->pkt_resid >= 0);
}
break;
case SRB_STATUS_ABORTED:
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_ABORTED, xfer=%d, resid=%d",
data_xfer_length, pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_ABORTED,
STAT_ABORTED);
break;
case SRB_STATUS_ABORT_FAILED:
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_ABORT_FAILED, xfer=%d, " \
"resid=%d", data_xfer_length,
pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
0);
break;
case SRB_STATUS_PARITY_ERROR:
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_PARITY_ERROR, xfer=%d, " \
"resid=%d", data_xfer_length,
pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
break;
case SRB_STATUS_NO_DEVICE:
case SRB_STATUS_INVALID_PATH_ID:
case SRB_STATUS_INVALID_TARGET_ID:
case SRB_STATUS_INVALID_LUN:
case SRB_STATUS_SELECTION_TIMEOUT:
#ifdef DEBUG
if (AAC_DEV_IS_VALID(acp->dvp)) {
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_NO_DEVICE(%d), " \
"xfer=%d, resid=%d ",
srb_status & 0x3f,
data_xfer_length, pkt->pkt_resid);
}
#endif
aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
break;
case SRB_STATUS_COMMAND_TIMEOUT:
case SRB_STATUS_TIMEOUT:
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
"resid=%d", data_xfer_length,
pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
STAT_TIMEOUT);
break;
case SRB_STATUS_BUS_RESET:
AACDB_PRINT(softs, CE_NOTE,
"SRB_STATUS_BUS_RESET, xfer=%d, " \
"resid=%d", data_xfer_length,
pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_RESET,
STAT_BUS_RESET);
break;
default:
AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
"xfer=%d, resid=%d", srb_status & 0x3f,
data_xfer_length, pkt->pkt_resid);
aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
break;
}
} else if (scsi_status == STATUS_CHECK) {
/* CHECK CONDITION */
struct scsi_arq_status *arqstat =
(void *)(pkt->pkt_scbp);
uint32_t sense_data_size;
pkt->pkt_state |= STATE_ARQ_DONE;
*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
arqstat->sts_rqpkt_reason = CMD_CMPLT;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_state =
STATE_GOT_BUS |
STATE_GOT_TARGET |
STATE_SENT_CMD |
STATE_XFERRED_DATA;
arqstat->sts_rqpkt_statistics = 0;
sense_data_size = ddi_get32(acc,
&resp->sense_data_size);
ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
AACDB_PRINT(softs, CE_NOTE,
"CHECK CONDITION: sense len=%d, xfer len=%d",
sense_data_size, data_xfer_length);
if (sense_data_size > SENSE_LENGTH)
sense_data_size = SENSE_LENGTH;
ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
(uint8_t *)resp->sense_data, sense_data_size,
DDI_DEV_AUTOINCR);
} else {
AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \
"scsi_status=%d, srb_status=%d",
scsi_status, srb_status);
aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
}
} else {
AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
resp_status);
aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
}
}
/*
* Handle completed IOCTL command
*/
/*ARGSUSED*/
void
aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
struct aac_slot *slotp = acp->slotp;
/*
* NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
* may wait on softs->event, so use cv_broadcast() instead
* of cv_signal().
*/
ASSERT(acp->flags & AAC_CMD_SYNC);
ASSERT(acp->flags & AAC_CMD_NO_CB);
/* Get the size of the response FIB from its FIB.Header.Size field */
acp->fib_size = ddi_get16(slotp->fib_acc_handle,
&slotp->fibp->Header.Size);
ASSERT(acp->fib_size <= softs->aac_max_fib_size);
ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
(uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
}
/*
* Handle completed sync fib command
*/
/*ARGSUSED*/
void
aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
}
/*
* Handle completed Flush command
*/
/*ARGSUSED*/
static void
aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
struct aac_slot *slotp = acp->slotp;
ddi_acc_handle_t acc = slotp->fib_acc_handle;
struct aac_synchronize_reply *resp;
uint32_t status;
ASSERT(!(acp->flags & AAC_CMD_SYNC));
acp->pkt->pkt_state |= STATE_GOT_STATUS;
resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
status = ddi_get32(acc, &resp->Status);
if (status != CT_OK)
aac_set_arq_data_hwerr(acp);
}
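/*
 * Handle completed START/STOP UNIT command
 */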
/*ARGSUSED*/
static void
aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
{
struct aac_slot *slotp = acp->slotp;
ddi_acc_handle_t acc = slotp->fib_acc_handle;
struct aac_Container_resp *resp;
uint32_t status;
ASSERT(!(acp->flags & AAC_CMD_SYNC));
acp->pkt->pkt_state |= STATE_GOT_STATUS;
resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
status = ddi_get32(acc, &resp->Status);
if (status != 0) {
AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
aac_set_arq_data_hwerr(acp);
}
}
/*
* Access PCI space to see if the driver can support the card
*/
static int
aac_check_card_type(struct aac_softstate *softs)
{
ddi_acc_handle_t pci_config_handle;
int card_index;
uint32_t pci_cmd;
/* Map pci configuration space */
if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
return (AACERR);
}
softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
softs->subvendid = pci_config_get16(pci_config_handle,
PCI_CONF_SUBVENID);
softs->subsysid = pci_config_get16(pci_config_handle,
PCI_CONF_SUBSYSID);
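/* Look for an exact vendor/device/subvendor/subsystem match in aac_cards[] */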
card_index = 0;
while (!CARD_IS_UNKNOWN(card_index)) {
if ((aac_cards[card_index].vendor == softs->vendid) &&
(aac_cards[card_index].device == softs->devid) &&
(aac_cards[card_index].subvendor == softs->subvendid) &&
(aac_cards[card_index].subsys == softs->subsysid)) {
break;
}
card_index++;
}
softs->card = card_index;
softs->hwif = aac_cards[card_index].hwif;
/*
 * Unknown aac card:
 * do a generic match based on the VendorID and DeviceID to
 * support newer cards in the aac family.
 */
if (CARD_IS_UNKNOWN(card_index)) {
if (softs->vendid != 0x9005) {
AACDB_PRINT(softs, CE_WARN,
"Unknown vendor 0x%x", softs->vendid);
goto error;
}
switch (softs->devid) {
case 0x285:
softs->hwif = AAC_HWIF_I960RX;
break;
case 0x286:
softs->hwif = AAC_HWIF_RKT;
break;
default:
AACDB_PRINT(softs, CE_WARN,
"Unknown device \"pci9005,%x\"", softs->devid);
goto error;
}
}
/* Set hardware dependent interface */
switch (softs->hwif) {
case AAC_HWIF_I960RX:
softs->aac_if = aac_rx_interface;
softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
break;
case AAC_HWIF_RKT:
softs->aac_if = aac_rkt_interface;
softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
break;
default:
AACDB_PRINT(softs, CE_WARN,
"Unknown hardware interface %d", softs->hwif);
goto error;
}
/* Set card names */
(void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
AAC_VENDOR_LEN);
(void) strncpy(softs->product_name, aac_cards[card_index].desc,
AAC_PRODUCT_LEN);
/* Set up quirks */
softs->flags = aac_cards[card_index].quirks;
/* Force the busmaster enable bit on */
pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
if ((pci_cmd & PCI_COMM_ME) == 0) {
pci_cmd |= PCI_COMM_ME;
pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
if ((pci_cmd & PCI_COMM_ME) == 0) {
cmn_err(CE_CONT, "?Cannot enable busmaster bit");
goto error;
}
}
/* Set memory base to map */
softs->pci_mem_base_paddr = 0xfffffff0UL & \
pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
pci_config_teardown(&pci_config_handle);
return (AACOK); /* card type detected */
error:
pci_config_teardown(&pci_config_handle);
return (AACERR); /* no matched card found */
}
/*
* Register the hardware and soft interrupt handlers.
*/
static int
aac_register_intrs(struct aac_softstate *softs)
{
dev_info_t *dip;
int intr_types;
ASSERT(softs->devinfo_p);
dip = softs->devinfo_p;
/* Get the interrupt types supported by the device */
if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"ddi_intr_get_supported_types() failed");
return (AACERR);
}
AACDB_PRINT(softs, CE_NOTE,
"ddi_intr_get_supported_types() ret: 0x%x", intr_types);
/* Query interrupt capability and allocate/initialize the needed structures; MSI is preferred over FIXED */
if (intr_types & DDI_INTR_TYPE_MSI) {
if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
!= DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"MSI interrupt query failed");
return (AACERR);
}
softs->intr_type = DDI_INTR_TYPE_MSI;
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
!= DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"FIXED interrupt query failed");
return (AACERR);
}
softs->intr_type = DDI_INTR_TYPE_FIXED;
} else {
AACDB_PRINT(softs, CE_WARN,
"Device cannot suppport both FIXED and MSI interrupts");
return (AACERR);
}
/* Connect interrupt handlers */
if (aac_add_intrs(softs) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"Interrupt registration failed, intr type: %s",
softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
return (AACERR);
}
(void) aac_enable_intrs(softs);
if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
AACDB_PRINT(softs, CE_WARN,
"Can not setup soft interrupt handler!");
aac_remove_intrs(softs);
return (AACERR);
}
return (AACOK);
}
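/*
 * Remove the hardware interrupt handlers and the soft interrupt handler
 * registered by aac_register_intrs()
 */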
static void
aac_unregister_intrs(struct aac_softstate *softs)
{
aac_remove_intrs(softs);
ddi_remove_softintr(softs->softint_id);
}
/*
* Check the firmware to determine the features to support and the FIB
* parameters to use.
*/
static int
aac_check_firmware(struct aac_softstate *softs)
{
uint32_t options;
uint32_t atu_size;
ddi_acc_handle_t pci_handle;
uint8_t *data;
uint32_t max_fibs;
uint32_t max_fib_size;
uint32_t sg_tablesize;
uint32_t max_sectors;
uint32_t status;
/* Get supported options */
if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
&status)) != AACOK) {
if (status != SRB_STATUS_INVALID_REQUEST) {
cmn_err(CE_CONT,
"?Fatal error: request adapter info error");
return (AACERR);
}
options = 0;
atu_size = 0;
} else {
options = AAC_MAILBOX_GET(softs, 1);
atu_size = AAC_MAILBOX_GET(softs, 2);
}
if (softs->state & AAC_STATE_RESET) {
if ((softs->support_opt == options) &&
(softs->atu_size == atu_size))
return (AACOK);
cmn_err(CE_WARN,
"?Fatal error: firmware changed, system needs reboot");
return (AACERR);
}
/*
* The following critical settings are initialized only once during
* driver attachment.
*/
softs->support_opt = options;
softs->atu_size = atu_size;
/* Process supported options */
if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
(softs->flags & AAC_FLAGS_NO4GB) == 0) {
AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
softs->flags |= AAC_FLAGS_4GB_WINDOW;
} else {
/*
* Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
* only. IO is handled by the DMA engine which does not suffer
* from the ATU window programming workarounds necessary for
* CPU copy operations.
*/
softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
}
if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
softs->flags |= AAC_FLAGS_SG_64BIT;
}
if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
softs->flags |= AAC_FLAGS_ARRAY_64BIT;
AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
}
if (options & AAC_SUPPORTED_NONDASD) {
if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
"nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
if (strcmp((char *)data, "yes") == 0) {
AACDB_PRINT(softs, CE_NOTE,
"!Enable Non-DASD access");
softs->flags |= AAC_FLAGS_NONDASD;
}
ddi_prop_free(data);
}
}
/* Read preferred settings */
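/*
 * The GETCOMMPREF reply packs the preferred parameters into three
 * mailbox words: word 1 carries the max FIB size (low 16 bits) and
 * the max transfer size in 1KB units (high 16 bits), word 2 carries
 * the SG table size (high 16 bits), and word 3 carries the max FIB
 * count (low 16 bits).
 */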
max_fib_size = 0;
if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
0, 0, 0, 0, NULL)) == AACOK) {
options = AAC_MAILBOX_GET(softs, 1);
max_fib_size = (options & 0xffff);
max_sectors = (options >> 16) << 1;
options = AAC_MAILBOX_GET(softs, 2);
sg_tablesize = (options >> 16);
options = AAC_MAILBOX_GET(softs, 3);
max_fibs = (options & 0xffff);
}
/* Enable new comm. and rawio at the same time */
if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
(max_fib_size != 0)) {
/* Remap the PCI memory BAR to cover the full ATU size and save the new handle */
if ((atu_size > softs->map_size) &&
(ddi_regs_map_setup(softs->devinfo_p, 1,
(caddr_t *)&data, 0, atu_size, &softs->reg_attr,
&pci_handle) == DDI_SUCCESS)) {
ddi_regs_map_free(&softs->pci_mem_handle);
softs->pci_mem_handle = pci_handle;
softs->pci_mem_base_vaddr = data;
softs->map_size = atu_size;
}
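/* New comm. is only enabled if the whole ATU range could be mapped */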
if (atu_size == softs->map_size) {
softs->flags |= AAC_FLAGS_NEW_COMM;
AACDB_PRINT(softs, CE_NOTE,
"!Enable New Comm. interface");
}
}
/* Set FIB parameters */
if (softs->flags & AAC_FLAGS_NEW_COMM) {
softs->aac_max_fibs = max_fibs;
softs->aac_max_fib_size = max_fib_size;
softs->aac_max_sectors = max_sectors;
softs->aac_sg_tablesize = sg_tablesize;
softs->flags |= AAC_FLAGS_RAW_IO;
AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
} else {
softs->aac_max_fibs =
(softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
softs->aac_max_fib_size = AAC_FIB_SIZE;
softs->aac_max_sectors = 128; /* 64K */
if (softs->flags & AAC_FLAGS_17SG)
softs->aac_sg_tablesize = 17;
else if (softs->flags & AAC_FLAGS_34SG)
softs->aac_sg_tablesize = 34;
else if (softs->flags & AAC_FLAGS_SG_64BIT)
softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
sizeof (struct aac_blockwrite64) +
sizeof (struct aac_sg_entry64)) /
sizeof (struct aac_sg_entry64);
else
softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
sizeof (struct aac_blockwrite) +
sizeof (struct aac_sg_entry)) /
sizeof (struct aac_sg_entry);
}
if ((softs->flags & AAC_FLAGS_RAW_IO) &&
(softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
softs->flags |= AAC_FLAGS_LBA_64BIT;
AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
}
softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
/*
* 64K maximum segment size in scatter gather list is controlled by
* the NEW_COMM bit in the adapter information. If not set, the card
* can only accept a maximum of 64K. It is not recommended to permit
* more than 128KB of total transfer size to the adapters because
* performance is negatively impacted.
*
* For new comm, segment size equals max xfer size. For old comm,
* we use 64K for both.
*/
softs->buf_dma_attr.dma_attr_count_max =
softs->buf_dma_attr.dma_attr_maxxfer - 1;
/* Setup FIB operations */
if (softs->flags & AAC_FLAGS_RAW_IO)
softs->aac_cmd_fib = aac_cmd_fib_rawio;
else if (softs->flags & AAC_FLAGS_SG_64BIT)
softs->aac_cmd_fib = aac_cmd_fib_brw64;
else
softs->aac_cmd_fib = aac_cmd_fib_brw;
softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
/* 64-bit LBA needs descriptor format sense data */
softs->slen = sizeof (struct scsi_arq_status);
if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
softs->slen < AAC_ARQ64_LENGTH)
softs->slen = AAC_ARQ64_LENGTH;
AACDB_PRINT(softs, CE_NOTE,
"!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
softs->aac_max_fibs, softs->aac_max_fib_size,
softs->aac_max_sectors, softs->aac_sg_tablesize);
return (AACOK);
}
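/*
 * Copy a FsaRev revision structure out of the response FIB, one field at
 * a time through the DDI access handle
 */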
static void
aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
struct FsaRev *fsarev1)
{
ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
}
/*
* The following function comes from Adaptec:
*
* Query adapter information and supplement adapter information
*/
static int
aac_get_adapter_info(struct aac_softstate *softs,
struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
{
struct aac_cmd *acp = &softs->sync_ac;
ddi_acc_handle_t acc;
struct aac_fib *fibp;
struct aac_adapter_info *ainfp;
struct aac_supplement_adapter_info *sinfp;
int rval;
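/* Issue RequestAdapterInfo through the shared synchronous FIB slot */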
(void) aac_sync_fib_slot_bind(softs, acp);
acc = acp->slotp->fib_acc_handle;
fibp = acp->slotp->fibp;
ddi_put8(acc, &fibp->data[0], 0);
if (aac_sync_fib(softs, RequestAdapterInfo,
AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) {
AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
rval = AACERR;
goto finish;
}
ainfp = (struct aac_adapter_info *)fibp->data;
if (ainfr) {
AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
aac_fsa_rev(softs, &ainfp->KernelRevision,
&ainfr->KernelRevision);
aac_fsa_rev(softs, &ainfp->MonitorRevision,
&ainfr->MonitorRevision);
aac_fsa_rev(softs, &ainfp->HardwareRevision,
&ainfr->HardwareRevision);
aac_fsa_rev(softs, &ainfp->BIOSRevision,
&ainfr->BIOSRevision);
AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
}
if (sinfr) {
if (!(softs->support_opt &
AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
AACDB_PRINT(softs, CE_WARN,
"SupplementAdapterInfo not supported");
rval = AACERR;
goto finish;
}
ddi_put8(acc, &fibp->data[0], 0);
if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
AAC_FIB_SIZEOF(struct aac_supplement_adapter_info))
!= AACOK) {
AACDB_PRINT(softs, CE_WARN,
"RequestSupplementAdapterInfo failed");
rval = AACERR;
goto finish;
}
sinfp = (struct aac_supplement_adapter_info *)fibp->data;
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
sizeof (struct vpd_info));
aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
&sinfr->FlashFirmwareRevision);
AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
&sinfr->FlashFirmwareBootRevision);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
MFG_PCBA_SERIAL_NUMBER_WIDTH);
AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
MFG_WWN_WIDTH);
AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
if (sinfr->ExpansionFlag == 1) {
AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
AAC_GET_FIELD32(acc, sinfr, sinfp,
SupportedPerformanceMode);
AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
ReservedGrowth[0], 80);
}
}
rval = AACOK;
finish:
aac_sync_fib_slot_release(softs, acp);
return (rval);
}
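/*
 * Query the adapter for the number of SCSI buses and the number of
 * targets per bus
 */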
static int
aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
uint32_t *tgt_max)
{
struct aac_cmd *acp = &softs->sync_ac;
ddi_acc_handle_t acc;
struct aac_fib *fibp;
struct aac_ctcfg *c_cmd;
struct aac_ctcfg_resp *c_resp;
uint32_t scsi_method_id;
struct aac_bus_info *cmd;
struct aac_bus_info_response *resp;
int rval;
(void) aac_sync_fib_slot_bind(softs, acp);
acc = acp->slotp->fib_acc_handle;
fibp = acp->slotp->fibp;
/* Detect MethodId */
c_cmd = (struct aac_ctcfg *)&fibp->data[0];
ddi_put32(acc, &c_cmd