blob: eac5daca8dca50558d85d0651385a6864211c15b [file] [log] [blame]
<
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* This file contains various support routines.
*/
#include <sys/scsi/adapters/pmcs/pmcs.h>
/*
* Local static data
*/
/*
 * Quiesce interval for the SCSI target map: how long observations must
 * remain unchanged before the map is considered stable.
 * NOTE(review): presumably passed to scsi_hba_tgtmap_create() elsewhere
 * in the driver — confirm against the attach path.
 */
static int tgtmap_stable_usec = MICROSEC;	/* 1 second */
/* Target map full-sync (config sync) deadline. */
static int tgtmap_csync_usec = 10 * MICROSEC;	/* 10 seconds */
/*
* SAS Topology Configuration
*/
static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *);
static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int);
static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *);
static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *,
pmcs_phy_t *);
static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *);
static void pmcs_flush_nonio_cmds(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt);
static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t);
static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *);
static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_begin_observations(pmcs_hw_t *);
static void pmcs_flush_observations(pmcs_hw_t *);
static boolean_t pmcs_report_observations(pmcs_hw_t *);
static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *,
pmcs_phy_t *);
#ifdef DEBUG
static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *);
#endif
static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_lock_phy_impl(pmcs_phy_t *, int);
static void pmcs_unlock_phy_impl(pmcs_phy_t *, int);
static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *);
static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_reap_dead_phy(pmcs_phy_t *);
static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *);
static boolean_t pmcs_phy_target_match(pmcs_phy_t *);
static void pmcs_iport_active(pmcs_iport_t *);
static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
void **);
static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *,
scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_get_fw_version(pmcs_hw_t *);
static int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *, hrtime_t *);
/*
* Often used strings
*/
/* Shared printf formats; each takes __func__ as its single argument. */
const char pmcs_nowrk[] = "%s: unable to get work structure";
const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry";
const char pmcs_timeo[] = "%s: command timed out";

/* Defined elsewhere in the driver. */
extern const ddi_dma_attr_t pmcs_dattr;
extern kmutex_t pmcs_trace_lock;
/*
* Some Initial setup steps.
*/
/*
 * One-time MPI setup for the chip:
 *   - verify the AAP firmware unit is in READY state,
 *   - locate and validate the MPI configuration table,
 *   - read chip capabilities (queue counts, phy count, max commands),
 *   - clamp ioq_depth to a sane range,
 *   - allocate and program the inbound/outbound queue DMA memory,
 *   - set up firmware event logging and interrupt auto-clear policy.
 *
 * Returns 0 on success, -1 on any failure (FM impact reported for
 * AAP state errors).
 */
int
pmcs_setup(pmcs_hw_t *pwp)
{
	uint32_t barval = pwp->mpibar;
	uint32_t i, scratch, regbar, regoff, barbar, baroff;
	uint32_t new_ioq_depth, ferr = 0;

	/*
	 * Check current state. If we're not at READY state,
	 * we can't go further.
	 */
	scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
	if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: AAP Error State (0x%x)",
		    __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
		    PMCS_MSGU_AAP_ERROR_MASK);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		return (-1);
	}
	if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: AAP unit not ready (state 0x%x)",
		    __func__, scratch & PMCS_MSGU_AAP_STATE_MASK);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	/*
	 * Read the offset from the Message Unit scratchpad 0 register.
	 * This allows us to read the MPI Configuration table.
	 *
	 * Check its signature for validity.
	 */
	baroff = barval;
	barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT;
	baroff &= PMCS_MSGU_MPI_OFFSET_MASK;

	regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0);
	regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT;
	regoff &= PMCS_MSGU_MPI_OFFSET_MASK;

	if (regoff > baroff) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: bad MPI Table Length (register offset=0x%08x, "
		    "passed offset=0x%08x)", __func__, regoff, baroff);
		return (-1);
	}
	if (regbar != barbar) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: bad MPI BAR (register BAROFF=0x%08x, "
		    "passed BAROFF=0x%08x)", __func__, regbar, barbar);
		return (-1);
	}
	pwp->mpi_offset = regoff;
	if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Bad MPI Configuration Table Signature 0x%x", __func__,
		    pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS));
		return (-1);
	}
	if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Bad MPI Configuration Revision 0x%x", __func__,
		    pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR));
		return (-1);
	}

	/*
	 * Generate offsets for the General System, Inbound Queue Configuration
	 * and Outbound Queue configuration tables. This way the macros to
	 * access those tables will work correctly.
	 */
	pwp->mpi_gst_offset =
	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO);
	pwp->mpi_iqc_offset =
	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO);
	pwp->mpi_oqc_offset =
	    pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO);

	pmcs_get_fw_version(pwp);

	pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO);
	pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16;

	pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
	pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
	pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
	if (pwp->max_iq <= PMCS_NIQ) {
		/*
		 * BUGFIX: this message previously said "max_oq" for the
		 * inbound-queue check and printed the "need" and supported
		 * values transposed.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: not enough Inbound Queues supported "
		    "(need %d, max_iq=%d)", __func__, PMCS_NIQ, pwp->max_iq);
		return (-1);
	}
	if (pwp->max_oq <= PMCS_NOQ) {
		/* BUGFIX: "need"/supported values were transposed here too. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: not enough Outbound Queues supported "
		    "(need %d, max_oq=%d)", __func__, PMCS_NOQ, pwp->max_oq);
		return (-1);
	}
	if (pwp->nphy == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: zero phys reported", __func__);
		return (-1);
	}
	if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) {
		pwp->hipri_queue = (1 << PMCS_IQ_OTHER);
	}

	/* Route per-phy events and NCQ events to the events outbound queue. */
	for (i = 0; i < pwp->nphy; i++) {
		PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i);
		PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i);
	}

	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2,
	    (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) |
	    (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT));

	/*
	 * Verify that ioq_depth is valid (> 0 and not so high that it
	 * would cause us to overrun the chip with commands).
	 */
	if (pwp->ioq_depth == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: I/O queue depth set to 0. Setting to %d",
		    __func__, PMCS_NQENTRY);
		pwp->ioq_depth = PMCS_NQENTRY;
	}

	if (pwp->ioq_depth < PMCS_MIN_NQENTRY) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: I/O queue depth set too low (%d). Setting to %d",
		    __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY);
		pwp->ioq_depth = PMCS_MIN_NQENTRY;
	}

	if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) {
		new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: I/O queue depth set too high (%d). Setting to %d",
		    __func__, pwp->ioq_depth, new_ioq_depth);
		pwp->ioq_depth = new_ioq_depth;
	}

	/*
	 * Allocate consistent memory for OQs and IQs.
	 */
	pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr;
	pwp->iqp_dma_attr.dma_attr_align =
	    pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE;

	/*
	 * The Rev C chip has the ability to do PIO to or from consistent
	 * memory anywhere in a 64 bit address space, but the firmware is
	 * not presently set up to do so.
	 */
	pwp->iqp_dma_attr.dma_attr_addr_hi =
	    pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull;

	for (i = 0; i < PMCS_NIQ; i++) {
		if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr,
		    &pwp->iqp_acchdls[i],
		    &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
		    (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for iqp[%d]", i);
			return (-1);
		}
		bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
	}

	for (i = 0; i < PMCS_NOQ; i++) {
		if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr,
		    &pwp->oqp_acchdls[i],
		    &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
		    (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for oqp[%d]", i);
			return (-1);
		}
		bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
	}

	/*
	 * Install the IQ and OQ addresses (and null out the rest).
	 */
	for (i = 0; i < pwp->max_iq; i++) {
		pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i));
		if (i < PMCS_NIQ) {
			if (i != PMCS_IQ_OTHER) {
				pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
				    pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16));
			} else {
				/* Bit 30 marks this IQ as high priority. */
				pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
				    (1 << 30) | pwp->ioq_depth |
				    (PMCS_QENTRY_SIZE << 16));
			}
			pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i),
			    DWORD1(pwp->iqaddr[i]));
			pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i),
			    DWORD0(pwp->iqaddr[i]));
			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i),
			    DWORD1(pwp->ciaddr+IQ_OFFSET(i)));
			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i),
			    DWORD0(pwp->ciaddr+IQ_OFFSET(i)));
		} else {
			pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
			pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
			pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
			pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
		}
	}

	for (i = 0; i < pwp->max_oq; i++) {
		pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i));
		if (i < PMCS_NOQ) {
			pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth |
			    (PMCS_QENTRY_SIZE << 16) | OQIEX);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i),
			    DWORD1(pwp->oqaddr[i]));
			pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i),
			    DWORD0(pwp->oqaddr[i]));
			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i),
			    DWORD1(pwp->ciaddr+OQ_OFFSET(i)));
			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i),
			    DWORD0(pwp->ciaddr+OQ_OFFSET(i)));
			pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i),
			    pwp->oqvec[i] << 24);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
		} else {
			pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
			pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
		}
	}

	/*
	 * Set up logging, if defined. The firmware log DMA area is split
	 * in half between the MSGU (AAP) and IOP event logs.
	 */
	if (pwp->fwlog) {
		uint64_t logdma = pwp->fwaddr;
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma));
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma));
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1);
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog);
		logdma += (PMCS_FWLOG_SIZE >> 1);
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma));
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma));
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1);
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog);
	}

	/*
	 * Interrupt vectors, outbound queues, and odb_auto_clear
	 *
	 * MSI/MSI-X:
	 * If we got 4 interrupt vectors, we'll assign one to each outbound
	 * queue as well as the fatal interrupt, and auto clear can be set
	 * for each.
	 *
	 * If we only got 2 vectors, one will be used for I/O completions
	 * and the other for the other two vectors. In this case, auto_
	 * clear can only be set for I/Os, which is fine. The fatal
	 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which
	 * is not an interrupt vector.
	 *
	 * MSI/MSI-X/INT-X:
	 * If we only got 1 interrupt vector, auto_clear must be set to 0,
	 * and again the fatal interrupt will be mapped to the
	 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector).
	 */
	switch (pwp->int_type) {
	case PMCS_INT_MSIX:
	case PMCS_INT_MSI:
		switch (pwp->intr_cnt) {
		case 1:
			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
			    (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
			pwp->odb_auto_clear = 0;
			break;
		case 2:
			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
			    (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
			pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) |
			    (1 << PMCS_MSIX_IODONE);
			break;
		case 4:
			pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
			    (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT));
			pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) |
			    (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) |
			    (1 << PMCS_MSIX_EVENTS);
			break;
		}
		break;

	case PMCS_INT_FIXED:
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR,
		    PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
		pwp->odb_auto_clear = 0;
		break;
	}

	/*
	 * If the open retry interval is non-zero, set it.
	 */
	if (pwp->open_retry_interval != 0) {
		int phynum;

		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Setting open retry interval to %d usecs", __func__,
		    pwp->open_retry_interval);
		for (phynum = 0; phynum < pwp->nphy; phynum ++) {
			pmcs_wr_gsm_reg(pwp, OPEN_RETRY_INTERVAL(phynum),
			    pwp->open_retry_interval);
		}
	}

	/*
	 * Enable Interrupt Reassertion
	 * Default Delay 1000us
	 */
	ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR);
	if ((ferr & PMCS_MPI_IRAE) == 0) {
		ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK);
		pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE);
	}

	pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear);
	pwp->mpi_table_setup = 1;
	return (0);
}
/*
* Start the Message Passing protocol with the PMC chip.
*/
/*
 * Kick off the Message Passing Interface on the chip: ring the MPI
 * initialization doorbell, wait for the firmware to acknowledge it,
 * and then confirm the general status table reports the INIT state.
 *
 * Returns 0 on success, -1 if the doorbell is never acknowledged or
 * the firmware fails to reach INIT state.
 */
int
pmcs_start_mpi(pmcs_hw_t *pwp)
{
	int retry = 0;

	/* Ring the inbound doorbell to request MPI initialization. */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);

	/* Poll up to ~1 second (1000 x 1ms) for the bit to self-clear. */
	while (retry < 1000) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPIINI) == 0) {
			break;
		}
		drv_usecwait(1000);
		retry++;
	}
	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
		/* Firmware never acknowledged the doorbell. */
		return (-1);
	}

	/* Give the firmware time to settle before checking its state. */
	drv_usecwait(500000);

	/*
	 * Check to make sure we got to INIT state.
	 */
	if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
	    PMCS_MPI_STATE_INIT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__,
		    pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
		    pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
		return (-1);
	}
	return (0);
}
/*
* Stop the Message Passing protocol with the PMC chip.
*/
/*
 * Shut down the Message Passing Interface: zero every inbound and
 * outbound queue configuration entry, disable fatal-error interrupts,
 * then ring the MPI termination doorbell and wait for the firmware
 * to acknowledge it.
 *
 * Returns 0 on success, -1 if the termination doorbell is never
 * acknowledged.
 */
int
pmcs_stop_mpi(pmcs_hw_t *pwp)
{
	int qnum, retry;

	/* Wipe all inbound queue configuration entries. */
	for (qnum = 0; qnum < pwp->max_iq; qnum++) {
		pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(qnum), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(qnum), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(qnum), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(qnum), 0);
		pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(qnum), 0);
	}

	/* Wipe all outbound queue configuration entries. */
	for (qnum = 0; qnum < pwp->max_oq; qnum++) {
		pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(qnum), 0);
		pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(qnum), 0);
	}

	/* Disable fatal error interrupt reporting. */
	pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);

	/* Ring the termination doorbell; poll up to 2 seconds (2000 x 1ms). */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
	for (retry = 0; retry < 2000; retry++) {
		if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
		    PMCS_MSGU_IBDB_MPICTU) == 0) {
			break;
		}
		drv_usecwait(1000);
	}

	if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: MPI stop failed", __func__);
		return (-1);
	}
	return (0);
}
/*
* Do a sequence of ECHO messages to test for MPI functionality,
* all inbound and outbound queue functionality and interrupts.
*/
/*
 * Issue a series of ECHO IOMBs, rotating through every inbound and
 * outbound queue, to verify MPI/queue/interrupt functionality.  The
 * measured round-trip latency is also used to seed the interrupt
 * coalescing parameters.
 *
 * Returns 0 on success, -1 on resource exhaustion or timeout.
 */
int
pmcs_echo_test(pmcs_hw_t *pwp)
{
	echo_test_t fred;		/* echo payload copied into the IOMB */
	struct pmcwork *pwrk;
	uint32_t *msg, count;
	int iqe = 0, iqo = 0, result, rval = 0;	/* IQ/OQ rotation cursors */
	int iterations;
	hrtime_t echo_start, echo_end, echo_total;

	ASSERT(pwp->max_cmd > 0);

	/*
	 * We want iterations to be max_cmd * 3 to ensure that we run the
	 * echo test enough times to iterate through every inbound queue
	 * at least twice.
	 */
	iterations = pwp->max_cmd * 3;

	echo_total = 0;
	count = 0;

	while (count < iterations) {
		/* One work structure (and htag) per echo. */
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nowrk, __func__);
			rval = -1;
			break;
		}

		mutex_enter(&pwp->iqp_lock[iqe]);
		msg = GET_IQ_ENTRY(pwp, iqe);
		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[iqe]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nomsg, __func__);
			rval = -1;
			break;
		}

		bzero(msg, PMCS_QENTRY_SIZE);

		if (iqe == PMCS_IQ_OTHER) {
			/* This is on the high priority queue */
			msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
		} else {
			msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
		}
		msg[1] = LE_32(pwrk->htag);
		/* Recognizable payload: signature, iteration, local address */
		fred.signature = 0xdeadbeef;
		fred.count = count;
		fred.ptr = &count;
		(void) memcpy(&msg[2], &fred, sizeof (fred));
		pwrk->state = PMCS_WORK_STATE_ONCHIP;
		INC_IQ_ENTRY(pwp, iqe);

		echo_start = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__start,
		    hrtime_t, echo_start, uint32_t, pwrk->htag);

		/* Advance to the next IQ and OQ for the next iteration. */
		if (++iqe == PMCS_NIQ) {
			iqe = 0;
		}
		if (++iqo == PMCS_NOQ) {
			iqo = 0;
		}

		WAIT_FOR(pwrk, 250, result);
		pmcs_pwork(pwp, pwrk);

		echo_end = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__end,
		    hrtime_t, echo_end, int, result);
		echo_total += (echo_end - echo_start);

		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: command timed out on echo test #%d",
			    __func__, count);
			rval = -1;
			break;
		}
	}

	/*
	 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
	 * remove the overhead of things like the delay in getting signaled
	 * for completion.
	 */
	if (echo_total != 0) {
		/* Halve the mean to approximate one-way latency. */
		pwp->io_intr_coal.intr_latency =
		    (echo_total / iterations) / 2;
		pwp->io_intr_coal.intr_threshold =
		    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
		    pwp->io_intr_coal.intr_latency);
	}

	return (rval);
}
/*
* Start the (real) phys
*/
/*
 * Start a (real) root phy: build a PHY_START IOMB carrying the link
 * mode/speed and a SAS identify frame, submit it on the high-priority
 * inbound queue, and wait for completion.  On success, the phy is
 * marked started in pwp->phys_started.
 *
 * Returns 0 on success or on "nothing to do" (missing root phys),
 * -1 on resource exhaustion.  A command timeout is logged but does
 * not change the return value.
 */
int
pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
{
	int result;
	uint32_t *msg;
	struct pmcwork *pwrk;
	pmcs_phy_t *pptr;
	sas_identify_af_t sap;

	mutex_enter(&pwp->lock);
	/*
	 * BUGFIX: the original computed (root_phys + phynum) and then
	 * tested the result for NULL, which can never be true unless
	 * root_phys is NULL and phynum is 0.  Test root_phys itself.
	 */
	if (pwp->root_phys == NULL) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: cannot find port %d", __func__, phynum);
		return (0);
	}
	pptr = pwp->root_phys + phynum;
	pmcs_lock_phy(pptr);
	mutex_exit(&pwp->lock);

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (-1);
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (-1);
	}

	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(linkmode | speed | phynum);

	/* Build the SAS identify address frame for this phy. */
	bzero(&sap, sizeof (sap));
	sap.device_type = SAS_IF_DTYPE_ENDPOINT;
	sap.ssp_ini_port = 1;

	if (pwp->separate_ports) {
		pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
	} else {
		/* All phys share the WWN of phy 0 (wide port). */
		pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
	}

	ASSERT(phynum < SAS2_PHYNUM_MAX);
	sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
	(void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Record the programmed and hardware min/max link rates. */
	pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
	pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
	pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
	pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;
	pmcs_unlock_phy(pptr);

	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
	} else {
		mutex_enter(&pwp->lock);
		pwp->phys_started |= (1 << phynum);
		mutex_exit(&pwp->lock);
	}

	return (0);
}
/*
 * Start every root phy that is not administratively blocked, clear its
 * diagnostic counters, and then capture the firmware timestamp.
 *
 * Returns -1 if any phy fails to start; otherwise 0 (a failure to read
 * the firmware timestamp is only logged).
 */
int
pmcs_start_phys(pmcs_hw_t *pwp)
{
	int phynum, rval;

	for (phynum = 0; phynum < pwp->nphy; phynum++) {
		/* Skip phys masked out by the phyid block mask. */
		if (pwp->phyid_block_mask & (1 << phynum)) {
			continue;
		}
		if (pmcs_start_phy(pwp, phynum,
		    (pwp->phymode << PHY_MODE_SHIFT),
		    pwp->physpeed << PHY_LINK_SHIFT)) {
			return (-1);
		}
		if (pmcs_clear_diag_counters(pwp, phynum)) {
			/* Non-fatal; just note the failure. */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to reset counters on PHY (%d)",
			    __func__, phynum);
		}
	}

	rval = pmcs_get_time_stamp(pwp, &pwp->fw_timestamp, &pwp->hrtimestamp);
	if (rval) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Failed to obtain firmware timestamp", __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Firmware timestamp: 0x%" PRIx64, pwp->fw_timestamp);
	}

	return (0);
}
/*
* Called with PHY locked
*/
/*
 * Reset a phy (link reset or hard reset), called with the PHY locked.
 *
 * For a non-root phy (level > 0) this sends an SMP PHY CONTROL request
 * through the parent expander; for a root phy (level == 0) it sends a
 * LOCAL PHY CONTROL IOMB directly to the chip.  The phy lock is dropped
 * while waiting for completion and reacquired before returning.
 *
 * Returns 0 on success (or when the phy is unreachable / is the HBA
 * phy of an expander, where reset is refused), ENOMEM on resource
 * exhaustion, EIO on timeout, or the non-OK firmware status value.
 */
int
pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type)
{
	uint32_t *msg;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;	/* human-readable operation name for logs */
	uint32_t amt;		/* number of IOMB dwords to copy */
	uint32_t pdevid;	/* parent device handle (SMP case only) */
	uint32_t stsoff;	/* dword index of status in the response */
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* Reset goes through the parent expander via SMP. */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		/* Will not reset a root phy that an expander hangs off of. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->arg = iomb;	/* completion copies the response here */

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset. If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 */
		/*
		 * NOTE(review): 0x40910000 appears to be the big-endian SMP
		 * request header (frame type 0x40, function 0x91 PHY CONTROL)
		 * — confirm against the SAS-2 SMP specification.
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_HARD_RESET << 8));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_LINK_RESET << 8));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	htag = pwrk->htag;	/* saved in case we must abort after timeout */

	/* Hold the iport and serialize SMP activity while we wait. */
	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	/* Drop the phy lock across the wait; reacquired below. */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		/* Timed out: try to abort the outstanding request. */
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		return (EIO);
	}
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es =  pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}
/*
* Stop the (real) phys. No PHY or softstate locks are required as this only
* happens during detach.
*/
/*
 * Stop a (real) root phy by sending a PHY_STOP IOMB, then mark the
 * phy unconfigured.  No PHY or softstate locks are required as this
 * only happens during detach.  Resource failures and timeouts are
 * logged but the phy is still marked unconfigured.
 */
void
pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
{
	int result;
	pmcs_phy_t *pptr;
	uint32_t *msg;
	struct pmcwork *pwrk;

	/*
	 * BUGFIX: the original tested (root_phys + phynum) for NULL,
	 * which pointer arithmetic can never produce unless root_phys
	 * is NULL and phynum is 0.  Test root_phys itself.
	 */
	if (pwp->root_phys == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to find port %d", __func__, phynum);
		return;
	}
	pptr = pwp->root_phys + phynum;

	if (pwp->phys_started & (1 << phynum)) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nowrk, __func__);
			return;
		}

		mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
			    pmcs_nomsg, __func__);
			return;
		}

		msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
		msg[1] = LE_32(pwrk->htag);
		msg[2] = LE_32(phynum);
		pwrk->state = PMCS_WORK_STATE_ONCHIP;
		/*
		 * Make this unconfigured now.
		 */
		INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
		WAIT_FOR(pwrk, 1000, result);

		pmcs_pwork(pwp, pwrk);
		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    pptr, NULL, pmcs_timeo, __func__);
		}

		pwp->phys_started &= ~(1 << phynum);
	}

	pptr->configured = 0;
}
/*
* No locks should be required as this is only called during detach
*/
/*
 * Stop all root phys that are not administratively blocked.
 * No locks should be required as this is only called during detach.
 */
void
pmcs_stop_phys(pmcs_hw_t *pwp)
{
	int phynum;

	for (phynum = 0; phynum < pwp->nphy; phynum++) {
		/* Skip phys masked out by the phyid block mask. */
		if (pwp->phyid_block_mask & (1 << phynum)) {
			continue;
		}
		pmcs_stop_phy(pwp, phynum);
	}
}
/*
* Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
* ERR_CNT_RESET: return status of cmd
* DIAG_REPORT_GET: return value of the counter
*/
/*
 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
 * ERR_CNT_RESET: return status of cmd
 * DIAG_REPORT_GET: return value of the counter
 *
 * Returns DDI_FAILURE on resource exhaustion, timeout, or a non-zero
 * firmware status for DIAG_REPORT_GET.
 * NOTE(review): for DIAG_REPORT_GET the raw counter value is returned
 * as an int, so a counter value equal to DDI_FAILURE is ambiguous to
 * callers — verify callers only compare against zero/non-zero.
 */
int
pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc,
    uint8_t phynum)
{
	uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (DDI_FAILURE);
	}
	pwrk->arg = msg;	/* completion copies the response here */
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE));
	msg[1] = LE_32(htag);
	msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) |
	    (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (DDI_FAILURE);
	}
	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);

	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (DDI_FAILURE);
	}

	/* Response dword 3 carries the firmware status. */
	status = LE_32(msg[3]);

	/* Return for counter reset */
	if (cmd == PMCS_ERR_CNT_RESET)
		return (status);

	/* Return for counter value */
	if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed, status (0x%x)", __func__, status);
		return (DDI_FAILURE);
	}
	/* Response dword 4 carries the counter value. */
	return (LE_32(msg[4]));
}
/* Get the current value of the counter for desc on phynum and return it. */
int
pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum)
{
return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum));
}
/* Clear all of the counters for phynum. Returns the status of the command. */
/*
 * Clear all of the diagnostic counters for phynum, one
 * SAS_DIAG_EXECUTE reset command per counter, stopping at the first
 * failure (same order and short-circuit behavior as before, now
 * table-driven instead of four copy-pasted call sites).
 *
 * Returns DDI_SUCCESS if every counter reset succeeded, DDI_FAILURE
 * otherwise.
 */
int
pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
{
	/* Counters to reset, in the original issue order. */
	static const uint32_t cnt_descs[] = {
		PMCS_INVALID_DWORD_CNT,
		PMCS_DISPARITY_ERR_CNT,
		PMCS_LOST_DWORD_SYNC_CNT,
		PMCS_RESET_FAILED_CNT
	};
	uint_t i;

	for (i = 0; i < sizeof (cnt_descs) / sizeof (cnt_descs[0]); i++) {
		if (pmcs_sas_diag_execute(pwp, PMCS_ERR_CNT_RESET,
		    cnt_descs[i], phynum)) {
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
/*
* Get firmware timestamp
*/
/*
 * Get firmware timestamp.
 *
 * Issues a GET_TIME_STAMP IOMB and, on success, stores the 64-bit
 * firmware tick value in *fw_ts and the correlated system hrtime in
 * *sys_hr_ts (captured under pmcs_trace_lock so the firmware/system
 * time pairing stays consistent for trace correlation).
 *
 * Returns 0 on success, -1 on resource exhaustion or timeout.
 */
static int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *fw_ts, hrtime_t *sys_hr_ts)
{
	uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (-1);
	}
	pwrk->arg = msg;	/* completion copies the response here */
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
	msg[1] = LE_32(pwrk->htag);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	COPY_MESSAGE(ptr, msg, 2);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);

	pmcs_pwork(pwp, pwrk);

	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (-1);
	}

	/* Capture system and firmware time atomically w.r.t. tracing. */
	mutex_enter(&pmcs_trace_lock);
	*sys_hr_ts = gethrtime();
	gethrestime(&pwp->sys_timestamp);
	/* Response dwords 2 (low) and 3 (high) form the 64-bit timestamp. */
	*fw_ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
	mutex_exit(&pmcs_trace_lock);

	return (0);
}
/*
* Dump all pertinent registers
*/
/*
 * Dump all pertinent registers at PMCS_PRT_INFO level: message unit
 * doorbell/scratch registers, inbound/outbound queue indices, general
 * status table entries, and per-phy started/link/error state.
 * Read-only diagnostic aid; no device state is modified.
 */
void
pmcs_register_dump(pmcs_hw_t *pwp)
{
	int i;
	uint32_t val;

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
	    ddi_get_instance(pwp->dip));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
	    pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
	/* Consumer/producer indices for each in-use queue. */
	for (i = 0; i < PMCS_NIQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
		    i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
		    i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
	}
	val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
	    "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
	    val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
	    PMCS_HMI_ERR(val));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
	    pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
	for (i = 0; i < pwp->nphy; i++) {
		uint32_t rerrf, pinfo, started = 0, link = 0;
		/* PHY_INFO bit 0 = started, bit 1 = link up. */
		pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
		if (pinfo & 1) {
			started = 1;
			link = pinfo & 2;
		}
		rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
		    i, started, link, rerrf);
	}
	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
	    ddi_get_instance(pwp->dip));
}
/*
* Handle SATA Abort and other error processing
*/
/*
 * Handle SATA Abort and other error processing.
 *
 * Walks the entire phy tree iteratively (children first, using
 * pnext_uplevel[] as an explicit stack of pending siblings).  For each
 * phy it may: abort outstanding NCQ commands (after a READ LOG EXT via
 * scratch space), issue a link reset on failure, send a pending device
 * abort, and clear the target if its iport has gone inactive.  Each phy
 * is processed with its phy lock held.
 *
 * Always returns 0.
 */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
	pmcs_xscsi_t *tgt;
	int r, level = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys;
	mutex_exit(&pwp->lock);

	while (pptr) {
		/*
		 * XXX: Need to make sure this doesn't happen
		 * XXX: when non-NCQ commands are running.
		 */
		pmcs_lock_phy(pptr);
		if (pptr->need_rl_ext) {
			/* SATA phy needing READ LOG EXT / NCQ abort. */
			ASSERT(pptr->dtype == SATA);
			if (pmcs_acquire_scratch(pwp, B_FALSE)) {
				/* Scratch busy; retry this phy next pass. */
				goto next_phy;
			}
			r = pmcs_sata_abort_ncq(pwp, pptr);
			pmcs_release_scratch(pwp);
			if (r == ENOMEM) {
				goto next_phy;
			}
			if (r) {
				/* NCQ abort failed; fall back to link reset. */
				r = pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET);
				if (r == ENOMEM) {
					goto next_phy;
				}
				/* what if other failures happened? */
				pptr->abort_pending = 1;
				pptr->abort_sent = 0;
			}
		}
		if (pptr->abort_pending == 0 || pptr->abort_sent) {
			goto next_phy;
		}
		pptr->abort_pending = 0;
		if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
			/* Couldn't send it now; leave it pending for later. */
			pptr->abort_pending = 1;
			goto next_phy;
		}
		pptr->abort_sent = 1;

		/*
		 * If the iport is no longer active, flush the queues
		 */
		if ((pptr->iport == NULL) ||
		    (pptr->iport->ua_state != UA_ACTIVE)) {
			tgt = pptr->target;
			if (tgt != NULL) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
				    "%s: Clearing target 0x%p, inactive iport",
				    __func__, (void *) tgt);
				mutex_enter(&tgt->statlock);
				pmcs_clear_xp(pwp, tgt);
				mutex_exit(&tgt->statlock);
			}
		}

	next_phy:
		/* Depth-first advance: children first, siblings stacked. */
		if (pptr->children) {
			pnext = pptr->children;
			pnext_uplevel[level++] = pptr->sibling;
		} else {
			pnext = pptr->sibling;
			while ((pnext == NULL) && (level > 0)) {
				pnext = pnext_uplevel[--level];
			}
		}

		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (0);
}
/*
 * Register a device (get a device handle for it).
 * Called with PHY lock held.
 *
 * Builds a PMCIN_REGISTER_DEVICE inbound IOMB for the PHY and waits (up
 * to 250ms) for the firmware reply.  On success, the firmware-assigned
 * device id is stored in pptr->device_id and valid_device_id is set.
 *
 * Returns:
 *	0		success
 *	ENOMEM		no queue entry / work structure available
 *	ETIMEDOUT	no reply from firmware within the wait
 *	EEXIST		returned devid already in use by a different PHY
 *	EIO		firmware returned an error status (or bogus devid)
 *
 * NOTE: the PHY lock is dropped across the wait for the firmware reply
 * and reacquired afterwards.
 */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	int result = 0;
	uint32_t *msg;
	uint32_t tmp, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL ||
	    (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		result = ENOMEM;
		goto out;
	}

	pwrk->arg = iomb;	/* reply IOMB lands here on completion */
	pwrk->dtype = pptr->dtype;
	msg[1] = LE_32(pwrk->htag);
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
	tmp = PMCS_DEVREG_TLR |
	    (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
	/* Root PHYs also encode their phy number alongside the port id */
	if (IS_ROOT_PHY(pptr)) {
		msg[2] = LE_32(pptr->portid |
		    (pptr->phynum << PMCS_PHYID_SHIFT));
	} else {
		msg[2] = LE_32(pptr->portid);
	}
	if (pptr->dtype == SATA) {
		/* Direct-attached SATA is registered differently */
		if (IS_ROOT_PHY(pptr)) {
			tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
		} else {
			tmp |= PMCS_DEVREG_TYPE_SATA;
		}
	} else {
		tmp |= PMCS_DEVREG_TYPE_SAS;
	}
	msg[3] = LE_32(tmp);
	msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
	(void) memcpy(&msg[5], pptr->sas_address, 8);

	CLEAN_MESSAGE(msg, 7);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for the firmware's reply */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		result = ETIMEDOUT;
		goto out;
	}
	status = LE_32(iomb[2]);
	tmp = LE_32(iomb[3]);	/* firmware-assigned device id */
	switch (status) {
	case PMCS_DEVREG_OK:
	case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
	case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
		/* Make sure no other PHY already owns this device id */
		if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
			result = EEXIST;
			goto out;
		} else if (status != PMCS_DEVREG_OK) {
			if (tmp == 0xffffffff) {	/* F/W bug */
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has bogus devid 0x%x",
				    __func__, pptr->path, tmp);
				result = EIO;
				goto out;
			} else {
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has a device id 0x%x",
				    __func__, pptr->path, tmp);
			}
		}
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to register device %s",
		    __func__, status, pptr->path);
		result = EIO;
		goto out;
	}

	/* Registration succeeded; remember the handle */
	pptr->device_id = tmp;
	pptr->valid_device_id = 1;
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
	    " registered with device_id 0x%x (portid %d)", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);

out:
	return (result);
}
/*
 * Deregister a device (remove a device handle).
 * Called with PHY locked.
 *
 * Issues a PMCIN_DEREGISTER_DEVICE_HANDLE IOMB for the PHY's current
 * device id and waits (up to 250ms) for the firmware reply; the PHY
 * lock is dropped across the wait and reacquired afterwards.
 *
 * On any outcome short of a timeout the PHY's device-handle state
 * (device_id, configured, deregister_wait, valid_device_id) is cleared,
 * even if firmware reported an error.  If a work structure or queue
 * entry cannot be obtained, the function silently returns and the
 * deregistration is simply not performed.
 */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	int result;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		return;
	}

	pwrk->arg = iomb;	/* reply IOMB lands here on completion */
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return;
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	COPY_MESSAGE(ptr, msg, 3);
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while waiting for the firmware's reply */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		/* Timed out; leave the PHY's handle state untouched */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		return;
	}
	status = LE_32(iomb[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: device %s deregistered", __func__, pptr->path);
	}

	/* Handle is gone (or unusable); reset the PHY's devreg state */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->valid_device_id = 0;
}
/*
 * Deregister all registered devices.
 *
 * Walks the PHY (sub)tree rooted at phyp; for each PHY, descendants are
 * handled (recursively) before the PHY itself, and any PHY still holding
 * a valid device handle is deregistered.  This only gets done during
 * detach, after all threads and timers have been destroyed.
 */
void
pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
{
	pmcs_phy_t *cur;

	for (cur = phyp; cur != NULL; cur = cur->sibling) {
		/* Depth-first: children go away before their parent */
		if (cur->children != NULL) {
			pmcs_deregister_devices(pwp, cur->children);
		}
		pmcs_lock_phy(cur);
		if (cur->valid_device_id) {
			pmcs_deregister_device(pwp, cur);
		}
		pmcs_unlock_phy(cur);
	}
}
/*
 * Perform a 'soft' reset on the PMC chip.
 *
 * Follows the chip's documented soft-reset sequence (the numbered
 * "Step N" comments below).  On success the inbound/outbound queues,
 * firmware log and driver shadow state are reinitialized; unless
 * no_restart is set, the MPI tables are set up again and MPI is
 * restarted.
 *
 * Returns 0 on success, -1 on failure (in which case the HBA is marked
 * STATE_DEAD).
 *
 * Locking: pwp->lock is taken early (if locks_initted).  It is dropped
 * on the failure paths and on the early-out state checks, but when
 * no_restart is B_TRUE the function returns 0 with pwp->lock still
 * held -- presumably released by the caller; verify against callers
 * before changing this.
 */
int
pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
{
	uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
	pmcs_phy_t *pptr;
	char *msg = NULL;
	int i;

	/*
	 * Disable interrupts
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__);

	if (pwp->locks_initted) {
		mutex_enter(&pwp->lock);
	}
	pwp->blocked = 1;

	/*
	 * Clear our softstate copies of the MSGU and IOP heartbeats.
	 */
	pwp->last_msgu_tick = pwp->last_iop_tick = 0;

	/*
	 * Step 1
	 */
	s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2);
	if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
		pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
		/* Poll up to ~1s for the chip to report reset-ready */
		for (i = 0; i < 100; i++) {
			s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
			    PMCS_MSGU_HOST_SOFT_RESET_READY;
			if (s2) {
				break;
			}
			drv_usecwait(10000);
		}
		s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
		    PMCS_MSGU_HOST_SOFT_RESET_READY;
		if (s2 == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came "
			    "ready", __func__);
			pmcs_register_dump(pwp);
			if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
			    (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
			    PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
				pwp->state = STATE_DEAD;
				pwp->blocked = 0;
				if (pwp->locks_initted) {
					mutex_exit(&pwp->lock);
				}
				return (-1);
			}
		}
	}

	/*
	 * Step 2
	 */
	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0);
	drv_usecwait(10);
	pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT,
	    pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT));
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0);
	drv_usecwait(10);
	pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT,
	    pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT));
	drv_usecwait(10);

	/*
	 * Remember the expected post-reset SFR progress value (current
	 * value with the progress bits toggled), then write the host
	 * soft-reset signature.
	 */
	sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
	    PMCS_MSGU_AAP_SFR_PROGRESS;
	sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS;
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 "
	    "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0),
	    HST_SFT_RESET_SIG);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG);

	/*
	 * Step 3
	 */
	gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
	    gsm & ~PMCS_SOFT_RESET_BITS);
	pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS);

	/*
	 * Step 4: save and disable the parity checks; they are restored
	 * in Step 12 after the reset.
	 */
	rapchk = pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", rapchk, 0);
	pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0);
	wapchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", wapchk, 0);
	pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0);
	wdpchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
	    "%08x -> %08x", wdpchk, 0);
	pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0);

	/*
	 * Step 5
	 */
	drv_usecwait(100);

	/*
	 * Step 5.5 (Temporary workaround for 1.07.xx Beta)
	 */
	tsmode = pmcs_rd_gsm_reg(pwp, 0, PMCS_GPIO_TRISTATE_MODE_ADDR);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x",
	    tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
	pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR,
	    tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
	drv_usecwait(10);

	/*
	 * Step 6
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
	    spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
	pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
	    spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
	drv_usecwait(10);

	/*
	 * Step 7
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
	    spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
	pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));

	/*
	 * Step 8
	 */
	drv_usecwait(100);

	/*
	 * Step 9
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
	    spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
	pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB));

	/*
	 * Step 10
	 */
	drv_usecwait(100);

	/*
	 * Step 11
	 */
	gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
	    gsm | PMCS_SOFT_RESET_BITS);
	pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS);
	drv_usecwait(10);

	/*
	 * Step 12: restore the parity-check settings saved in Step 4.
	 */
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN),
	    rapchk);
	pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk);
	drv_usecwait(10);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN),
	    wapchk);
	pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk);
	drv_usecwait(10);
	/*
	 * Log the value actually restored (wdpchk); this previously
	 * (incorrectly) logged wapchk.
	 */
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
	    "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN),
	    wdpchk);
	pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk);
	drv_usecwait(10);

	/*
	 * Step 13
	 */
	spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
	    spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
	pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
	    spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));

	/*
	 * Step 14
	 */
	drv_usecwait(100);

	/*
	 * Step 15: wait up to ~1s for the AAP firmware to toggle the
	 * SFR progress bits to the value computed before the reset.
	 */
	for (spc = 0, i = 0; i < 1000; i++) {
		drv_usecwait(1000);
		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
		if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) {
			break;
		}
	}

	if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "SFR didn't toggle (sfr 0x%x)", spc);
		pwp->state = STATE_DEAD;
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (-1);
	}

	/*
	 * Step 16
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	/*
	 * Wait for up to 5 seconds for AAP state to come either ready or error.
	 */
	for (i = 0; i < 50; i++) {
		spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
		    PMCS_MSGU_AAP_STATE_MASK;
		if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
		    spc == PMCS_MSGU_AAP_STATE_READY) {
			break;
		}
		drv_usecwait(100000);
	}
	spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
	if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "soft reset failed (state 0x%x)", spc);
		pwp->state = STATE_DEAD;
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (-1);
	}

	/* Clear the firmware log */
	if (pwp->fwlogp) {
		bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
	}

	/* Reset our queue indices and entries */
	bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi));
	bzero(pwp->last_iqci, sizeof (pwp->last_iqci));
	bzero(pwp->last_htag, sizeof (pwp->last_htag));
	for (i = 0; i < PMCS_NIQ; i++) {
		if (pwp->iqp[i]) {
			bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
			pmcs_wr_iqpi(pwp, i, 0);
			pmcs_wr_iqci(pwp, i, 0);
		}
	}
	for (i = 0; i < PMCS_NOQ; i++) {
		if (pwp->oqp[i]) {
			bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
			pmcs_wr_oqpi(pwp, i, 0);
			pmcs_wr_oqci(pwp, i, 0);
		}
	}

	if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING ||
	    pwp->state == STATE_PROBING || pwp->locks_initted == 0) {
		pwp->blocked = 0;
		if (pwp->locks_initted) {
			mutex_exit(&pwp->lock);
		}
		return (0);
	}

	/*
	 * Return at this point if we dont need to startup.
	 * NOTE: pwp->lock is intentionally still held on this path.
	 */
	if (no_restart) {
		return (0);
	}

	ASSERT(pwp->locks_initted != 0);

	/*
	 * Flush the target queues and clear each target's PHY
	 */
	if (pwp->targets) {
		for (i = 0; i < pwp->max_dev; i++) {
			pmcs_xscsi_t *xp = pwp->targets[i];

			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
			xp->phy = NULL;
			mutex_exit(&xp->statlock);
		}
	}

	/*
	 * Zero out the ports list, free non root phys, clear root phys
	 */
	bzero(pwp->ports, sizeof (pwp->ports));
	pmcs_free_all_phys(pwp, pwp->root_phys);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		pmcs_lock_phy(pptr);
		pmcs_clear_phy(pwp, pptr);
		pptr->target = NULL;
		pmcs_unlock_phy(pptr);
	}

	/*
	 * Restore Interrupt Mask
	 */
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);

	pwp->mpi_table_setup = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Set up MPI again.
	 */
	if (pmcs_setup(pwp)) {
		msg = "unable to setup MPI tables again";
		goto fail_restart;
	}
	pmcs_report_fwversion(pwp);

	/*
	 * Restart MPI
	 */
	if (pmcs_start_mpi(pwp)) {
		msg = "unable to restart MPI again";
		goto fail_restart;
	}

	mutex_enter(&pwp->lock);
	SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	mutex_exit(&pwp->lock);

	/*
	 * Run any completions
	 */
	PMCS_CQ_RUN(pwp);

	/*
	 * Delay
	 */
	drv_usecwait(1000000);
	return (0);

fail_restart:
	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;
	mutex_exit(&pwp->lock);
	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
	    "%s: Failed: %s", __func__, msg);
	return (-1);
}
/*
 * Perform a 'hot' reset, which will soft reset the chip and
 * restore the state back to pre-reset context. Called with pwp
 * lock held.
 *
 * Sequence: report empty target sets to SCSA and tear down each
 * iport's phys, take a register dump (in case the reset fails), wait
 * for any in-flight discovery to finish, soft reset the chip, then
 * restart the root phys (which brings the iports back up and
 * eventually re-runs discovery).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; in all cases the function
 * returns with pwp->lock held again, matching its entry condition.
 */
int
pmcs_hot_reset(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;

	ASSERT(mutex_owned(&pwp->lock));
	pwp->state = STATE_IN_RESET;

	/*
	 * For any iports on this HBA, report empty target sets and
	 * then tear them down.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		/* begin/end with no adds in between reports zero targets */
		(void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
		(void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
		pmcs_iport_teardown_phys(iport);
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	/* Grab a register dump, in the event that reset fails */
	pmcs_register_dump_int(pwp);
	mutex_exit(&pwp->lock);

	/* Ensure discovery is not running before we proceed */
	mutex_enter(&pwp->config_lock);
	while (pwp->configuring) {
		cv_wait(&pwp->config_cv, &pwp->config_lock);
	}
	mutex_exit(&pwp->config_lock);

	/* Issue soft reset and clean up related softstate */
	if (pmcs_soft_reset(pwp, B_FALSE)) {
		/*
		 * Disable interrupts, in case we got far enough along to
		 * enable them, then fire off ereport and service impact.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed soft reset", __func__);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		mutex_enter(&pwp->lock);
		pwp->state = STATE_DEAD;
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);
	pwp->state = STATE_RUNNING;
	mutex_exit(&pwp->lock);

	/*
	 * Finally, restart the phys, which will bring the iports back
	 * up and eventually result in discovery running.
	 */
	if (pmcs_start_phys(pwp)) {
		/* We should be up and running now, so retry */
		if (pmcs_start_phys(pwp)) {
			/* Apparently unable to restart PHYs, fail */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to restart PHYs after soft reset",
			    __func__);
			mutex_enter(&pwp->lock);
			return (DDI_FAILURE);
		}
	}

	mutex_enter(&pwp->lock);
	return (DDI_SUCCESS);
}
/*
 * Reset a device or a logical unit.
 *
 * For SAS devices a SAS_LOGICAL_UNIT_RESET TMF is issued (a LUN of
 * (uint64_t)-1, meaning "whole target", is mapped to LUN 0 for now);
 * for SATA only LUN 0 is legal and a link reset is used instead; SMP
 * devices cannot be reset.  Whatever the outcome of the reset/TMF, an
 * ABORT ALL is then issued to harvest any commands it killed.
 *
 * Returns 0 or an errno.
 */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
	int rval = 0;

	if (pptr == NULL) {
		return (ENXIO);
	}

	pmcs_lock_phy(pptr);

	switch (pptr->dtype) {
	case SAS:
		/*
		 * Some devices do not support SAS_I_T_NEXUS_RESET as
		 * it is not a mandatory (in SAM4) task management
		 * function, while LOGIC_UNIT_RESET is mandatory.
		 *
		 * The problem here is that we need to iterate over
		 * all known LUNs to emulate the semantics of
		 * "RESET_TARGET".
		 *
		 * XXX: FIX ME
		 */
		if (lun == (uint64_t)-1) {
			lun = 0;
		}
		rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
		    NULL);
		break;

	case SATA:
		if (lun != 0ull) {
			pmcs_unlock_phy(pptr);
			return (EINVAL);
		}
		rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
		break;

	default:
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: cannot reset a SMP device yet (%s)",
		    __func__, pptr->path);
		return (EINVAL);
	}

	/*
	 * Now harvest any commands killed by this action
	 * by issuing an ABORT for all commands on this device.
	 *
	 * We do this even if the the tmf or reset fails (in case there
	 * are any dead commands around to be harvested *anyway*).
	 * We don't have to await for the abort to complete.
	 */
	if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
	}

	pmcs_unlock_phy(pptr);
	return (rval);
}
/*
 * Ensure the PHY has a registered device handle, registering it if
 * necessary.  Called with PHY locked.
 *
 * Returns 0 when a valid handle exists; -1 when registration failed
 * (after arranging for a retry, or killing the PHY when its retry
 * window has expired).
 */
static int
pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int result;

	/* Already registered?  Nothing to do. */
	if (pptr->valid_device_id) {
		return (0);
	}

	result = pmcs_register_device(pwp, pptr);

	/* If we changed while registering, punt */
	if (pptr->changed) {
		RESTART_DISCOVERY(pwp);
		return (-1);
	}

	/* ENOMEM is a temporary resource shortage: just retry */
	if (result == ENOMEM) {
		PHY_CHANGED(pwp, pptr);
		RESTART_DISCOVERY(pwp);
		return (-1);
	}

	/* ETIMEDOUT: retry unless our retry window has expired */
	if (result == ETIMEDOUT) {
		if (ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing",
			    __func__, pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return (-1);
	}

	/*
	 * Other errors or no valid device id is fatal, but don't
	 * preclude a future action.
	 */
	if (result || pptr->valid_device_id == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: %s could not be registered", __func__,
		    pptr->path);
		return (-1);
	}

	return (0);
}
/*
 * Create the SCSA target map for an iport.  The activate/deactivate
 * callbacks handle targets as they are reported or removed.  Returns
 * B_TRUE on success, B_FALSE on failure.
 */
int
pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
{
	int rval;

	ASSERT(iport);
	if (iport == NULL)
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* create target map */
	rval = scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET,
	    tgtmap_csync_usec, tgtmap_stable_usec, (void *)iport,
	    pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
	    &iport->iss_tgtmap);
	if (rval != DDI_SUCCESS) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed to create tgtmap", __func__);
		return (B_FALSE);
	}
	return (B_TRUE);
}
/*
 * Tear down an iport's SCSA target map.  Returns B_TRUE on success,
 * B_FALSE if the iport or its map is missing.
 */
int
pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
{
	ASSERT(iport && iport->iss_tgtmap);
	if (iport == NULL)
		return (B_FALSE);
	if (iport->iss_tgtmap == NULL)
		return (B_FALSE);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);

	/* destroy target map */
	scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
	return (B_TRUE);
}
/*
 * Remove all phys from an iport's phymap and empty it's phylist.
 * Called when a port has been reset by the host (see pmcs_intr.c)
 * or prior to issuing a soft reset if we detect a stall on the chip
 * (see pmcs_attach.c).  Called with the iport lock held.
 */
void
pmcs_iport_teardown_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	sas_phymap_phys_t *phys;
	int phynum;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);

	/*
	 * Remove all phys from the iport handle's phy list, unset its
	 * primary phy and update its state.
	 */
	pmcs_remove_phy_from_iport(iport, NULL);
	iport->pptr = NULL;
	iport->ua_state = UA_PEND_DEACTIVATE;

	/* Remove all phys from the phymap */
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	if (phys != NULL) {
		for (phynum = sas_phymap_phys_next(phys); phynum != -1;
		    phynum = sas_phymap_phys_next(phys)) {
			(void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
		}
		sas_phymap_phys_free(phys);
	}
}
/*
 * Query the phymap and populate the iport handle passed in.
 * Called with iport lock held.
 *
 * For every phy number the phymap associates with this iport's unit
 * address, link the corresponding root PHY to the iport (back pointer
 * plus membership in the iport's phy list); a non-subsidiary phy found
 * becomes the iport's primary phy and supplies its portid.  Finishes
 * by restarting discovery.  Always returns DDI_SUCCESS.
 */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	sas_phymap_phys_t *phys;
	int phynum;
	int inst;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);
	inst = ddi_get_instance(iport->dip);

	mutex_enter(&pwp->lock);
	ASSERT(pwp->root_phys != NULL);

	/*
	 * Query the phymap regarding the phys in this iport and populate
	 * the iport's phys list. Hereafter this list is maintained via
	 * port up and down events in pmcs_intr.c
	 */
	ASSERT(list_is_empty(&iport->phys));
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	ASSERT(phys != NULL);
	while ((phynum = sas_phymap_phys_next(phys)) != -1) {
		/* Grab the phy pointer from root_phys */
		pptr = pwp->root_phys + phynum;	/* root phys form an array */
		ASSERT(pptr);
		pmcs_lock_phy(pptr);
		ASSERT(pptr->phynum == phynum);

		/*
		 * Set a back pointer in the phy to this iport.
		 */
		pptr->iport = iport;

		/*
		 * If this phy is the primary, set a pointer to it on our
		 * iport handle, and set our portid from it.
		 */
		if (!pptr->subsidiary) {
			iport->pptr = pptr;
			iport->portid = pptr->portid;
		}

		/*
		 * Finally, insert the phy into our list
		 */
		pmcs_unlock_phy(pptr);
		pmcs_add_phy_to_iport(iport, pptr);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
		    "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
		    (void *)pptr, inst, iport->refcnt);
	}
	mutex_exit(&pwp->lock);
	sas_phymap_phys_free(phys);
	RESTART_DISCOVERY(pwp);
	return (DDI_SUCCESS);
}
/*
 * Return the iport that ua is associated with, or NULL. If an iport is
 * returned, it will be held and the caller must release the hold.
 */
static pmcs_iport_t *
pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
{
	pmcs_iport_t *ip;
	int match;

	rw_enter(&pwp->iports_lock, RW_READER);
	ip = list_head(&pwp->iports);
	while (ip != NULL) {
		/* Compare unit addresses under the iport's lock */
		mutex_enter(&ip->lock);
		match = (strcmp(ip->ua, ua) == 0);
		mutex_exit(&ip->lock);
		if (match) {
			pmcs_hold_iport(ip);
			break;
		}
		ip = list_next(&pwp->iports, ip);
	}
	rw_exit(&pwp->iports_lock);

	return (ip);
}
/*
 * Return the iport that pptr is associated with, or NULL.
 * If an iport is returned, there is a hold that the caller must release.
 */
pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
{
	char *ua;
	pmcs_iport_t *iport;

	/* Map the remote wwn to a unit address via the phymap */
	ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
	if (ua == NULL) {
		return (NULL);
	}

	iport = pmcs_get_iport_by_ua(pwp, ua);
	if (iport == NULL) {
		return (NULL);
	}

	mutex_enter(&iport->lock);
	pmcs_iport_active(iport);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
	    "found iport [0x%p] on ua (%s), refcnt (%d)",
	    __func__, (void *)iport, ua, iport->refcnt);
	mutex_exit(&iport->lock);

	return (iport);
}
/*
 * Promote the next phy on this port to primary, and return it.
 * Called when the primary PHY on a port is going down, but the port
 * remains up (see pmcs_intr.c).
 *
 * Picks the first other root phy with the same portid, copies the old
 * primary's port-wide state onto it (children, width, dtype, device
 * handle, target linkage, PM properties, ...), reparents the children,
 * and demotes the old primary to a subsidiary.  Returns the new
 * primary (unlocked), or NULL if no other phy exists on the port.
 */
pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
{
	pmcs_hw_t *pwp;
	pmcs_iport_t *iport;
	pmcs_phy_t *pptr, *child;
	int portid;

	pmcs_lock_phy(prev_primary);
	portid = prev_primary->portid;
	iport = prev_primary->iport;
	pwp = prev_primary->pwp;

	/* Use the first available phy in this port */
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		if ((pptr->portid == portid) && (pptr != prev_primary)) {
			/*
			 * NOTE(review): this takes the phy's base lock
			 * directly rather than using pmcs_lock_phy() --
			 * presumably fine since this phy has no children
			 * yet (it receives them below); confirm.
			 */
			mutex_enter(&pptr->phy_lock);
			break;
		}
	}

	if (pptr == NULL) {
		/* No other phy on this port; nothing to promote */
		pmcs_unlock_phy(prev_primary);
		return (NULL);
	}

	if (iport) {
		mutex_enter(&iport->lock);
		iport->pptr = pptr;	/* new primary for the iport */
		mutex_exit(&iport->lock);
	}

	/* Update the phy handle with the data from the previous primary */
	pptr->children = prev_primary->children;
	child = pptr->children;
	while (child) {
		child->parent = pptr;	/* reparent each inherited child */
		child = child->sibling;
	}
	pptr->ncphy = prev_primary->ncphy;
	pptr->width = prev_primary->width;
	pptr->dtype = prev_primary->dtype;
	pptr->pend_dtype = prev_primary->pend_dtype;
	pptr->tolerates_sas2 = prev_primary->tolerates_sas2;
	pptr->atdt = prev_primary->atdt;
	pptr->portid = prev_primary->portid;
	pptr->link_rate = prev_primary->link_rate;
	pptr->configured = prev_primary->configured;
	pptr->iport = prev_primary->iport;
	pptr->target = prev_primary->target;
	if (pptr->target) {
		/* Keep the target's back pointer consistent */
		pptr->target->phy = pptr;
	}

	/* Update the phy mask properties for the affected PHYs */
	/* Clear the current values... */
	pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
	    pptr->tgt_port_pm_tmp, B_FALSE);
	/* ...replace with the values from prev_primary... */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_TRUE);
	/* ...then clear prev_primary's PHY values from the new primary */
	pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm,
	    prev_primary->tgt_port_pm, B_FALSE);
	/* Clear the prev_primary's values */
	pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp,
	    prev_primary->tgt_port_pm_tmp, B_FALSE);

	pptr->subsidiary = 0;

	/* Demote the old primary; it keeps neither children nor target */
	prev_primary->subsidiary = 1;
	prev_primary->children = NULL;
	prev_primary->target = NULL;
	pptr->device_id = prev_primary->device_id;
	pptr->valid_device_id = prev_primary->valid_device_id;
	pmcs_unlock_phy(prev_primary);

	/*
	 * We call pmcs_unlock_phy() on pptr because it now contains the
	 * list of children.
	 */
	pmcs_unlock_phy(pptr);

	return (pptr);
}
/*
 * Take a reference on an iport, preventing it from being torn down
 * until the matching pmcs_rele_iport() call.
 */
void
pmcs_hold_iport(pmcs_iport_t *iport)
{
	ASSERT(iport);

	/* Bump the reference count under its dedicated lock */
	mutex_enter(&iport->refcnt_lock);
	iport->refcnt++;
	mutex_exit(&iport->refcnt_lock);

	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
	    "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
}
/*
 * Release a refcnt on this iport. If this is the last reference,
 * signal the potential waiter in pmcs_iport_unattach().
 */
void
pmcs_rele_iport(pmcs_iport_t *iport)
{
	int refcnt;

	ASSERT(iport->refcnt > 0);

	/*
	 * Perform the decrement, the zero test and the wakeup all while
	 * holding refcnt_lock: the previous code re-read iport->refcnt
	 * after dropping the lock, which races with a concurrent
	 * pmcs_hold_iport()/pmcs_rele_iport() and could miss (or
	 * spuriously issue) the final wakeup.
	 */
	mutex_enter(&iport->refcnt_lock);
	refcnt = --iport->refcnt;
	if (refcnt == 0) {
		cv_signal(&iport->refcnt_cv);
	}
	mutex_exit(&iport->refcnt_lock);

	/* Log the snapshot taken under the lock, not a fresh racy read */
	pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
	    "[0x%p] refcnt (%d)", __func__, (void *)iport, refcnt);
}
/*
 * sas_phymap activation callback: a unit address has become active.
 *
 * Bumps the phymap_active count and adds an iport handle to the
 * iportmap for the unit address.  If an iport for this ua already
 * exists (e.g. a consumer held the iport node open while the port was
 * down), reconfigure and reactivate it here for reuse.  No-op if the
 * HBA is unprobing, dead, or in reset.
 */
void
pmcs_phymap_activate(void *arg, char *ua, void **privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t *pwp = arg;
	pmcs_iport_t *iport = NULL;

	mutex_enter(&pwp->lock);
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD) ||
	    (pwp->state == STATE_IN_RESET)) {
		mutex_exit(&pwp->lock);
		return;
	}
	pwp->phymap_active++;
	mutex_exit(&pwp->lock);

	if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "add iport handle on unit address [%s]", __func__, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), added iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	}

	/* Set the HBA softstate as our private data for this unit address */
	*privp = (void *)pwp;

	/*
	 * We are waiting on attach for this iport node, unless it is still
	 * attached. This can happen if a consumer has an outstanding open
	 * on our iport node, but the port is down. If this is the case, we
	 * need to configure our iport here for reuse.
	 */
	iport = pmcs_get_iport_by_ua(pwp, ua);
	if (iport) {
		mutex_enter(&iport->lock);
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
			    "failed to configure phys on iport [0x%p] at "
			    "unit address (%s)", __func__, (void *)iport, ua);
		}
		pmcs_iport_active(iport);
		pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
		    &iport->nphy);
		mutex_exit(&iport->lock);
		pmcs_rele_iport(iport);	/* drop the hold from the lookup */
	}
}
/*
 * sas_phymap deactivation callback: a unit address has gone away.
 *
 * Decrements the phymap_active count, removes the iport handle from
 * the iportmap, and -- if an iport still exists for this unit address
 * -- quiesces it (marks it inactive, invalidates its portid and drops
 * all of its phys).
 */
void
pmcs_phymap_deactivate(void *arg, char *ua, void *privp)
{
	_NOTE(ARGUNUSED(privp));
	pmcs_hw_t *pwp = arg;
	pmcs_iport_t *iport;

	mutex_enter(&pwp->lock);
	pwp->phymap_active--;
	mutex_exit(&pwp->lock);

	if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) ==
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
		    "phymap_active count (%d), removed iport handle on unit "
		    "address [%s]", __func__, pwp->phymap_active, ua);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
		    "remove iport handle on unit address [%s]", __func__, ua);
	}

	iport = pmcs_get_iport_by_ua(pwp, ua);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed "
		    "lookup of iport handle on unit addr (%s)", __func__, ua);
		return;
	}

	/* Quiesce the iport */
	mutex_enter(&iport->lock);
	iport->ua_state = UA_INACTIVE;
	iport->portid = PMCS_IPORT_INVALID_PORT_ID;
	pmcs_remove_phy_from_iport(iport, NULL);
	mutex_exit(&iport->lock);
	pmcs_rele_iport(iport);
}
/*
 * Top-level discovery function.
 *
 * Runs the full SAS topology discovery pass under the "configuring"
 * flag (serialized via config_lock/config_cv):
 *
 *   1. check changed expanders,
 *   2. kill dead devices,
 *   3. clear dead expanders' children from the tree,
 *   4. nullify remaining dead phys,
 *   5. configure new devices,
 *
 * and finally, if the configuration stayed stable throughout, report
 * the observed targets to SCSA's tgtmaps.  Any instability or resource
 * shortage causes discovery to be rescheduled rather than completed.
 * Bails out early if the HBA isn't running, no phymap is active, or no
 * iports have attached yet.
 */
void
pmcs_discover(pmcs_hw_t *pwp)
{
	pmcs_phy_t		*pptr;
	pmcs_phy_t		*root_phy;

	DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);

	mutex_enter(&pwp->lock);

	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return;
	}

	/* Ensure we have at least one phymap active */
	if (pwp->phymap_active == 0) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: phymap inactive, exiting", __func__);
		return;
	}

	mutex_exit(&pwp->lock);

	/*
	 * If no iports have attached, but we have PHYs that are up, we
	 * are waiting for iport attach to complete. Restart discovery.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	if (!pwp->iports_attached) {
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: no iports attached, retry discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}
	rw_exit(&pwp->iports_lock);

	mutex_enter(&pwp->config_lock);
	if (pwp->configuring) {
		/* Another discovery pass is already running */
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: configuration already in progress", __func__);
		return;
	}

	if (pmcs_acquire_scratch(pwp, B_FALSE)) {
		mutex_exit(&pwp->config_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: cannot allocate scratch", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
		return;
	}

	pwp->configuring = 1;
	pwp->config_changed = B_FALSE;
	mutex_exit(&pwp->config_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin");

	/*
	 * First, tell SCSA that we're beginning set operations.
	 */
	pmcs_begin_observations(pwp);

	/*
	 * The order of the following traversals is important.
	 *
	 * The first one checks for changed expanders.
	 *
	 * The second one aborts commands for dead devices and deregisters them.
	 *
	 * The third one clears the contents of dead expanders from the tree
	 *
	 * The fourth one clears now dead devices in expanders that remain.
	 */

	/*
	 * 1. Check expanders marked changed (but not dead) to see if they still
	 * have the same number of phys and the same SAS address. Mark them,
	 * their subsidiary phys (if wide) and their descendents dead if
	 * anything has changed. Check the devices they contain to see if
	 * *they* have changed. If they've changed from type NOTHING we leave
	 * them marked changed to be configured later (picking up a new SAS
	 * address and link rate if possible). Otherwise, any change in type,
	 * SAS address or removal of target role will cause us to mark them
	 * (and their descendents) as dead (and cause any pending commands
	 * and associated devices to be removed).
	 *
	 * NOTE: We don't want to bail on discovery if the config has
	 * changed until *after* we run pmcs_kill_devices.
	 */
	root_phy = pwp->root_phys;
	pmcs_check_expanders(pwp, root_phy);

	/*
	 * 2. Descend the tree looking for dead devices and kill them
	 * by aborting all active commands and then deregistering them.
	 */
	if (pmcs_kill_devices(pwp, root_phy)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: pmcs_kill_devices failed!", __func__);
	}

	/*
	 * 3. Check for dead expanders and remove their children from the tree.
	 * By the time we get here, the devices and commands for them have
	 * already been terminated and removed.
	 *
	 * We do this independent of the configuration count changing so we can
	 * free any dead device PHYs that were discovered while checking
	 * expanders. We ignore any subsidiary phys as pmcs_clear_expander
	 * will take care of those.
	 *
	 * NOTE: pmcs_clear_expander requires softstate lock
	 */
	mutex_enter(&pwp->lock);
	for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
		/*
		 * Call pmcs_clear_expander for every root PHY. It will
		 * recurse and determine which (if any) expanders actually
		 * need to be cleared.
		 */
		pmcs_lock_phy(pptr);
		pmcs_clear_expander(pwp, pptr, 0);
		pmcs_unlock_phy(pptr);
	}
	mutex_exit(&pwp->lock);

	/*
	 * 4. Check for dead devices and nullify them. By the time we get here,
	 * the devices and commands for them have already been terminated
	 * and removed. This is different from step 2 in that this just nulls
	 * phys that are part of expanders that are still here but used to
	 * be something but are no longer something (e.g., after a pulled
	 * disk drive). Note that dead expanders had their contained phys
	 * removed from the tree- here, the expanders themselves are
	 * nullified (unless they were removed by being contained in another
	 * expander phy).
	 */
	pmcs_clear_phys(pwp, root_phy);

	/*
	 * 5. Now check for and configure new devices.
	 */
	if (pmcs_configure_new_devices(pwp, root_phy)) {
		goto restart;
	}

out:
	DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
	    boolean_t, pwp->config_changed);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end");

	mutex_enter(&pwp->config_lock);

	if (pwp->config_changed == B_FALSE) {
		/*
		 * Observation is stable, report what we currently see to
		 * the tgtmaps for delta processing. Start by setting
		 * BEGIN on all tgtmaps.
		 */
		mutex_exit(&pwp->config_lock);
		if (pmcs_report_observations(pwp) == B_FALSE) {
			goto restart;
		}
		mutex_enter(&pwp->config_lock);
	} else {
		/*
		 * If config_changed is TRUE, we need to reschedule
		 * discovery now.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Config has changed, will re-run discovery", __func__);
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}

	pmcs_release_scratch(pwp);
	if (!pwp->quiesced) {
		pwp->blocked = 0;
	}
	pwp->configuring = 0;
	cv_signal(&pwp->config_cv);
	mutex_exit(&pwp->config_lock);

#ifdef DEBUG
	/* Sanity check: nothing should still need work without it scheduled */
	pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
	if (pptr != NULL) {
		if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "PHY %s dead=%d changed=%d configured=%d "
			    "but no work scheduled", pptr->path, pptr->dead,
			    pptr->changed, pptr->configured);
		}
		pmcs_unlock_phy(pptr);
	}
#endif

	return;

restart:
	/* Clean up and restart discovery */
	pmcs_release_scratch(pwp);
	pmcs_flush_observations(pwp);
	mutex_enter(&pwp->config_lock);
	pwp->configuring = 0;
	cv_signal(&pwp->config_cv);
	RESTART_DISCOVERY_LOCKED(pwp);
	mutex_exit(&pwp->config_lock);
}
#ifdef DEBUG
/*
 * DEBUG-only sanity walker: return the first PHY in the tree rooted at
 * pptr (walking the sibling list and recursing depth-first into children)
 * that still has scheduled work outstanding -- i.e. it is marked
 * "changed", or it is dead but still carries a valid device ID.
 *
 * LOCKING: the matching PHY is returned LOCKED; the caller must call
 * pmcs_unlock_phy() on it.  If no PHY needs work, NULL is returned and
 * no PHY locks are held.  At most one PHY lock is held at any point
 * during the walk (the parent's lock is dropped before recursing).
 *
 * pwp is not examined here; it is only passed through the recursion.
 */
static pmcs_phy_t *
pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_phy_t *cphyp, *pnext;

	while (pptr) {
		pmcs_lock_phy(pptr);

		/* Match: hand the PHY back to the caller still locked. */
		if (pptr->changed || (pptr->dead && pptr->valid_device_id)) {
			return (pptr);
		}

		/* Capture the next sibling while the PHY is still locked. */
		pnext = pptr->sibling;

		if (pptr->children) {
			cphyp = pptr->children;
			/*
			 * Drop this PHY's lock before descending; the
			 * recursion locks each child as it visits it.
			 */
			pmcs_unlock_phy(pptr);
			cphyp = pmcs_find_phy_needing_work(pwp, cphyp);

			if (cphyp) {
				/* Already locked by the recursive call. */
				return (cphyp);
			}
		} else {
			pmcs_unlock_phy(pptr);
		}

		pptr = pnext;
	}

	return (NULL);
}
#endif /* DEBUG */
/*
* We may (or may not) report observations to SCSA. This is prefaced by
* issuing a set_begin for each iport target map.
*/
static void
pmcs_begin_observations(pmcs_hw_t *pwp)
{
pmcs_iport_t *iport;
scsi_hba_tgtmap_t *tgtmap;
rw_enter(&pwp->iports_lock, RW_READER);
for (iport = list_head(&pwp->iports); iport != NULL;
iport = list_next(&pwp->iports, iport)) {
/*
* Unless we have at least one phy up, skip this iport.
* Note we don't need to lock the iport for report_skip
* since it is only used here. We are doing the skip so that
* the phymap and iportmap stabilization times are honored -
* giving us the ability to recover port operation within the
* stabilization time without unconfiguring targets using the
* port.
*/
if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
iport->report_skip = 1;
continue; /* skip set_begin */
}
iport->report_skip = 0;