/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Bayard G. Bell. All rights reserved.
*/
/*
* SCSI SCSA-compliant and not-so-DDI-compliant Tape Driver
*/
#if defined(lint) && !defined(DEBUG)
#define DEBUG 1
#endif
#include <sys/modctl.h>
#include <sys/scsi/scsi.h>
#include <sys/mtio.h>
#include <sys/scsi/targets/stdef.h>
#include <sys/file.h>
#include <sys/kstat.h>
#include <sys/ddidmareq.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#define IOSP KSTAT_IO_PTR(un->un_stats)
/*
* stats maintained only for reads/writes as commands
* like rewind etc skew the wait/busy times
*/
#define IS_RW(bp) ((bp)->b_bcount > 0)
#define ST_DO_KSTATS(bp, kstat_function) \
if ((bp != un->un_sbufp) && un->un_stats && IS_RW(bp)) { \
kstat_function(IOSP); \
}
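/*
 * Typical call shape (an illustrative sketch, not a definition from this
 * file): the queue-accounting helpers from kstat_queue(9F) are passed in
 * as kstat_function, so only real read/write buffers are counted:
 *
 *	ST_DO_KSTATS(bp, kstat_waitq_enter);
 *	...
 *	ST_DO_KSTATS(bp, kstat_runq_exit);
 */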
#define ST_DO_ERRSTATS(un, x) \
if (un->un_errstats) { \
struct st_errstats *stp; \
stp = (struct st_errstats *)un->un_errstats->ks_data; \
stp->x.value.ul++; \
}
#define FILL_SCSI1_LUN(devp, pkt) \
if ((devp)->sd_inq->inq_ansi == 0x1) { \
int _lun; \
_lun = ddi_prop_get_int(DDI_DEV_T_ANY, (devp)->sd_dev, \
DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); \
if (_lun > 0) { \
((union scsi_cdb *)(pkt)->pkt_cdbp)->scc_lun = \
_lun; \
} \
}
/*
* get an available contig mem header, cp.
* when big_enough is true, we will return NULL if no big enough
* contig mem is found.
* when big_enough is false, we will try to find a cp containing big
* enough contig mem. if not found, we will return the last cp available.
*
* used by st_get_contig_mem()
*/
#define ST_GET_CONTIG_MEM_HEAD(un, cp, len, big_enough) { \
struct contig_mem *tmp_cp = NULL; \
for ((cp) = (un)->un_contig_mem; \
(cp) != NULL; \
tmp_cp = (cp), (cp) = (cp)->cm_next) { \
if (((cp)->cm_len >= (len)) || \
(!(big_enough) && ((cp)->cm_next == NULL))) { \
if (tmp_cp == NULL) { \
(un)->un_contig_mem = (cp)->cm_next; \
} else { \
tmp_cp->cm_next = (cp)->cm_next; \
} \
(cp)->cm_next = NULL; \
(un)->un_contig_mem_available_num--; \
break; \
} \
} \
}
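/*
 * Hypothetical caller sketch (illustration only; assumes the un_contig_mem
 * list is protected by ST_MUTEX):
 *
 *	struct contig_mem *cp;
 *
 *	mutex_enter(ST_MUTEX);
 *	ST_GET_CONTIG_MEM_HEAD(un, cp, len, big_enough);
 *	mutex_exit(ST_MUTEX);
 *	if (cp == NULL) {
 *		... no suitable chunk was available ...
 *	}
 */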
#define ST_NUM_MEMBERS(array) (sizeof (array) / sizeof (array[0]))
#define COPY_POS(dest, source) bcopy(source, dest, sizeof (tapepos_t))
#define ISALNUM(byte) \
(((byte) >= 'a' && (byte) <= 'z') || \
((byte) >= 'A' && (byte) <= 'Z') || \
((byte) >= '0' && (byte) <= '9'))
#define ONE_K 1024
#define MAX_SPACE_CNT(cnt) if (cnt >= 0) { \
if (cnt > MIN(SP_CNT_MASK, INT32_MAX)) \
return (EINVAL); \
} else { \
if (-(cnt) > MIN(SP_CNT_MASK, INT32_MAX)) \
return (EINVAL); \
}
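/*
 * Illustrative use from an ioctl-style handler (a sketch; mtop is a
 * hypothetical variable here). Note that the macro itself does the
 * "return (EINVAL)" on overflow, so it may only be used where returning
 * an errno is appropriate:
 *
 *	MAX_SPACE_CNT(mtop->mt_count);
 */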
/*
* Global External Data Definitions
*/
extern struct scsi_key_strings scsi_cmds[];
extern uchar_t scsi_cdb_size[];
/*
* Local Static Data
*/
static void *st_state;
static char *const st_label = "st";
static volatile int st_recov_sz = sizeof (recov_info);
static const char mp_misconf[] = {
"St Tape is misconfigured, MPxIO enabled and "
"tape-command-recovery-disable set in st.conf\n"
};
#ifdef __x86
/*
* We need to use the DMA attributes below to allocate physically
* contiguous memory for doing I/O in big block sizes
*/
static ddi_dma_attr_t st_contig_mem_dma_attr = {
DMA_ATTR_V0, /* version number */
0x0, /* lowest usable address */
0xFFFFFFFFull, /* high DMA address range */
0xFFFFFFFFull, /* DMA counter register */
1, /* DMA address alignment */
1, /* DMA burstsizes */
1, /* min effective DMA size */
0xFFFFFFFFull, /* max DMA xfer size */
0xFFFFFFFFull, /* segment boundary */
1, /* s/g list length */
1, /* granularity of device */
0 /* DMA transfer flags */
};
static ddi_device_acc_attr_t st_acc_attr = {
DDI_DEVICE_ATTR_V0,
DDI_NEVERSWAP_ACC,
DDI_STRICTORDER_ACC
};
/* set limitation for the number of contig_mem */
static int st_max_contig_mem_num = ST_MAX_CONTIG_MEM_NUM;
#endif
/*
* Tunable parameters
*
* DISCLAIMER
* ----------
* These parameters are intended for use only in system testing; if you use
* them in production systems, you do so at your own risk. Altering any
* variable not listed below may cause unpredictable system behavior.
*
* st_check_media_time
*
* Three second state check
*
* st_allow_large_xfer
*
* Gated with ST_NO_RECSIZE_LIMIT
*
* 0 - Transfers larger than 64KB will not be allowed
* regardless of the setting of ST_NO_RECSIZE_LIMIT
* 1 - Transfers larger than 64KB will be allowed
* if ST_NO_RECSIZE_LIMIT is TRUE for the drive
*
* st_report_soft_errors_on_close
*
* Gated with ST_SOFT_ERROR_REPORTING
*
* 0 - Errors will not be reported on close regardless
* of the setting of ST_SOFT_ERROR_REPORTING
*
* 1 - Errors will be reported on close if
* ST_SOFT_ERROR_REPORTING is TRUE for the drive
*/
static int st_selection_retry_count = ST_SEL_RETRY_COUNT;
static int st_retry_count = ST_RETRY_COUNT;
static int st_io_time = ST_IO_TIME;
static int st_long_timeout_x = ST_LONG_TIMEOUT_X;
static int st_space_time = ST_SPACE_TIME;
static int st_long_space_time_x = ST_LONG_SPACE_TIME_X;
static int st_error_level = SCSI_ERR_RETRYABLE;
static int st_check_media_time = 3000000; /* 3 Second State Check */
static int st_max_throttle = ST_MAX_THROTTLE;
static clock_t st_wait_cmds_complete = ST_WAIT_CMDS_COMPLETE;
static int st_allow_large_xfer = 1;
static int st_report_soft_errors_on_close = 1;
/*
* End of tunable parameters list
*/
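/*
 * The tunables above can be overridden at boot time via /etc/system
 * (illustrative sketch only; the values shown are the compiled-in
 * defaults and the DISCLAIMER above applies):
 *
 *	set st:st_check_media_time = 3000000
 *	set st:st_allow_large_xfer = 1
 *	set st:st_report_soft_errors_on_close = 1
 */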
/*
* Asynchronous I/O and persistent errors, refer to PSARC/1995/228
*
* Asynchronous I/O's main offering is that it is a non-blocking way to do
* reads and writes. The driver will queue up all the requests it gets and
* have them ready to transport to the HBA. Unfortunately, we cannot always
* just ship the I/O requests to the HBA, as there are errors and exceptions
* that may happen when we don't want the HBA to continue. Therein comes
* the flush-on-errors capability. If the HBA supports it, then st will
* send in st_max_throttle I/O requests at the same time.
*
* Persistent errors: This was also reasonably simple. In the interrupt
* routines, if there was an error or exception (FM, LEOT, media error,
* transport error), the persistent error bits are set and everything is
* shut down by setting the throttle to zero. If we hit an exception in the
* HBA, and flush-on-errors is set, we wait for all outstanding I/O's to
* come back (with CMD_ABORTED), then flush all bp's in the wait queue with
* the appropriate error, and this will preserve order. Of course, depending
* on the exception we have to show a zero read or write before we show
* errors back to the application.
*/
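/*
 * A minimal sketch of the flush-on-errors negotiation described above.
 * The capability name "flush-on-errors" is an assumption used only for
 * illustration; scsi_ifsetcap(9F) is the standard way to request an HBA
 * capability:
 *
 *	if (scsi_ifsetcap(ROUTE, "flush-on-errors", 1, 1) == 1) {
 *		un->un_flush_on_errors = 1;
 *	}
 */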
extern const int st_ndrivetypes; /* defined in st_conf.c */
extern const struct st_drivetype st_drivetypes[];
extern const char st_conf_version[];
#ifdef STDEBUG
static int st_soft_error_report_debug = 0;
volatile int st_debug = 0;
static volatile dev_info_t *st_lastdev;
static kmutex_t st_debug_mutex;
#endif
#define ST_MT02_NAME "Emulex MT02 QIC-11/24 "
static const struct vid_drivetype {
char *vid;
char type;
} st_vid_dt[] = {
{"LTO-CVE ", MT_LTO},
{"QUANTUM ", MT_ISDLT},
{"SONY ", MT_ISAIT},
{"STK ", MT_ISSTK9840}
};
static const struct driver_minor_data {
char *name;
int minor;
} st_minor_data[] = {
/*
* The top 4 entries are for the default densities,
* don't alter their position.
*/
{"", 0},
{"n", MT_NOREWIND},
{"b", MT_BSD},
{"bn", MT_NOREWIND | MT_BSD},
{"l", MT_DENSITY1},
{"m", MT_DENSITY2},
{"h", MT_DENSITY3},
{"c", MT_DENSITY4},
{"u", MT_DENSITY4},
{"ln", MT_DENSITY1 | MT_NOREWIND},
{"mn", MT_DENSITY2 | MT_NOREWIND},
{"hn", MT_DENSITY3 | MT_NOREWIND},
{"cn", MT_DENSITY4 | MT_NOREWIND},
{"un", MT_DENSITY4 | MT_NOREWIND},
{"lb", MT_DENSITY1 | MT_BSD},
{"mb", MT_DENSITY2 | MT_BSD},
{"hb", MT_DENSITY3 | MT_BSD},
{"cb", MT_DENSITY4 | MT_BSD},
{"ub", MT_DENSITY4 | MT_BSD},
{"lbn", MT_DENSITY1 | MT_NOREWIND | MT_BSD},
{"mbn", MT_DENSITY2 | MT_NOREWIND | MT_BSD},
{"hbn", MT_DENSITY3 | MT_NOREWIND | MT_BSD},
{"cbn", MT_DENSITY4 | MT_NOREWIND | MT_BSD},
{"ubn", MT_DENSITY4 | MT_NOREWIND | MT_BSD}
};
/* strings used in many debug and warning messages */
static const char wr_str[] = "write";
static const char rd_str[] = "read";
static const char wrg_str[] = "writing";
static const char rdg_str[] = "reading";
static const char *space_strs[] = {
"records",
"filemarks",
"sequential filemarks",
"eod",
"setmarks",
"sequential setmarks",
"Reserved",
"Reserved"
};
static const char *load_strs[] = {
"unload", /* LD_UNLOAD 0 */
"load", /* LD_LOAD 1 */
"retension", /* LD_RETEN 2 */
"load reten", /* LD_LOAD | LD_RETEN 3 */
"eod", /* LD_EOT 4 */
"load EOD", /* LD_LOAD | LD_EOT 5 */
"reten EOD", /* LD_RETEN | LD_EOT 6 */
"load reten EOD", /* LD_LOAD|LD_RETEN|LD_EOT 7 */
"hold", /* LD_HOLD 8 */
"load and hold" /* LD_LOAD | LD_HOLD 9 */
};
static const char *errstatenames[] = {
"COMMAND_DONE",
"COMMAND_DONE_ERROR",
"COMMAND_DONE_ERROR_RECOVERED",
"QUE_COMMAND",
"QUE_BUSY_COMMAND",
"QUE_SENSE",
"JUST_RETURN",
"COMMAND_DONE_EACCES",
"QUE_LAST_COMMAND",
"COMMAND_TIMEOUT",
"PATH_FAILED",
"DEVICE_RESET",
"DEVICE_TAMPER",
"ATTEMPT_RETRY"
};
const char *bogusID = "Unknown Media ID";
/* default density offsets in the table above */
#define DEF_BLANK 0
#define DEF_NOREWIND 1
#define DEF_BSD 2
#define DEF_BSD_NR 3
/* Sense Key, ASC/ASCQ for which tape ejection is needed */
static struct tape_failure_code {
uchar_t key;
uchar_t add_code;
uchar_t qual_code;
} st_tape_failure_code[] = {
{ KEY_HARDWARE_ERROR, 0x15, 0x01},
{ KEY_HARDWARE_ERROR, 0x44, 0x00},
{ KEY_HARDWARE_ERROR, 0x53, 0x00},
{ KEY_HARDWARE_ERROR, 0x53, 0x01},
{ KEY_NOT_READY, 0x53, 0x00},
{ 0xff}
};
/* clean bit position and mask */
static struct cln_bit_position {
ushort_t cln_bit_byte;
uchar_t cln_bit_mask;
} st_cln_bit_position[] = {
{ 21, 0x08},
{ 70, 0xc0},
{ 18, 0x81} /* 0x80 bit indicates in bit mode, 0x01 bit means clean light is on */
};
/*
* architecture dependent allocation restrictions. For x86, we'll set
* dma_attr_addr_hi to st_max_phys_addr and dma_attr_sgllen to
* st_sgl_size during _init().
*/
#if defined(__sparc)
static ddi_dma_attr_t st_alloc_attr = {
DMA_ATTR_V0, /* version number */
0x0, /* lowest usable address */
0xFFFFFFFFull, /* high DMA address range */
0xFFFFFFFFull, /* DMA counter register */
1, /* DMA address alignment */
1, /* DMA burstsizes */
1, /* min effective DMA size */
0xFFFFFFFFull, /* max DMA xfer size */
0xFFFFFFFFull, /* segment boundary */
1, /* s/g list length */
512, /* granularity of device */
0 /* DMA transfer flags */
};
#elif defined(__x86)
static ddi_dma_attr_t st_alloc_attr = {
DMA_ATTR_V0, /* version number */
0x0, /* lowest usable address */
0x0, /* high DMA address range [set in _init()] */
0xFFFFull, /* DMA counter register */
512, /* DMA address alignment */
1, /* DMA burstsizes */
1, /* min effective DMA size */
0xFFFFFFFFull, /* max DMA xfer size */
0xFFFFFFFFull, /* segment boundary */
0, /* s/g list length [set in _init()] */
512, /* granularity of device */
0 /* DMA transfer flags */
};
uint64_t st_max_phys_addr = 0xFFFFFFFFull;
int st_sgl_size = 0xF;
#endif
/*
* Configuration Data:
*
* Device driver ops vector
*/
static int st_aread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int st_awrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int st_read(dev_t dev, struct uio *uio_p, cred_t *cred_p);
static int st_write(dev_t dev, struct uio *uio_p, cred_t *cred_p);
static int st_open(dev_t *devp, int flag, int otyp, cred_t *cred_p);
static int st_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int st_strategy(struct buf *bp);
static int st_queued_strategy(buf_t *bp);
static int st_ioctl(dev_t dev, int cmd, intptr_t arg, int flag,
cred_t *cred_p, int *rval_p);
extern int nulldev(), nodev();
static struct cb_ops st_cb_ops = {
st_open, /* open */
st_close, /* close */
st_queued_strategy, /* strategy Not Block device but async checks */
nodev, /* print */
nodev, /* dump */
st_read, /* read */
st_write, /* write */
st_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* cb_prop_op */
0, /* streamtab */
D_64BIT | D_MP | D_NEW | D_HOTPLUG |
D_OPEN_RETURNS_EINTR, /* cb_flag */
CB_REV, /* cb_rev */
st_aread, /* async I/O read entry point */
st_awrite /* async I/O write entry point */
};
static int st_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
void **result);
static int st_probe(dev_info_t *dev);
static int st_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
static int st_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
static struct dev_ops st_ops = {
DEVO_REV, /* devo_rev, */
0, /* refcnt */
st_info, /* info */
nulldev, /* identify */
st_probe, /* probe */
st_attach, /* attach */
st_detach, /* detach */
nodev, /* reset */
&st_cb_ops, /* driver operations */
(struct bus_ops *)0, /* bus operations */
nulldev, /* power */
ddi_quiesce_not_needed, /* devo_quiesce */
};
/*
* Local Function Declarations
*/
static char *st_print_scsi_cmd(char cmd);
static void st_print_cdb(dev_info_t *dip, char *label, uint_t level,
char *title, char *cdb);
static void st_clean_print(dev_info_t *dev, char *label, uint_t level,
char *title, char *data, int len);
static int st_doattach(struct scsi_device *devp, int (*canwait)());
static void st_known_tape_type(struct scsi_tape *un);
static int st_get_conf_from_st_dot_conf(struct scsi_tape *, char *,
struct st_drivetype *);
static int st_get_conf_from_st_conf_dot_c(struct scsi_tape *, char *,
struct st_drivetype *);
static int st_get_conf_from_tape_drive(struct scsi_tape *, char *,
struct st_drivetype *);
static int st_get_densities_from_tape_drive(struct scsi_tape *,
struct st_drivetype *);
static int st_get_timeout_values_from_tape_drive(struct scsi_tape *,
struct st_drivetype *);
static int st_get_timeouts_value(struct scsi_tape *, uchar_t, ushort_t *,
ushort_t);
static int st_get_default_conf(struct scsi_tape *, char *,
struct st_drivetype *);
static int st_rw(dev_t dev, struct uio *uio, int flag);
static int st_arw(dev_t dev, struct aio_req *aio, int flag);
static int st_find_eod(struct scsi_tape *un);
static int st_check_density_or_wfm(dev_t dev, int wfm, int mode, int stepflag);
static int st_uscsi_cmd(struct scsi_tape *un, struct uscsi_cmd *, int flag);
static int st_mtioctop(struct scsi_tape *un, intptr_t arg, int flag);
static int st_mtiocltop(struct scsi_tape *un, intptr_t arg, int flag);
static int st_do_mtioctop(struct scsi_tape *un, struct mtlop *mtop);
static void st_start(struct scsi_tape *un);
static int st_handle_start_busy(struct scsi_tape *un, struct buf *bp,
clock_t timeout_interval, int queued);
static int st_handle_intr_busy(struct scsi_tape *un, struct buf *bp,
clock_t timeout_interval);
static int st_handle_intr_retry_lcmd(struct scsi_tape *un, struct buf *bp);
static void st_done_and_mutex_exit(struct scsi_tape *un, struct buf *bp);
static void st_init(struct scsi_tape *un);
static void st_make_cmd(struct scsi_tape *un, struct buf *bp,
int (*func)(caddr_t));
static void st_make_uscsi_cmd(struct scsi_tape *, struct uscsi_cmd *,
struct buf *bp, int (*func)(caddr_t));
static void st_intr(struct scsi_pkt *pkt);
static void st_set_state(struct scsi_tape *un, buf_t *bp);
static void st_test_append(struct buf *bp);
static int st_runout(caddr_t);
static int st_cmd(struct scsi_tape *un, int com, int64_t count, int wait);
static int st_setup_cmd(struct scsi_tape *un, buf_t *bp, int com,
int64_t count);
static int st_set_compression(struct scsi_tape *un);
static int st_write_fm(dev_t dev, int wfm);
static int st_determine_generic(struct scsi_tape *un);
static int st_determine_density(struct scsi_tape *un, int rw);
static int st_get_density(struct scsi_tape *un);
static int st_set_density(struct scsi_tape *un);
static int st_loadtape(struct scsi_tape *un);
static int st_modesense(struct scsi_tape *un);
static int st_modeselect(struct scsi_tape *un);
static errstate st_handle_incomplete(struct scsi_tape *un, struct buf *bp);
static int st_wrongtapetype(struct scsi_tape *un);
static errstate st_check_error(struct scsi_tape *un, struct scsi_pkt *pkt);
static errstate st_handle_sense(struct scsi_tape *un, struct buf *bp,
tapepos_t *);
static errstate st_handle_autosense(struct scsi_tape *un, struct buf *bp,
tapepos_t *);
static int st_get_error_entry(struct scsi_tape *un, intptr_t arg, int flag);
static void st_update_error_stack(struct scsi_tape *un, struct scsi_pkt *pkt,
struct scsi_arq_status *cmd);
static void st_empty_error_stack(struct scsi_tape *un);
static errstate st_decode_sense(struct scsi_tape *un, struct buf *bp, int amt,
struct scsi_arq_status *, tapepos_t *);
static int st_report_soft_errors(dev_t dev, int flag);
static void st_delayed_cv_broadcast(void *arg);
static int st_check_media(dev_t dev, enum mtio_state state);
static int st_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void st_intr_restart(void *arg);
static void st_start_restart(void *arg);
static int st_gen_mode_sense(struct scsi_tape *un, ubufunc_t ubf, int page,
struct seq_mode *page_data, int page_size);
static int st_change_block_size(struct scsi_tape *un, uint32_t nblksz);
static int st_gen_mode_select(struct scsi_tape *un, ubufunc_t ubf,
struct seq_mode *page_data, int page_size);
static int st_read_block_limits(struct scsi_tape *un,
struct read_blklim *read_blk);
static int st_report_density_support(struct scsi_tape *un,
uchar_t *density_data, size_t buflen);
static int st_report_supported_operation(struct scsi_tape *un,
uchar_t *oper_data, uchar_t option_code, ushort_t service_action);
static int st_tape_init(struct scsi_tape *un);
static void st_flush(struct scsi_tape *un);
static void st_set_pe_errno(struct scsi_tape *un);
static void st_hba_unflush(struct scsi_tape *un);
static void st_turn_pe_on(struct scsi_tape *un);
static void st_turn_pe_off(struct scsi_tape *un);
static void st_set_pe_flag(struct scsi_tape *un);
static void st_clear_pe(struct scsi_tape *un);
static void st_wait_for_io(struct scsi_tape *un);
static int st_set_devconfig_page(struct scsi_tape *un, int compression_on);
static int st_set_datacomp_page(struct scsi_tape *un, int compression_on);
static int st_reserve_release(struct scsi_tape *un, int command, ubufunc_t ubf);
static int st_check_cdb_for_need_to_reserve(struct scsi_tape *un, uchar_t *cdb);
static int st_check_cmd_for_need_to_reserve(struct scsi_tape *un, uchar_t cmd,
int count);
static int st_take_ownership(struct scsi_tape *un, ubufunc_t ubf);
static int st_check_asc_ascq(struct scsi_tape *un);
static int st_check_clean_bit(struct scsi_tape *un);
static int st_check_alert_flags(struct scsi_tape *un);
static int st_check_sequential_clean_bit(struct scsi_tape *un);
static int st_check_sense_clean_bit(struct scsi_tape *un);
static int st_clear_unit_attentions(dev_t dev_instance, int max_trys);
static void st_calculate_timeouts(struct scsi_tape *un);
static writablity st_is_drive_worm(struct scsi_tape *un);
static int st_read_attributes(struct scsi_tape *un, uint16_t attribute,
void *buf, size_t size, ubufunc_t bufunc);
static int st_get_special_inquiry(struct scsi_tape *un, uchar_t size,
caddr_t dest, uchar_t page);
static int st_update_block_pos(struct scsi_tape *un, bufunc_t bf,
int post_space);
static int st_interpret_read_pos(struct scsi_tape const *un, tapepos_t *dest,
read_p_types type, size_t data_sz, const caddr_t responce, int post_space);
static int st_get_read_pos(struct scsi_tape *un, buf_t *bp);
static int st_logical_block_locate(struct scsi_tape *un, ubufunc_t ubf,
tapepos_t *pos, uint64_t lblk, uchar_t partition);
static int st_mtfsf_ioctl(struct scsi_tape *un, int64_t files);
static int st_mtfsr_ioctl(struct scsi_tape *un, int64_t count);
static int st_mtbsf_ioctl(struct scsi_tape *un, int64_t files);
static int st_mtnbsf_ioctl(struct scsi_tape *un, int64_t count);
static int st_mtbsr_ioctl(struct scsi_tape *un, int64_t num);
static int st_mtfsfm_ioctl(struct scsi_tape *un, int64_t cnt);
static int st_mtbsfm_ioctl(struct scsi_tape *un, int64_t cnt);
static int st_backward_space_files(struct scsi_tape *un, int64_t count,
int infront);
static int st_forward_space_files(struct scsi_tape *un, int64_t files);
static int st_scenic_route_to_begining_of_file(struct scsi_tape *un,
int32_t fileno);
static int st_space_to_begining_of_file(struct scsi_tape *un);
static int st_space_records(struct scsi_tape *un, int64_t records);
static int st_get_media_identification(struct scsi_tape *un, ubufunc_t bufunc);
static errstate st_command_recovery(struct scsi_tape *un, struct scsi_pkt *pkt,
errstate onentry);
static void st_recover(void *arg);
static void st_recov_cb(struct scsi_pkt *pkt);
static int st_rcmd(struct scsi_tape *un, int com, int64_t count, int wait);
static int st_uscsi_rcmd(struct scsi_tape *un, struct uscsi_cmd *ucmd,
int flag);
static void st_add_recovery_info_to_pkt(struct scsi_tape *un, buf_t *bp,
struct scsi_pkt *cmd);
static int st_check_mode_for_change(struct scsi_tape *un, ubufunc_t ubf);
static int st_test_path_to_device(struct scsi_tape *un);
static int st_recovery_read_pos(struct scsi_tape *un, read_p_types type,
read_pos_data_t *raw);
static int st_recovery_get_position(struct scsi_tape *un, tapepos_t *read,
read_pos_data_t *raw);
static int st_compare_expected_position(struct scsi_tape *un, st_err_info *ei,
cmd_attribute const * cmd_att, tapepos_t *read);
static errstate st_recover_reissue_pkt(struct scsi_tape *us,
struct scsi_pkt *pkt);
static int st_transport(struct scsi_tape *un, struct scsi_pkt *pkt);
static buf_t *st_remove_from_queue(buf_t **head, buf_t **tail, buf_t *bp);
static void st_add_to_queue(buf_t **head, buf_t **tail, buf_t *end, buf_t *bp);
static int st_reset(struct scsi_tape *un, int reset_type);
static void st_reset_notification(caddr_t arg);
static const cmd_attribute *st_lookup_cmd_attribute(unsigned char cmd);
static int st_set_target_TLR_mode(struct scsi_tape *un, ubufunc_t ubf);
static int st_make_sure_mode_data_is_correct(struct scsi_tape *un,
ubufunc_t ubf);
#ifdef __x86
/*
* routines for I/O in big block size
*/
static void st_release_contig_mem(struct scsi_tape *un, struct contig_mem *cp);
static struct contig_mem *st_get_contig_mem(struct scsi_tape *un, size_t len,
int alloc_flags);
static int st_bigblk_xfer_done(struct buf *bp);
static struct buf *st_get_bigblk_bp(struct buf *bp);
#endif
static void st_print_position(dev_info_t *dev, char *label, uint_t level,
const char *comment, tapepos_t *pos);
/*
* error statistics create/update functions
*/
static int st_create_errstats(struct scsi_tape *, int);
static int st_validate_tapemarks(struct scsi_tape *un, ubufunc_t ubf,
tapepos_t *pos);
#ifdef STDEBUG
static void st_debug_cmds(struct scsi_tape *un, int com, int count, int wait);
#endif /* STDEBUG */
static char *st_dev_name(dev_t dev);
#if !defined(lint)
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt",
scsi_pkt buf uio scsi_cdb uscsi_cmd))
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_extended_sense scsi_status))
_NOTE(SCHEME_PROTECTS_DATA("unique per pkt", recov_info))
_NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device))
_NOTE(DATA_READABLE_WITHOUT_LOCK(st_drivetype scsi_address))
#endif
/*
* autoconfiguration routines.
*/
static struct modldrv modldrv = {
&mod_driverops, /* Type of module. This one is a driver */
"SCSI tape Driver", /* Name of the module. */
&st_ops /* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1, &modldrv, NULL
};
/*
* Notes on Post Reset Behavior in the tape driver:
*
* When the tape drive is opened, the driver attempts to make sure that
* the tape head is positioned exactly where it was left when it was last
* closed provided the medium is not changed. If the tape drive is
* opened in O_NDELAY mode, the repositioning (if necessary for any loss
* of position due to reset) will happen when the first tape operation or
* I/O occurs. The repositioning (if required) may not be possible under
* certain situations such as when the device firmware is not able to report
* the medium change in the REQUEST SENSE data because of a reset or a
* misbehaving bus not allowing the reposition to happen. In such
* extraordinary situations, where the driver fails to position the head
* at its original position, it will fail the open the first time, to
* save the applications from overwriting the data. All further attempts
* to open the tape device will result in the driver attempting to load
* the tape at BOT (beginning of tape). Also a warning message to
* indicate that further attempts to open the tape device may result in
* the tape being loaded at BOT will be printed on the console. If the
* tape device is opened in O_NDELAY mode, failure to restore the
* original tape head position will result in the failure of the first
* tape operation or I/O. Further, the driver will invalidate its
* internal tape position, which will require the applications to
* validate the position by using either a tape positioning ioctl (such
* as MTREW) or closing and reopening the tape device.
*
*/
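/*
 * From an application's point of view, revalidating the position after
 * such a failure can be done with a positioning ioctl; an illustrative
 * user-level sketch using mtio(7I) (fd is a hypothetical open descriptor):
 *
 *	struct mtop op;
 *	op.mt_op = MTREW;
 *	op.mt_count = 1;
 *	if (ioctl(fd, MTIOCTOP, &op) == 0) {
 *		... position is now known (BOT), I/O may resume ...
 *	}
 */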
int
_init(void)
{
int e;
if (((e = ddi_soft_state_init(&st_state,
sizeof (struct scsi_tape), ST_MAXUNIT)) != 0)) {
return (e);
}
if ((e = mod_install(&modlinkage)) != 0) {
ddi_soft_state_fini(&st_state);
} else {
#ifdef STDEBUG
mutex_init(&st_debug_mutex, NULL, MUTEX_DRIVER, NULL);
#endif
#if defined(__x86)
/* set the max physical address for iob allocs on x86 */
st_alloc_attr.dma_attr_addr_hi = st_max_phys_addr;
/*
* set the sgllen for iob allocs on x86. If this is set less
* than the number of pages the buffer will take
* (taking into account alignment), it would force the
* allocator to try and allocate contiguous pages.
*/
st_alloc_attr.dma_attr_sgllen = st_sgl_size;
#endif
}
return (e);
}
int
_fini(void)
{
int e;
if ((e = mod_remove(&modlinkage)) != 0) {
return (e);
}
#ifdef STDEBUG
mutex_destroy(&st_debug_mutex);
#endif
ddi_soft_state_fini(&st_state);
return (e);
}
int
_info(struct modinfo *modinfop)
{
return (mod_info(&modlinkage, modinfop));
}
static int
st_probe(dev_info_t *devi)
{
int instance;
struct scsi_device *devp;
int rval;
#if !defined(__sparc)
char *tape_prop;
int tape_prop_len;
#endif
ST_ENTR(devi, st_probe);
/* If self identifying device */
if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
return (DDI_PROBE_DONTCARE);
}
#if !defined(__sparc)
/*
* Since some x86 HBAs have devnodes that look like SCSI as
* far as we can tell but aren't really SCSI (DADK, like mlx)
* we check for the presence of the "tape" property.
*/
if (ddi_prop_op(DDI_DEV_T_NONE, devi, PROP_LEN_AND_VAL_ALLOC,
DDI_PROP_CANSLEEP, "tape",
(caddr_t)&tape_prop, &tape_prop_len) != DDI_PROP_SUCCESS) {
return (DDI_PROBE_FAILURE);
}
if (strncmp(tape_prop, "sctp", tape_prop_len) != 0) {
kmem_free(tape_prop, tape_prop_len);
return (DDI_PROBE_FAILURE);
}
kmem_free(tape_prop, tape_prop_len);
#endif
devp = ddi_get_driver_private(devi);
instance = ddi_get_instance(devi);
if (ddi_get_soft_state(st_state, instance) != NULL) {
return (DDI_PROBE_PARTIAL);
}
/*
* Turn around and call probe routine to see whether
* we actually have a tape at this SCSI nexus.
*/
if (scsi_probe(devp, NULL_FUNC) == SCSIPROBE_EXISTS) {
/*
* In checking the whole inq_dtype byte we are looking at both
* the Peripheral Qualifier and the Peripheral Device Type.
* For this driver we are only interested in sequential devices
* that are connected or capable of connecting to this logical
* unit.
*/
if (devp->sd_inq->inq_dtype ==
(DTYPE_SEQUENTIAL | DPQ_POSSIBLE)) {
ST_DEBUG6(devi, st_label, SCSI_DEBUG,
"probe exists\n");
rval = DDI_PROBE_SUCCESS;
} else {
rval = DDI_PROBE_FAILURE;
}
} else {
ST_DEBUG6(devi, st_label, SCSI_DEBUG,
"probe failure: nothing there\n");
rval = DDI_PROBE_FAILURE;
}
scsi_unprobe(devp);
return (rval);
}
static int
st_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
int instance;
int wide;
int dev_instance;
int ret_status;
struct scsi_device *devp;
int node_ix;
struct scsi_tape *un;
ST_ENTR(devi, st_attach);
devp = ddi_get_driver_private(devi);
instance = ddi_get_instance(devi);
switch (cmd) {
case DDI_ATTACH:
if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
"tape-command-recovery-disable", 0) != 0) {
st_recov_sz = sizeof (pkt_info);
}
if (st_doattach(devp, SLEEP_FUNC) == DDI_FAILURE) {
return (DDI_FAILURE);
}
break;
case DDI_RESUME:
/*
* Suspend/Resume
*
* When the driver was suspended, there might be
* outstanding cmds and therefore we need to
* reset the suspended flag and resume the scsi
* watch thread and restart commands and timeouts
*/
if (!(un = ddi_get_soft_state(st_state, instance))) {
return (DDI_FAILURE);
}
dev_instance = ((un->un_dev == 0) ? MTMINOR(instance) :
un->un_dev);
mutex_enter(ST_MUTEX);
un->un_throttle = un->un_max_throttle;
un->un_tids_at_suspend = 0;
un->un_pwr_mgmt = ST_PWR_NORMAL;
if (un->un_swr_token) {
scsi_watch_resume(un->un_swr_token);
}
/*
* Restart timeouts
*/
if ((un->un_tids_at_suspend & ST_DELAY_TID) != 0) {
mutex_exit(ST_MUTEX);
un->un_delay_tid = timeout(
st_delayed_cv_broadcast, un,
drv_usectohz((clock_t)
MEDIA_ACCESS_DELAY));
mutex_enter(ST_MUTEX);
}
if (un->un_tids_at_suspend & ST_HIB_TID) {
mutex_exit(ST_MUTEX);
un->un_hib_tid = timeout(st_intr_restart, un,
ST_STATUS_BUSY_TIMEOUT);
mutex_enter(ST_MUTEX);
}
ret_status = st_clear_unit_attentions(dev_instance, 5);
/*
* now check if we need to restore the tape position
*/
if ((un->un_suspend_pos.pmode != invalid) &&
((un->un_suspend_pos.fileno > 0) ||
(un->un_suspend_pos.blkno > 0)) ||
(un->un_suspend_pos.lgclblkno > 0)) {
if (ret_status != 0) {
/*
* tape didn't get good TUR
* just print out error messages
*/
scsi_log(ST_DEVINFO, st_label, CE_WARN,
"st_attach-RESUME: tape failure"
" tape position will be lost");
} else {
/* this prints errors */
(void) st_validate_tapemarks(un,
st_uscsi_cmd, &un->un_suspend_pos);
}
/*
* there are no retries, if there is an error
* we don't know if the tape has changed
*/
un->un_suspend_pos.pmode = invalid;
}
/* now we are ready to start up any queued I/Os */
if (un->un_ncmds || un->un_quef) {
st_start(un);
}
cv_broadcast(&un->un_suspend_cv);
mutex_exit(ST_MUTEX);
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
un = ddi_get_soft_state(st_state, instance);
ST_DEBUG(devi, st_label, SCSI_DEBUG,
"st_attach: instance=%x\n", instance);
/*
* Add a zero-length attribute to tell the world we support
* kernel ioctls (for layered drivers)
*/
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
DDI_KERNEL_IOCTL, NULL, 0);
ddi_report_dev((dev_info_t *)devi);
/*
* If it's a SCSI-2 tape drive which supports wide,
* tell the host adapter to use wide.
*/
wide = ((devp->sd_inq->inq_rdf == RDF_SCSI2) &&
(devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) ? 1 : 0;
if (scsi_ifsetcap(ROUTE, "wide-xfer", wide, 1) == 1) {
ST_DEBUG(devi, st_label, SCSI_DEBUG,
"Wide Transfer %s\n", wide ? "enabled" : "disabled");
}
/*
* enable autorequest sense; keep the rq packet around in case
* the autorequest sense fails because of a busy condition;
* do a getcap first in case the capability is not variable
*/
if (scsi_ifgetcap(ROUTE, "auto-rqsense", 1) == 1) {
un->un_arq_enabled = 1;
} else {
un->un_arq_enabled =
((scsi_ifsetcap(ROUTE, "auto-rqsense", 1, 1) == 1) ? 1 : 0);
}
ST_DEBUG(devi, st_label, SCSI_DEBUG, "auto request sense %s\n",
(un->un_arq_enabled ? "enabled" : "disabled"));
un->un_untagged_qing =
(scsi_ifgetcap(ROUTE, "untagged-qing", 0) == 1);
/*
* XXX - This is just for 2.6, to tell users that write buffering
* has gone away.
*/
if (un->un_arq_enabled && un->un_untagged_qing) {
if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
"tape-driver-buffering", 0) != 0) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"Write Data Buffering has been deprecated. Your "
"applications should continue to work normally.\n"
" But, they should be ported to use Asynchronous "
"I/O\n"
" For more information, read about the "
"tape-driver-buffering "
"property in the st(7d) man page\n");
}
}
un->un_max_throttle = un->un_throttle = un->un_last_throttle = 1;
un->un_flush_on_errors = 0;
un->un_mkr_pkt = (struct scsi_pkt *)NULL;
ST_DEBUG(devi, st_label, SCSI_DEBUG,
"throttle=%x, max_throttle = %x\n",
un->un_throttle, un->un_max_throttle);
/* initialize persistent errors to nil */
un->un_persistence = 0;
un->un_persist_errors = 0;
/*
* Get dma-max from HBA driver. If it is not defined, use 64k
*/
un->un_maxdma = scsi_ifgetcap(&devp->sd_address, "dma-max", 1);
if (un->un_maxdma == -1) {
ST_DEBUG(devi, st_label, SCSI_DEBUG,
"Received a value that looked like -1. Using 64k maxdma");
un->un_maxdma = (64 * ONE_K);
}
#ifdef __x86
/*
* for x86, the device may be able to DMA more than the system will
* allow under some circumstances. We need to account for both the HBA's
* and the system's constraints.
*
* Get the maximum DMA under worst case conditions. e.g. looking at the
* device constraints, the max copy buffer size, and the worst case
* fragmentation. NOTE: this may differ from dma-max since dma-max
* doesn't take the worst case fragmentation into account.
*
* e.g. a device may be able to DMA 16MBytes, but can only DMA 1MByte
* if none of the pages are contiguous. Keeping track of both of these
* values allows us to support larger tape block sizes on some devices.
*/
un->un_maxdma_arch = scsi_ifgetcap(&devp->sd_address, "dma-max-arch",
1);
/*
* If the dma-max-arch capability is not implemented, or the value
* comes back higher than what was reported in dma-max, use dma-max.
*/
if ((un->un_maxdma_arch == -1) ||
((uint_t)un->un_maxdma < (uint_t)un->un_maxdma_arch)) {
un->un_maxdma_arch = un->un_maxdma;
}
#endif
/*
* Get the max allowable cdb size
*/
un->un_max_cdb_sz =
scsi_ifgetcap(&devp->sd_address, "max-cdb-length", 1);
if (un->un_max_cdb_sz < CDB_GROUP0) {
ST_DEBUG(devi, st_label, SCSI_DEBUG,
"HBA reported max-cdb-length as %d\n", un->un_max_cdb_sz);
un->un_max_cdb_sz = CDB_GROUP4; /* optimistic default */
}
if (strcmp(ddi_driver_name(ddi_get_parent(ST_DEVINFO)), "scsi_vhci")) {
un->un_multipath = 0;
} else {
un->un_multipath = 1;
}
un->un_maxbsize = MAXBSIZE_UNKNOWN;
un->un_mediastate = MTIO_NONE;
un->un_HeadClean = TAPE_ALERT_SUPPORT_UNKNOWN;
/*
* initialize kstats
*/
un->un_stats = kstat_create("st", instance, NULL, "tape",
KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
if (un->un_stats) {
un->un_stats->ks_lock = ST_MUTEX;
kstat_install(un->un_stats);
}
(void) st_create_errstats(un, instance);
/*
* find the drive type for this target
*/
mutex_enter(ST_MUTEX);
un->un_dev = MTMINOR(instance);
st_known_tape_type(un);
un->un_dev = 0;
mutex_exit(ST_MUTEX);
for (node_ix = 0; node_ix < ST_NUM_MEMBERS(st_minor_data); node_ix++) {
int minor;
char *name;
name = st_minor_data[node_ix].name;
minor = st_minor_data[node_ix].minor;
/*
* For default devices set the density to the
* preferred default density for this device.
*/
if (node_ix <= DEF_BSD_NR) {
minor |= un->un_dp->default_density;
}
minor |= MTMINOR(instance);
if (ddi_create_minor_node(devi, name, S_IFCHR, minor,
DDI_NT_TAPE, NULL) == DDI_SUCCESS) {
continue;
}
ddi_remove_minor_node(devi, NULL);
(void) scsi_reset_notify(ROUTE, SCSI_RESET_CANCEL,
st_reset_notification, (caddr_t)un);
cv_destroy(&un->un_clscv);
cv_destroy(&un->un_sbuf_cv);
cv_destroy(&un->un_queue_cv);
cv_destroy(&un->un_state_cv);
#ifdef __x86
cv_destroy(&un->un_contig_mem_cv);
#endif
cv_destroy(&un->un_suspend_cv);
cv_destroy(&un->un_tape_busy_cv);
cv_destroy(&un->un_recov_buf_cv);
if (un->un_recov_taskq) {
ddi_taskq_destroy(un->un_recov_taskq);
}
if (un->un_sbufp) {
freerbuf(un->un_sbufp);
}
if (un->un_recov_buf) {
freerbuf(un->un_recov_buf);
}
if (un->un_uscsi_rqs_buf) {
kmem_free(un->un_uscsi_rqs_buf, SENSE_LENGTH);
}
if (un->un_mspl) {
i_ddi_mem_free((caddr_t)un->un_mspl, NULL);
}
if (un->un_dp_size) {
kmem_free(un->un_dp, un->un_dp_size);
}
if (un->un_stats) {
kstat_delete(un->un_stats);
}
if (un->un_errstats) {
kstat_delete(un->un_errstats);
}
scsi_destroy_pkt(un->un_rqs);
scsi_free_consistent_buf(un->un_rqs_bp);
ddi_soft_state_free(st_state, instance);
devp->sd_private = NULL;
devp->sd_sense = NULL;
ddi_prop_remove_all(devi);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* st_detach:
*
* we allow a detach if and only if:
* - no tape is currently inserted
* - tape position is at BOT or unknown
* (if it is not at BOT then a no rewind
* device was opened and we have to preserve state)
* - it must be in a closed state : no timeouts or scsi_watch requests
* will exist if it is closed, so we don't need to check for
* them here.
*/
/*ARGSUSED*/
static int
st_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
int instance;
int result;
struct scsi_device *devp;
struct scsi_tape *un;
clock_t wait_cmds_complete;
ST_ENTR(devi, st_detach);
instance = ddi_get_instance(devi);
if (!(un = ddi_get_soft_state(st_state, instance))) {
return (DDI_FAILURE);
}
mutex_enter(ST_MUTEX);
/*
* Clear error entry stack
*/
st_empty_error_stack(un);
mutex_exit(ST_MUTEX);
switch (cmd) {
case DDI_DETACH:
/*
* Undo what we did in st_attach & st_doattach,
* freeing resources and removing things we installed.
* The system framework guarantees we are not active
* with this devinfo node in any other entry points at
* this time.
*/
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_detach: instance=%x, un=%p\n", instance,
(void *)un);
if (((un->un_dp->options & ST_UNLOADABLE) == 0) ||
((un->un_rsvd_status & ST_APPLICATION_RESERVATIONS) != 0) ||
(un->un_ncmds != 0) || (un->un_quef != NULL) ||
(un->un_state != ST_STATE_CLOSED)) {
/*
* we cannot unload some targets because the
* inquiry returns junk unless immediately
* after a reset
*/
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"cannot unload instance %x\n", instance);
un->un_unit_attention_flags |= 4;
return (DDI_FAILURE);
}
/*
* if the tape has been removed then we may unload;
* do a test unit ready and if it returns NOT READY
* then we assume that it is safe to unload.
* as a side effect, pmode may be set to invalid if the
* test unit ready fails;
* also un_state may be set to non-closed, so reset it
*/
if ((un->un_dev) && /* Been opened since attach */
((un->un_pos.pmode == legacy) &&
(un->un_pos.fileno > 0) || /* Known position not rewound */
(un->un_pos.blkno != 0)) || /* Or within first file */
((un->un_pos.pmode == logical) &&
(un->un_pos.lgclblkno > 0))) {
mutex_enter(ST_MUTEX);
/*
* Send a Test Unit Ready in the hope that if
* the drive is not in the state we think it is,
* the state will be changed so it can be detached.
* If the command fails to reach the device and
* the drive was not rewound or unloaded, we want
* to fail the detach until a user command fails,
* after which the detach will succeed.
*/
result = st_cmd(un, SCMD_TEST_UNIT_READY, 0, SYNC_CMD);
/*
* After TUR un_state may be set to non-closed,
* so reset it back.
*/
un->un_state = ST_STATE_CLOSED;
mutex_exit(ST_MUTEX);
}
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"un_status=%x, fileno=%x, blkno=%x\n",
un->un_status, un->un_pos.fileno, un->un_pos.blkno);
/*
* check again:
* if we are not at BOT then it is not safe to unload
*/
if ((un->un_dev) && /* Been opened since attach */
(result != EACCES) && /* drive is in use by somebody */
((((un->un_pos.pmode == legacy) &&
(un->un_pos.fileno > 0) || /* Known position not rewound */
(un->un_pos.blkno != 0)) || /* Or within first file */
((un->un_pos.pmode == logical) &&
(un->un_pos.lgclblkno > 0))) &&
((un->un_state == ST_STATE_CLOSED) &&
(un->un_laststate == ST_STATE_CLOSING)))) {
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"cannot detach: pmode=%d fileno=0x%x, blkno=0x%x"
" lgclblkno=0x%"PRIx64"\n", un->un_pos.pmode,
un->un_pos.fileno, un->un_pos.blkno,
un->un_pos.lgclblkno);
un->un_unit_attention_flags |= 4;
return (DDI_FAILURE);
}
/*
* Just to make sure that we have released the
* tape unit.
*/
if (un->un_dev && (un->un_rsvd_status & ST_RESERVE) &&
!DEVI_IS_DEVICE_REMOVED(devi)) {
mutex_enter(ST_MUTEX);
(void) st_reserve_release(un, ST_RELEASE, st_uscsi_cmd);
mutex_exit(ST_MUTEX);
}
/*
* now remove other data structures allocated in st_doattach()
*/
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"destroying/freeing\n");
(void) scsi_reset_notify(ROUTE, SCSI_RESET_CANCEL,
st_reset_notification, (caddr_t)un);
cv_destroy(&un->un_clscv);
cv_destroy(&un->un_sbuf_cv);
cv_destroy(&un->un_queue_cv);
cv_destroy(&un->un_suspend_cv);
cv_destroy(&un->un_tape_busy_cv);
cv_destroy(&un->un_recov_buf_cv);
if (un->un_recov_taskq) {
ddi_taskq_destroy(un->un_recov_taskq);
}
if (un->un_hib_tid) {
(void) untimeout(un->un_hib_tid);
un->un_hib_tid = 0;
}
if (un->un_delay_tid) {
(void) untimeout(un->un_delay_tid);
un->un_delay_tid = 0;
}
cv_destroy(&un->un_state_cv);
#ifdef __x86
cv_destroy(&un->un_contig_mem_cv);
if (un->un_contig_mem_hdl != NULL) {
ddi_dma_free_handle(&un->un_contig_mem_hdl);
}
#endif
if (un->un_sbufp) {
freerbuf(un->un_sbufp);
}
if (un->un_recov_buf) {
freerbuf(un->un_recov_buf);
}
if (un->un_uscsi_rqs_buf) {
kmem_free(un->un_uscsi_rqs_buf, SENSE_LENGTH);
}
if (un->un_mspl) {
i_ddi_mem_free((caddr_t)un->un_mspl, NULL);
}
if (un->un_rqs) {
scsi_destroy_pkt(un->un_rqs);
scsi_free_consistent_buf(un->un_rqs_bp);
}
if (un->un_mkr_pkt) {
scsi_destroy_pkt(un->un_mkr_pkt);
}
if (un->un_arq_enabled) {
(void) scsi_ifsetcap(ROUTE, "auto-rqsense", 0, 1);
}
if (un->un_dp_size) {
kmem_free(un->un_dp, un->un_dp_size);
}
if (un->un_stats) {
kstat_delete(un->un_stats);
un->un_stats = (kstat_t *)0;
}
if (un->un_errstats) {
kstat_delete(un->un_errstats);
un->un_errstats = (kstat_t *)0;
}
if (un->un_media_id_len) {
kmem_free(un->un_media_id, un->un_media_id_len);
}
devp = ST_SCSI_DEVP;
ddi_soft_state_free(st_state, instance);
devp->sd_private = NULL;
devp->sd_sense = NULL;
scsi_unprobe(devp);
ddi_prop_remove_all(devi);
ddi_remove_minor_node(devi, NULL);
ST_DEBUG(0, st_label, SCSI_DEBUG, "st_detach done\n");
return (DDI_SUCCESS);
case DDI_SUSPEND:
/*
* Suspend/Resume
*
* To process DDI_SUSPEND, we must do the following:
*
* - check ddi_removing_power to see if power will be turned
* off. if so, return DDI_FAILURE
* - check if we are already suspended,
* if so, return DDI_FAILURE
* - check if device state is CLOSED,
* if not, return DDI_FAILURE.
* - wait until outstanding operations complete
* - save tape state
* - block new operations
* - cancel pending timeouts
*
*/
if (ddi_removing_power(devi)) {
return (DDI_FAILURE);
}
if (un->un_dev == 0)
un->un_dev = MTMINOR(instance);
mutex_enter(ST_MUTEX);
/*
* Shouldn't already be suspended, if so return failure
*/
if (un->un_pwr_mgmt == ST_PWR_SUSPENDED) {
mutex_exit(ST_MUTEX);
return (DDI_FAILURE);
}
if (un->un_state != ST_STATE_CLOSED) {
mutex_exit(ST_MUTEX);
return (DDI_FAILURE);
}
/*
* Wait for all outstanding I/O's to complete
*
* we wait on both ncmds and the wait queue for times
* when we are flushing after persistent errors are
* flagged, which is when ncmds can be 0, and the
* queue can still have I/O's. This way we preserve
* order of biodone's.
*/
wait_cmds_complete = ddi_get_lbolt();
wait_cmds_complete +=
st_wait_cmds_complete * drv_usectohz(1000000);
while (un->un_ncmds || un->un_quef ||
(un->un_state == ST_STATE_RESOURCE_WAIT)) {
if (cv_timedwait(&un->un_tape_busy_cv, ST_MUTEX,
wait_cmds_complete) == -1) {
/*
* Time expired then cancel the command
*/
if (st_reset(un, RESET_LUN) == 0) {
if (un->un_last_throttle) {
un->un_throttle =
un->un_last_throttle;
}
mutex_exit(ST_MUTEX);
return (DDI_FAILURE);
} else {
break;
}
}
}
/*
* DDI_SUSPEND says that the system "may" power down, so we
* remember the file and block number before rewinding.
* we also need to save state before issuing
* any WRITE_FILE_MARK command.
*/
(void) st_update_block_pos(un, st_cmd, 0);
COPY_POS(&un->un_suspend_pos, &un->un_pos);
/*
* Issue a zero-count write file mark command to tell the drive to
* flush any buffered tape marks
*/
(void) st_cmd(un, SCMD_WRITE_FILE_MARK, 0, SYNC_CMD);
/*
* Because not all tape drives correctly implement buffer
* flushing with the zero-count write file mark command, issue a
* synchronous rewind command to force data flushing.
* st_validate_tapemarks() will do a rewind during DDI_RESUME
* anyway.
*/
(void) st_cmd(un, SCMD_REWIND, 0, SYNC_CMD);
/* stop any new operations */
un->un_pwr_mgmt = ST_PWR_SUSPENDED;
un->un_throttle = 0;
/*
* cancel any outstanding timeouts
*/
if (un->un_delay_tid) {
timeout_id_t temp_id = un->un_delay_tid;
un->un_delay_tid = 0;
un->un_tids_at_suspend |= ST_DELAY_TID;
mutex_exit(ST_MUTEX);
(void) untimeout(temp_id);
mutex_enter(ST_MUTEX);
}
if (un->un_hib_tid) {
timeout_id_t temp_id = un->un_hib_tid;
un->un_hib_tid = 0;
un->un_tids_at_suspend |= ST_HIB_TID;
mutex_exit(ST_MUTEX);
(void) untimeout(temp_id);
mutex_enter(ST_MUTEX);
}
/*
* Suspend the scsi_watch_thread
*/
if (un->un_swr_token) {
opaque_t temp_token = un->un_swr_token;
mutex_exit(ST_MUTEX);
scsi_watch_suspend(temp_token);
} else {
mutex_exit(ST_MUTEX);
}
return (DDI_SUCCESS);
default:
ST_DEBUG(0, st_label, SCSI_DEBUG, "st_detach failed\n");
return (DDI_FAILURE);
}
}
/* ARGSUSED */
static int
st_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
dev_t dev;
struct scsi_tape *un;
int instance, error;
ST_ENTR(dip, st_info);
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
dev = (dev_t)arg;
instance = MTUNIT(dev);
if ((un = ddi_get_soft_state(st_state, instance)) == NULL)
return (DDI_FAILURE);
*result = (void *) ST_DEVINFO;
error = DDI_SUCCESS;
break;
case DDI_INFO_DEVT2INSTANCE:
dev = (dev_t)arg;
instance = MTUNIT(dev);
*result = (void *)(uintptr_t)instance;
error = DDI_SUCCESS;
break;
default:
error = DDI_FAILURE;
}
return (error);
}
static int
st_doattach(struct scsi_device *devp, int (*canwait)())
{
struct scsi_tape *un = NULL;
recov_info *ri;
int km_flags = (canwait != NULL_FUNC) ? KM_SLEEP : KM_NOSLEEP;
int instance;
size_t rlen;
ST_FUNC(devp->sd_dev, st_doattach);
/*
* Call the routine scsi_probe to do some of the dirty work.
* If the INQUIRY command succeeds, the field sd_inq in the
* device structure will be filled in.
*/
ST_DEBUG(devp->sd_dev, st_label, SCSI_DEBUG,
"st_doattach(): probing\n");
if (scsi_probe(devp, canwait) == SCSIPROBE_EXISTS) {
/*
* In checking the whole inq_dtype byte we are looking at both
* the Peripheral Qualifier and the Peripheral Device Type.
* For this driver we are only interested in sequential devices
* that are connected or capable of connecting to this logical
* unit.
*/
if (devp->sd_inq->inq_dtype ==
(DTYPE_SEQUENTIAL | DPQ_POSSIBLE)) {
ST_DEBUG(devp->sd_dev, st_label, SCSI_DEBUG,
"probe exists\n");
} else {
/* Something there but not a tape device */
scsi_unprobe(devp);
return (DDI_FAILURE);
}
} else {
/* Nothing there */
ST_DEBUG(devp->sd_dev, st_label, SCSI_DEBUG,
"probe failure: nothing there\n");
scsi_unprobe(devp);
return (DDI_FAILURE);
}
/*
* The actual unit is present.
* Now is the time to fill in the rest of our info..
*/
instance = ddi_get_instance(devp->sd_dev);
if (ddi_soft_state_zalloc(st_state, instance) != DDI_SUCCESS) {
goto error;
}
un = ddi_get_soft_state(st_state, instance);
ASSERT(un != NULL);
un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
MAX_SENSE_LENGTH, B_READ, canwait, NULL);
if (un->un_rqs_bp == NULL) {
goto error;
}
un->un_rqs = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
CDB_GROUP0, 1, st_recov_sz, PKT_CONSISTENT, canwait, NULL);
if (!un->un_rqs) {
goto error;
}
ASSERT(un->un_rqs->pkt_resid == 0);
devp->sd_sense =
(struct scsi_extended_sense *)un->un_rqs_bp->b_un.b_addr;
ASSERT(geterror(un->un_rqs_bp) == NULL);
(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs->pkt_cdbp,
SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
FILL_SCSI1_LUN(devp, un->un_rqs);
un->un_rqs->pkt_flags |= (FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON);
un->un_rqs->pkt_time = st_io_time;
un->un_rqs->pkt_comp = st_intr;
ri = (recov_info *)un->un_rqs->pkt_private;
if (st_recov_sz == sizeof (recov_info)) {
ri->privatelen = sizeof (recov_info);
} else {
ri->privatelen = sizeof (pkt_info);
}
un->un_sbufp = getrbuf(km_flags);
un->un_recov_buf = getrbuf(km_flags);
un->un_uscsi_rqs_buf = kmem_alloc(SENSE_LENGTH, KM_SLEEP);
/*
* use i_ddi_mem_alloc() for now until we have an interface to allocate
* memory for DMA which doesn't require a DMA handle.
*/
(void) i_ddi_mem_alloc(devp->sd_dev, &st_alloc_attr,
sizeof (struct seq_mode), ((km_flags == KM_SLEEP) ? 1 : 0), 0,
NULL, (caddr_t *)&un->un_mspl, &rlen, NULL);
(void) i_ddi_mem_alloc(devp->sd_dev, &st_alloc_attr,
sizeof (read_pos_data_t), ((km_flags == KM_SLEEP) ? 1 : 0), 0,
NULL, (caddr_t *)&un->un_read_pos_data, &rlen, NULL);
if (!un->un_sbufp || !un->un_mspl || !un->un_read_pos_data) {
ST_DEBUG6(devp->sd_dev, st_label, SCSI_DEBUG,
"probe partial failure: no space\n");
goto error;
}
bzero(un->un_mspl, sizeof (struct seq_mode));
cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_queue_cv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_clscv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
#ifdef __x86
cv_init(&un->un_contig_mem_cv, NULL, CV_DRIVER, NULL);
#endif
/* Initialize power management condition variable */
cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_tape_busy_cv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_recov_buf_cv, NULL, CV_DRIVER, NULL);
un->un_recov_taskq = ddi_taskq_create(devp->sd_dev,
"un_recov_taskq", 1, TASKQ_DEFAULTPRI, km_flags);
ASSERT(un->un_recov_taskq != NULL);
un->un_pos.pmode = invalid;
un->un_sd = devp;
un->un_swr_token = (opaque_t)NULL;
un->un_comp_page = ST_DEV_DATACOMP_PAGE | ST_DEV_CONFIG_PAGE;
un->un_wormable = st_is_drive_worm;
un->un_media_id_method = st_get_media_identification;
/*
* setting long as the initial type as it contains logical file info.
* support for the long format is mandatory but many drives don't do it.
*/
un->un_read_pos_type = LONG_POS;
un->un_suspend_pos.pmode = invalid;
st_add_recovery_info_to_pkt(un, un->un_rqs_bp, un->un_rqs);
#ifdef __x86
if (ddi_dma_alloc_handle(ST_DEVINFO, &st_contig_mem_dma_attr,
DDI_DMA_SLEEP, NULL, &un->un_contig_mem_hdl) != DDI_SUCCESS) {
ST_DEBUG6(devp->sd_dev, st_label, SCSI_DEBUG,
"allocation of contiguous memory dma handle failed!");
un->un_contig_mem_hdl = NULL;
goto error;
}
#endif
/*
* Since this driver manages devices with "remote" hardware,
* i.e. the devices themselves have no "reg" properties,
* the SUSPEND/RESUME commands in detach/attach will not be
* called by the power management framework unless we request
* it by creating a "pm-hardware-state" property and setting it
* to value "needs-suspend-resume".
*/
if (ddi_prop_update_string(DDI_DEV_T_NONE, devp->sd_dev,
"pm-hardware-state", "needs-suspend-resume") !=
DDI_PROP_SUCCESS) {
ST_DEBUG(devp->sd_dev, st_label, SCSI_DEBUG,
"ddi_prop_update(\"pm-hardware-state\") failed\n");
goto error;
}
if (ddi_prop_create(DDI_DEV_T_NONE, devp->sd_dev, DDI_PROP_CANSLEEP,
"no-involuntary-power-cycles", NULL, 0) != DDI_PROP_SUCCESS) {
ST_DEBUG(devp->sd_dev, st_label, SCSI_DEBUG,
"ddi_prop_create(\"no-involuntary-power-cycles\") "
"failed\n");
goto error;
}
(void) scsi_reset_notify(ROUTE, SCSI_RESET_NOTIFY,
st_reset_notification, (caddr_t)un);
ST_DEBUG6(devp->sd_dev, st_label, SCSI_DEBUG, "attach success\n");
return (DDI_SUCCESS);
error:
devp->sd_sense = NULL;
ddi_remove_minor_node(devp->sd_dev, NULL);
if (un) {
if (un->un_mspl) {
i_ddi_mem_free((caddr_t)un->un_mspl, NULL);
}
if (un->un_read_pos_data) {
i_ddi_mem_free((caddr_t)un->un_read_pos_data, 0);
}
if (un->un_sbufp) {
freerbuf(un->un_sbufp);
}
if (un->un_recov_buf) {
freerbuf(un->un_recov_buf);
}
if (un->un_uscsi_rqs_buf) {
kmem_free(un->un_uscsi_rqs_buf, SENSE_LENGTH);
}
#ifdef __x86
if (un->un_contig_mem_hdl != NULL) {
ddi_dma_free_handle(&un->un_contig_mem_hdl);
}
#endif
if (un->un_rqs) {
scsi_destroy_pkt(un->un_rqs);
}
if (un->un_rqs_bp) {
scsi_free_consistent_buf(un->un_rqs_bp);
}
ddi_soft_state_free(st_state, instance);
devp->sd_private = NULL;
}
if (devp->sd_inq) {
scsi_unprobe(devp);
}
return (DDI_FAILURE);
}
typedef int
(*cfg_functp)(struct scsi_tape *, char *vidpid, struct st_drivetype *);
static cfg_functp config_functs[] = {
st_get_conf_from_st_dot_conf,
st_get_conf_from_st_conf_dot_c,
st_get_conf_from_tape_drive,
st_get_default_conf
};
/*
* determine tape type, using tape-config-list or built-in table or
* use a generic tape config entry
*/
static void
st_known_tape_type(struct scsi_tape *un)
{
struct st_drivetype *dp;
cfg_functp *config_funct;
uchar_t reserved;
ST_FUNC(ST_DEVINFO, st_known_tape_type);
reserved = (un->un_rsvd_status & ST_RESERVE) ? ST_RESERVE
: ST_RELEASE;
/*
* XXX: Emulex MT-02 (and emulators) predates SCSI-1 and has
* no vid & pid inquiry data. So, we provide one.
*/
if (ST_INQUIRY->inq_len == 0 ||
(bcmp("\0\0\0\0\0\0\0\0", ST_INQUIRY->inq_vid, 8) == 0)) {
(void) strcpy((char *)ST_INQUIRY->inq_vid, ST_MT02_NAME);
}
if (un->un_dp_size == 0) {
un->un_dp_size = sizeof (struct st_drivetype);
dp = kmem_zalloc((size_t)un->un_dp_size, KM_SLEEP);
un->un_dp = dp;
} else {
dp = un->un_dp;
}
un->un_dp->non_motion_timeout = st_io_time;
/*
* Loop through the configuration methods till one works.
*/
for (config_funct = &config_functs[0]; ; config_funct++) {
if ((*config_funct)(un, ST_INQUIRY->inq_vid, dp)) {
break;
}
}
/*
* If we didn't just make up this configuration and
* all the density codes are the same,
* set Auto Density override.
*/
if (*config_funct != st_get_default_conf) {
/*
* If this device is one that is configured and all
* densities are the same, this saves doing gets and sets
* that yield nothing.
*/
if ((dp->densities[0]) == (dp->densities[1]) &&
(dp->densities[0]) == (dp->densities[2]) &&
(dp->densities[0]) == (dp->densities[3])) {
dp->options |= ST_AUTODEN_OVERRIDE;
}
}
/*
* Store tape drive characteristics.
*/
un->un_status = 0;
un->un_attached = 1;
un->un_init_options = dp->options;
/* setup operation time-outs based on options */
st_calculate_timeouts(un);
/* TLR support */
if (un->un_dp->type != ST_TYPE_INVALID) {
int result;
/* try and enable TLR */
un->un_tlr_flag = TLR_SAS_ONE_DEVICE;
result = st_set_target_TLR_mode(un, st_uscsi_cmd);
if (result == EACCES) {
/*
* The command failed while called from attach.
* Set dp type so this is run again on open.
*/
un->un_dp->type = ST_TYPE_INVALID;
un->un_tlr_flag = TLR_NOT_KNOWN;
} else if (result == 0) {
if (scsi_ifgetcap(&un->un_sd->sd_address,
"tran-layer-retries", 1) == -1) {
un->un_tlr_flag = TLR_NOT_SUPPORTED;
(void) st_set_target_TLR_mode(un, st_uscsi_cmd);
} else {
un->un_tlr_flag = TLR_SAS_ONE_DEVICE;
}
} else {
un->un_tlr_flag = TLR_NOT_SUPPORTED;
}
}
/* make sure if we are supposed to be variable, make it variable */
if (dp->options & ST_VARIABLE) {
dp->bsize = 0;
}
if (reserved != ((un->un_rsvd_status & ST_RESERVE) ? ST_RESERVE
: ST_RELEASE)) {
(void) st_reserve_release(un, reserved, st_uscsi_cmd);
}
un->un_unit_attention_flags |= 1;
scsi_log(ST_DEVINFO, st_label, CE_NOTE, "?<%s>\n", dp->name);
}
typedef struct {
int mask;
int bottom;
int top;
char *name;
} conf_limit;
static const conf_limit conf_limits[] = {
-1, 1, 2, "conf version",
-1, MT_ISTS, ST_LAST_TYPE, "drive type",
-1, 0, 0xffffff, "block size",
ST_VALID_OPTS, 0, ST_VALID_OPTS, "options",
-1, 0, 4, "number of densities",
-1, 0, UINT8_MAX, "density code",
-1, 0, 3, "default density",
-1, 0, UINT16_MAX, "non motion timeout",
-1, 0, UINT16_MAX, "I/O timeout",
-1, 0, UINT16_MAX, "space timeout",
-1, 0, UINT16_MAX, "load timeout",
-1, 0, UINT16_MAX, "unload timeout",
-1, 0, UINT16_MAX, "erase timeout",
0, 0, 0, NULL
};
static int
st_validate_conf_data(struct scsi_tape *un, int *list, int list_len,
const char *conf_name)
{
int dens;
int ndens;
int value;
int type;
int count;
const conf_limit *limit = &conf_limits[0];
ST_FUNC(ST_DEVINFO, st_validate_conf_data);
ST_DEBUG3(ST_DEVINFO, st_label, CE_NOTE,
"Checking %d entries total with %d densities\n", list_len, list[4]);
count = list_len;
type = *list;
for (; count && limit->name; count--, list++, limit++) {
value = *list;
if (value & ~limit->mask) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s %s value invalid bits set: 0x%X\n",
conf_name, limit->name, value & ~limit->mask);
*list &= limit->mask;
} else if (value < limit->bottom) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s %s value too low: value = %d limit %d\n",
conf_name, limit->name, value, limit->bottom);
} else if (value > limit->top) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s %s value too high: value = %d limit %d\n",
conf_name, limit->name, value, limit->top);
} else {
ST_DEBUG3(ST_DEVINFO, st_label, CE_CONT,
"%s %s value = 0x%X\n",
conf_name, limit->name, value);
}
/* If this is not the number-of-densities entry, continue */
if (limit != &conf_limits[4]) {
continue;
}
/* If number of densities is not in range can't use config */
if (value < limit->bottom || value > limit->top) {
return (-1);
}
ndens = min(value, NDENSITIES);
if ((type == 1) && (list_len - ndens) != 6) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s conf version 1 with %d densities has %d items"
" should have %d",
conf_name, ndens, list_len, 6 + ndens);
} else if ((type == 2) && (list_len - ndens) != 13) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s conf version 2 with %d densities has %d items"
" should have %d",
conf_name, ndens, list_len, 13 + ndens);
}
limit++;
for (dens = 0; dens < ndens && count; dens++) {
count--;
list++;
value = *list;
if (value < limit->bottom) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s density[%d] value too low: value ="
" 0x%X limit 0x%X\n",
conf_name, dens, value, limit->bottom);
} else if (value > limit->top) {
scsi_log(ST_DEVINFO, st_label, CE_NOTE,
"%s density[%d] value too high: value ="
" 0x%X limit 0x%X\n",
conf_name, dens, value, limit->top);
} else {
ST_DEBUG3(ST_DEVINFO, st_label, CE_CONT,
"%s density[%d] value = 0x%X\n",
conf_name, dens, value);
}
}
}
return (0);
}
static int
st_get_conf_from_st_dot_conf(struct scsi_tape *un, char *vidpid,
struct st_drivetype *dp)
{
caddr_t config_list = NULL;
caddr_t data_list = NULL;
int *data_ptr;
caddr_t vidptr, prettyptr, datanameptr;
size_t vidlen, prettylen, datanamelen, tripletlen = 0;
int config_list_len, data_list_len, len, i;
int version;
int found = 0;
ST_FUNC(ST_DEVINFO, st_get_conf_from_st_dot_conf);
/*
* Determine type of tape controller. Type is determined by
* checking the vendor ids of the earlier inquiry command and
* comparing those with vids in tape-config-list defined in st.conf
*/
if (ddi_getlongprop(DDI_DEV_T_ANY, ST_DEVINFO, DDI_PROP_DONTPASS,
"tape-config-list", (caddr_t)&config_list, &config_list_len)
!= DDI_PROP_SUCCESS) {
return (found);
}
ST_DEBUG6(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_st_dot_conf(): st.conf has tape-config-list\n");
/*
 * Compare the vid in each triplet; when one matches, get the value of
 * its data_name property and construct a st_drivetype struct from it.
 * tripletlen is still zero on the first pass of the loop!
 */
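/*
 * For reference, a version 2 tape-config-list entry looks like this
 * (the values are purely illustrative, not for any real drive):
 *
 * tape-config-list =
 *     "VENDOR  PRODUCT", "Pretty Name", "VENDOR_PRODUCT-data";
 * VENDOR_PRODUCT-data = 2, 0x3B, 0, 0x18619, 4,
 *     0x40, 0x42, 0x44, 0x46, 3,
 *     120, 900, 600, 600, 900, 900, 10800;
 *
 * That is: <version>, <type>, <bsize>, <options>, <number of densities>,
 * the density codes, <default density>, then for a version 2 entry the
 * seven timeouts parsed below (non-motion, I/O, rewind, space, load,
 * unload, erase).
 */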
for (len = config_list_len, vidptr = config_list;
len > 0;
vidptr += tripletlen, len -= tripletlen) {
vidlen = strlen(vidptr);
prettyptr = vidptr + vidlen + 1;
prettylen = strlen(prettyptr);
datanameptr = prettyptr + prettylen + 1;
datanamelen = strlen(datanameptr);
tripletlen = vidlen + prettylen + datanamelen + 3;
if (vidlen == 0) {
continue;
}
/*
 * If the inquiry vid doesn't match this triplet's vid,
 * try the next.
 */
if (strncasecmp(vidpid, vidptr, vidlen)) {
continue;
}
/*
* if prettylen is zero then use the vid string
*/
if (prettylen == 0) {
prettyptr = vidptr;
prettylen = vidlen;
}
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"vid = %s, pretty=%s, dataname = %s\n",
vidptr, prettyptr, datanameptr);
/*
* get the data list
*/
if (ddi_getlongprop(DDI_DEV_T_ANY, ST_DEVINFO, 0,
datanameptr, (caddr_t)&data_list,
&data_list_len) != DDI_PROP_SUCCESS) {
/*
 * Error getting the property value;
 * print a warning and try the next triplet.
 */
scsi_log(ST_DEVINFO, st_label, CE_WARN,
"data property (%s) has no value\n",
datanameptr);
continue;
}
/*
* now initialize the st_drivetype struct
*/
(void) strncpy(dp->name, prettyptr, ST_NAMESIZE - 1);
dp->length = (int)min(vidlen, (VIDPIDLEN - 1));
(void) strncpy(dp->vid, vidptr, dp->length);
data_ptr = (int *)data_list;
/*
 * Check that there is enough data for version, type,
 * bsize, options, # of densities, density1,
 * density2, ..., default_density.
 */
if ((data_list_len < 5 * sizeof (int)) ||
(data_list_len < 6 * sizeof (int) +
*(data_ptr + 4) * sizeof (int))) {
/*
* print warning and skip to next triplet.
*/
scsi_log(ST_DEVINFO, st_label, CE_WARN,
"data property (%s) incomplete\n",
datanameptr);
kmem_free(data_list, data_list_len);
continue;
}
if (st_validate_conf_data(un, data_ptr,
data_list_len / sizeof (int), datanameptr)) {
kmem_free(data_list, data_list_len);
scsi_log(ST_DEVINFO, st_label, CE_WARN,
"data property (%s) rejected\n",
datanameptr);
continue;
}
/*
* check version
*/
version = *data_ptr++;
if (version != 1 && version != 2) {
/* print warning but accept it */
scsi_log(ST_DEVINFO, st_label, CE_WARN,
"Version # for data property (%s) "
"not set to 1 or 2\n", datanameptr);
}
dp->type = *data_ptr++;
dp->bsize = *data_ptr++;
dp->options = *data_ptr++;
dp->options |= ST_DYNAMIC;
len = *data_ptr++;
for (i = 0; i < NDENSITIES; i++) {
if (i < len) {
dp->densities[i] = *data_ptr++;
}
}
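/*
 * The default density in the property data is an index (0-3); it is
 * shifted left 3 here so it lands in the same form as the MT_DENSITY*
 * values assigned elsewhere in this file.
 */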
dp->default_density = *data_ptr << 3;
if (version == 2 &&
data_list_len >= (13 + len) * sizeof (int)) {
data_ptr++;
dp->non_motion_timeout = *data_ptr++;
dp->io_timeout = *data_ptr++;
dp->rewind_timeout = *data_ptr++;
dp->space_timeout = *data_ptr++;
dp->load_timeout = *data_ptr++;
dp->unload_timeout = *data_ptr++;
dp->erase_timeout = *data_ptr++;
}
kmem_free(data_list, data_list_len);
found = 1;
ST_DEBUG(ST_DEVINFO, st_label, SCSI_DEBUG,
"found in st.conf: vid = %s, pretty=%s\n",
dp->vid, dp->name);
break;
}
/*
* free up the memory allocated by ddi_getlongprop
*/
if (config_list) {
kmem_free(config_list, config_list_len);
}
return (found);
}
static int
st_get_conf_from_st_conf_dot_c(struct scsi_tape *un, char *vidpid,
struct st_drivetype *dp)
{
int i;
ST_FUNC(ST_DEVINFO, st_get_conf_from_st_conf_dot_c);
/*
* Determine type of tape controller. Type is determined by
* checking the result of the earlier inquiry command and
* comparing vendor ids with strings in a table declared in st_conf.c.
*/
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_st_conf_dot_c(): looking at st_drivetypes\n");
for (i = 0; i < st_ndrivetypes; i++) {
if (st_drivetypes[i].length == 0) {
continue;
}
if (strncasecmp(vidpid, st_drivetypes[i].vid,
st_drivetypes[i].length)) {
continue;
}
bcopy(&st_drivetypes[i], dp, sizeof (st_drivetypes[i]));
return (1);
}
return (0);
}
static int
st_get_conf_from_tape_drive(struct scsi_tape *un, char *vidpid,
struct st_drivetype *dp)
{
int bsize;
ulong_t maxbsize;
caddr_t buf;
struct st_drivetype *tem_dp;
struct read_blklim *blklim;
int rval;
int i;
ST_FUNC(ST_DEVINFO, st_get_conf_from_tape_drive);
/*
* Determine the type of tape controller. Type is determined by
* sending SCSI commands to tape drive and deriving the type from
* the returned data.
*/
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_tape_drive(): asking tape drive\n");
tem_dp = kmem_zalloc(sizeof (struct st_drivetype), KM_SLEEP);
/*
* Make up a name
*/
bcopy(vidpid, tem_dp->name, VIDPIDLEN);
tem_dp->name[VIDPIDLEN] = '\0';
tem_dp->length = min(strlen(ST_INQUIRY->inq_vid), (VIDPIDLEN - 1));
(void) strncpy(tem_dp->vid, ST_INQUIRY->inq_vid, tem_dp->length);
/*
* 'clean' vendor and product strings of non-printing chars
*/
for (i = 0; i < VIDPIDLEN - 1; i ++) {
if (tem_dp->name[i] < ' ' || tem_dp->name[i] > '~') {
tem_dp->name[i] = '.';
}
}
/*
* MODE SENSE to determine block size.
*/
un->un_dp->options |= ST_MODE_SEL_COMP | ST_UNLOADABLE;
rval = st_modesense(un);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
rval = 1;
} else {
un->un_dp->options &= ~ST_MODE_SEL_COMP;
rval = 0;
}
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_tape_drive(): MODE SENSE failed\n");
goto exit;
}
/* Can mode sense page 0x10 or 0xf */
tem_dp->options |= ST_MODE_SEL_COMP;
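/*
 * The current block length is a 24-bit, most-significant-byte-first
 * field in the mode parameter block descriptor; a value of zero means
 * the drive is in variable-length block mode.
 */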
bsize = (un->un_mspl->high_bl << 16) |
(un->un_mspl->mid_bl << 8) |
(un->un_mspl->low_bl);
if (bsize == 0) {
tem_dp->options |= ST_VARIABLE;
tem_dp->bsize = 0;
} else if (bsize > ST_MAXRECSIZE_FIXED) {
rval = st_change_block_size(un, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
rval = 1;
} else {
rval = 0;
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_tape_drive(): "
"Fixed record size is too large and "
"cannot switch to variable record size\n");
}
goto exit;
}
tem_dp->options |= ST_VARIABLE;
} else {
rval = st_change_block_size(un, 0);
if (rval == 0) {
tem_dp->options |= ST_VARIABLE;
tem_dp->bsize = 0;
} else if (rval != EACCES) {
tem_dp->bsize = bsize;
} else {
un->un_dp->type = ST_TYPE_INVALID;
rval = 1;
goto exit;
}
}
/*
 * If READ BLOCK LIMITS works and the upper block size limit is
 * more than 64K, ST_NO_RECSIZE_LIMIT is supported.
 */
blklim = kmem_zalloc(sizeof (struct read_blklim), KM_SLEEP);
rval = st_read_block_limits(un, blklim);
if (rval) {
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_tape_drive(): "
"READ BLOCK LIMITS failed.\n");
rval = 0;
kmem_free(blklim, sizeof (struct read_blklim));
goto exit;
}
maxbsize = (blklim->max_hi << 16) +
(blklim->max_mid << 8) + blklim->max_lo;
if (maxbsize > ST_MAXRECSIZE_VARIABLE) {
tem_dp->options |= ST_NO_RECSIZE_LIMIT;
}
kmem_free(blklim, sizeof (struct read_blklim));
/*
* Inquiry VPD page 0xb0 to see if the tape drive supports WORM
*/
buf = kmem_zalloc(6, KM_SLEEP);
rval = st_get_special_inquiry(un, 6, buf, 0xb0);
if (rval) {
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_conf_from_tape_drive(): "
"failed to read the vital product data inquiry page.\n");
rval = 0;
kmem_free(buf, 6);
goto exit;
}
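/*
 * Byte 4, bit 0 of the sequential-access device capabilities VPD page
 * (0xb0) is the WORM capability bit; treat the drive as WORM-capable
 * when it is set.
 */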
if (buf[4] & 1) {
tem_dp->options |= ST_WORMABLE;
}
kmem_free(buf, 6);
/* Assume BSD BSR KNOWS_EOD */
tem_dp->options |= ST_BSF | ST_BSR | ST_KNOWS_EOD | ST_UNLOADABLE;
tem_dp->max_rretries = -1;
tem_dp->max_wretries = -1;
/*
 * Determine the densities supported by the drive by sending
 * the REPORT DENSITY SUPPORT command.
 */
if (st_get_densities_from_tape_drive(un, tem_dp) == 0) {
goto exit;
}
/*
 * Determine the timeout values for several commands by sending
 * the REPORT SUPPORTED OPERATION CODES command.
 */
rval = st_get_timeout_values_from_tape_drive(un, tem_dp);
if (rval == 0 || ((rval == 1) && (tem_dp->type == ST_TYPE_INVALID))) {
goto exit;
}
bcopy(tem_dp, dp, sizeof (struct st_drivetype));
rval = 1;
exit:
un->un_status = KEY_NO_SENSE;
kmem_free(tem_dp, sizeof (struct st_drivetype));
return (rval);
}
static int
st_get_densities_from_tape_drive(struct scsi_tape *un,
struct st_drivetype *dp)
{
int i, p;
size_t buflen;
ushort_t des_len;
uchar_t *den_header;
uchar_t num_den;
uchar_t den[NDENSITIES];
uchar_t deflt[NDENSITIES];
struct report_density_desc *den_desc;
ST_FUNC(ST_DEVINFO, st_get_densities_from_tape_drive);
/*
 * Since we have no idea how many density support entries
 * will be returned, we first send the command assuming
 * there is only one. Then we can derive the number of
 * entries from the available density support length. If
 * multiple entries exist, we resend the command with a
 * buffer large enough to hold all of them.
 */
buflen = sizeof (struct report_density_header) +
sizeof (struct report_density_desc);
den_header = kmem_zalloc(buflen, KM_SLEEP);
if (st_report_density_support(un, den_header, buflen) != 0) {
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_densities_from_tape_drive(): "
"REPORT DENSITY SUPPORT failed.\n");
kmem_free(den_header, buflen);
return (0);
}
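/*
 * The available density support length in the returned header is taken
 * to cover the two reserved header bytes that follow it plus the
 * fixed-size density descriptors, hence the subtraction of 2 in the
 * descriptor count below.
 */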
des_len =
BE_16(((struct report_density_header *)den_header)->ava_dens_len);
num_den = (des_len - 2) / sizeof (struct report_density_desc);
if (num_den > 1) {
kmem_free(den_header, buflen);
buflen = sizeof (struct report_density_header) +
sizeof (struct report_density_desc) * num_den;
den_header = kmem_zalloc(buflen, KM_SLEEP);
if (st_report_density_support(un, den_header, buflen) != 0) {
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_densities_from_tape_drive(): "
"REPORT DENSITY SUPPORT failed.\n");
kmem_free(den_header, buflen);
return (0);
}
}
den_desc = (struct report_density_desc *)(den_header
+ sizeof (struct report_density_header));
/*
 * Determine the drive type from the assigning organization
 */
for (i = 0; i < ST_NUM_MEMBERS(st_vid_dt); i ++) {
if (strncmp(st_vid_dt[i].vid, (char *)(den_desc->ass_org),
8) == 0) {
dp->type = st_vid_dt[i].type;
break;
}
}
if (i == ST_NUM_MEMBERS(st_vid_dt)) {
ST_DEBUG2(ST_DEVINFO, st_label, SCSI_DEBUG,
"st_get_densities_from_tape_drive(): "
"no match found for the assigning organization.\n");
kmem_free(den_header, buflen);
return (0);
}
/*
 * The tape drive may support many tape formats, but the st driver
 * supports only the four highest densities. Since density code
 * values are returned in ascending order, we start from the
 * last entry of the density support data block descriptor.
 */
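/*
 * For example (hypothetical codes), a drive reporting write-capable
 * densities 0x40, 0x42, 0x44 and 0x46 would be recorded here as
 * den[] = { 0x46, 0x44, 0x42, 0x40 } (highest first), which the
 * switch below turns into densities[] = { 0x40, 0x42, 0x44, 0x46 }.
 */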
p = 0;
den_desc += num_den - 1;
for (i = 0; i < num_den && p < NDENSITIES; i ++, den_desc --) {
if ((den_desc->pri_den != 0) && (den_desc->wrtok)) {
if (p != 0) {
if (den_desc->pri_den >= den[p - 1]) {
continue;
}
}
den[p] = den_desc->pri_den;
deflt[p] = den_desc->deflt;
p ++;
}
}
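/*
 * Map the collected codes (highest first in den[]) into dp->densities[]
 * in ascending order, padding by replication when fewer than four codes
 * were found (and forcing auto-density override when zero or one code
 * was found). The default density is chosen from whichever collected
 * code the drive flagged as a default, preferring the highest.
 */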
switch (p) {
case 0:
bzero(dp->densities, NDENSITIES);
dp->options |= ST_AUTODEN_OVERRIDE;
dp->default_density = MT_DENSITY4;
break;
case 1:
(void) memset(dp->densities, den[0], NDENSITIES);
dp->options |= ST_AUTODEN_OVERRIDE;
dp->default_density = MT_DENSITY4;
break;
case 2:
dp->densities[0] = den[1];
dp->densities[1] = den[1];
dp->densities[2] = den[0];
dp->densities[3] = den[0];
if (deflt[0]) {
dp->default_density = MT_DENSITY4;
} else {
dp->default_density = MT_DENSITY2;
}
break;
case 3:
dp->densities[0] = den[2];
dp->densities[1] = den[1];
dp->densities[2] = den[0];
dp->densities[3] = den[0];
if (deflt[0]) {
dp->default_density = MT_DENSITY4;
} else if (deflt[1]) {
dp->default_density = MT_DENSITY2;
} else {
dp->default_density = MT_DENSITY1;
}
break;
default:
for (i = p; i > p - NDENSITIES; i --) {
dp->densities[i - 1] = den[p - i];
}
if (deflt[0]) {
dp->default_density = MT_DENSITY4;
} else if (deflt[1]) {
dp->default_density = MT_DENSITY3;
} else if (deflt[2]) {
dp->default_density = MT_DENSITY2;
} else {
dp->default_density = MT_DENSITY1;
}
break;
}
bzero(dp->mediatype, NDENSITIES);
kmem_free(den_header, buflen);
return (1);
}
static int
st_get_timeout_values_from_tape_drive(struct scsi_tape *un,
struct st_drivetype *dp)
{
ushort_t timeout;
int rval;
ST_FUNC(ST_DEVINFO, st_get_timeout_values_from_tape_drive);
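/*
 * Each timeout below is obtained from st_get_timeouts_value() for the
 * given opcode (via REPORT SUPPORTED OPERATION CODES, per the comment
 * at the call site above). A return of EACCES invalidates the drive
 * type so configuration is redone on open; any other failure rejects
 * this configuration method entirely.
 */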
rval = st_get_timeouts_value(un, SCMD_ERASE, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->erase_timeout = timeout;
rval = st_get_timeouts_value(un, SCMD_READ, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->io_timeout = timeout;
rval = st_get_timeouts_value(un, SCMD_WRITE, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->io_timeout = max(dp->io_timeout, timeout);
rval = st_get_timeouts_value(un, SCMD_SPACE, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->space_timeout = timeout;
rval = st_get_timeouts_value(un, SCMD_LOAD, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->load_timeout = timeout;
dp->unload_timeout = timeout;
rval = st_get_timeouts_value(un, SCMD_REWIND, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->rewind_timeout = timeout;
rval = st_get_timeouts_value(un, SCMD_INQUIRY, &timeout, 0);
if (rval) {
if (rval == EACCES) {
un->un_dp->type = ST_TYPE_INVALID;
dp->type = ST_TYPE_INVALID;
return (1);
}
return (0);
}
dp->non_motion_timeout = timeout;
return (1);
}