| /* |
 * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
 * controllers, i.e. Thunderbolt and Invader.
| * |
| * Solaris MegaRAID device driver for SAS2.0 controllers |
| * Copyright (c) 2008-2012, LSI Logic Corporation. |
| * All rights reserved. |
| * |
| * Version: |
| * Author: |
| * Swaminathan K S |
| * Arun Chandrashekhar |
| * Manju R |
| * Rasheed |
| * Shakeel Bukhari |
| */ |
| |
| /* |
| * Copyright 2018 Nexenta Systems, Inc. |
| * Copyright 2015, 2017 Citrus IT Limited. All rights reserved. |
| * Copyright 2015 Garrett D'Amore <garrett@damore.org> |
| */ |
| |
| |
| #include <sys/types.h> |
| #include <sys/file.h> |
| #include <sys/atomic.h> |
| #include <sys/scsi/scsi.h> |
| #include <sys/byteorder.h> |
| #include <sys/sdt.h> |
| #include "ld_pd_map.h" |
| #include "mr_sas.h" |
| #include "fusion.h" |
| |
| /* |
| * FMA header files |
| */ |
| #include <sys/ddifm.h> |
| #include <sys/fm/protocol.h> |
| #include <sys/fm/util.h> |
| #include <sys/fm/io/ddi.h> |
| |
| |
/* TB command size: 256-byte message frame plus 1024-byte SGL chain. */
#define	MR_COMMAND_SIZE	(64*20)	/* 1280 bytes */
| MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map); |
| U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map); |
| U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map); |
| U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *); |
| extern ddi_dma_attr_t mrsas_generic_dma_attr; |
| extern uint32_t mrsas_tbolt_max_cap_maxxfer; |
| extern struct ddi_device_acc_attr endian_attr; |
| extern int debug_level_g; |
| extern unsigned int enable_fp; |
| volatile int dump_io_wait_time = 900; |
| extern volatile int debug_timeout_g; |
| extern int mrsas_issue_pending_cmds(struct mrsas_instance *); |
| extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance); |
| extern void push_pending_mfi_pkt(struct mrsas_instance *, |
| struct mrsas_cmd *); |
| extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *, |
| MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *); |
| |
| /* Local static prototypes. */ |
| static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *, |
| struct scsi_address *, struct scsi_pkt *, uchar_t *); |
| static void mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32); |
| static int mrsas_tbolt_check_map_info(struct mrsas_instance *); |
| static int mrsas_tbolt_sync_map_info(struct mrsas_instance *); |
| static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *); |
| static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *); |
| static void mrsas_tbolt_get_pd_info(struct mrsas_instance *, |
| struct mrsas_tbolt_pd_info *, int); |
| |
| static int mrsas_debug_tbolt_fw_faults_after_ocr = 0; |
| |
| /* |
| * destroy_mfi_mpi_frame_pool |
| */ |
| void |
| destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance) |
| { |
| int i; |
| |
| struct mrsas_cmd *cmd; |
| |
| /* return all mfi frames to pool */ |
| for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) { |
| cmd = instance->cmd_list[i]; |
| if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| cmd->frame_dma_obj); |
| } |
| cmd->frame_dma_obj_status = DMA_OBJ_FREED; |
| } |
| } |
| |
| /* |
| * destroy_mpi2_frame_pool |
| */ |
| void |
| destroy_mpi2_frame_pool(struct mrsas_instance *instance) |
| { |
| |
| if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->mpi2_frame_pool_dma_obj); |
		instance->mpi2_frame_pool_dma_obj.status = DMA_OBJ_FREED;
| } |
| } |
| |
| |
| /* |
| * mrsas_tbolt_free_additional_dma_buffer |
| */ |
| void |
| mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance) |
| { |
| int i; |
| |
| if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->mfi_internal_dma_obj); |
| instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; |
| } |
| if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->mfi_evt_detail_obj); |
| instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; |
| } |
| |
| for (i = 0; i < 2; i++) { |
| if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->ld_map_obj[i]); |
| instance->ld_map_obj[i].status = DMA_OBJ_FREED; |
| } |
| } |
| } |
| |
| |
| /* |
| * free_req_desc_pool |
| */ |
| void |
| free_req_rep_desc_pool(struct mrsas_instance *instance) |
| { |
| if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->request_desc_dma_obj); |
| instance->request_desc_dma_obj.status = DMA_OBJ_FREED; |
| } |
| |
| if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->reply_desc_dma_obj); |
| instance->reply_desc_dma_obj.status = DMA_OBJ_FREED; |
| } |
| |
| |
| } |
| |
| |
| /* |
| * ThunderBolt(TB) Request Message Frame Pool |
| */ |
| int |
| create_mpi2_frame_pool(struct mrsas_instance *instance) |
| { |
| int i = 0; |
| uint16_t max_cmd; |
| uint32_t sgl_sz; |
| uint32_t raid_msg_size; |
| uint32_t total_size; |
| uint32_t offset; |
| uint32_t io_req_base_phys; |
| uint8_t *io_req_base; |
| struct mrsas_cmd *cmd; |
| |
| max_cmd = instance->max_fw_cmds; |
| |
| sgl_sz = 1024; |
| raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE; |
| |
	/* Allocate an additional 256 bytes to accommodate SMID 0. */
| total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) + |
| (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH); |
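	/*
	 * Resulting pool layout: one skipped frame for SMID 0, then max_cmd
	 * message frames, then max_cmd SGL chain buffers (sgl_sz bytes each),
	 * then max_cmd sense buffers (SENSE_LENGTH bytes each).
	 */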
| |
| con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: " |
| "max_cmd %x", max_cmd)); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: " |
| "request message frame pool size %x", total_size)); |
| |
| /* |
| * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory |
| * and then split the memory to 1024 commands. Each command should be |
| * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME |
| * within it. Further refer the "alloc_req_rep_desc" function where |
| * we allocate request/reply descriptors queues for a clue. |
| */ |
| |
| instance->mpi2_frame_pool_dma_obj.size = total_size; |
| instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi = |
| 0xFFFFFFFFU; |
| instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max = |
| 0xFFFFFFFFU; |
| instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "could not alloc mpi2 frame pool"); |
| return (DDI_FAILURE); |
| } |
| |
| bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size); |
| instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED; |
| |
| instance->io_request_frames = |
| (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer; |
| instance->io_request_frames_phy = |
| (uint32_t) |
| instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address; |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p", |
| (void *)instance->io_request_frames)); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x", |
| instance->io_request_frames_phy)); |
| |
| io_req_base = (uint8_t *)instance->io_request_frames + |
| MRSAS_THUNDERBOLT_MSG_SIZE; |
| io_req_base_phys = instance->io_request_frames_phy + |
| MRSAS_THUNDERBOLT_MSG_SIZE; |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, |
| "io req_base_phys 0x%x", io_req_base_phys)); |
| |
| for (i = 0; i < max_cmd; i++) { |
| cmd = instance->cmd_list[i]; |
| |
| offset = i * MRSAS_THUNDERBOLT_MSG_SIZE; |
| |
| cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *) |
| ((uint8_t *)io_req_base + offset); |
| cmd->scsi_io_request_phys_addr = io_req_base_phys + offset; |
| |
| cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base + |
| (max_cmd * raid_msg_size) + i * sgl_sz); |
| |
| cmd->sgl_phys_addr = (io_req_base_phys + |
| (max_cmd * raid_msg_size) + i * sgl_sz); |
| |
| cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base + |
| (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) + |
| (i * SENSE_LENGTH)); |
| |
| cmd->sense_phys_addr1 = (io_req_base_phys + |
| (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) + |
| (i * SENSE_LENGTH)); |
| |
| |
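		/*
		 * SMIDs are 1-based; SMID 0 is reserved, matching the extra
		 * frame allocated at the head of the pool.
		 */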
| cmd->SMID = i + 1; |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p", |
| cmd->index, (void *)cmd->scsi_io_request)); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x", |
| cmd->index, cmd->scsi_io_request_phys_addr)); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p", |
| cmd->index, (void *)cmd->sense1)); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x", |
| cmd->index, cmd->sense_phys_addr1)); |
| |
		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
| } |
| |
	return (DDI_SUCCESS);
}
| |
| |
| /* |
| * alloc_additional_dma_buffer for AEN |
| */ |
| int |
| mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance) |
| { |
| uint32_t internal_buf_size = PAGESIZE*2; |
| int i; |
| |
| /* Initialize buffer status as free */ |
| instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; |
| instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; |
| instance->ld_map_obj[0].status = DMA_OBJ_FREED; |
| instance->ld_map_obj[1].status = DMA_OBJ_FREED; |
| |
| |
| instance->mfi_internal_dma_obj.size = internal_buf_size; |
| instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = |
| 0xFFFFFFFFU; |
| instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "could not alloc reply queue"); |
| return (DDI_FAILURE); |
| } |
| |
| bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); |
| |
| instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; |
| instance->internal_buf = |
| (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer)); |
| instance->internal_buf_dmac_add = |
| instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address; |
| instance->internal_buf_size = internal_buf_size; |
| |
| /* allocate evt_detail */ |
| instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail); |
| instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; |
| instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; |
| instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "mrsas_tbolt_alloc_additional_dma_buffer: " |
| "could not allocate data transfer buffer."); |
| goto fail_tbolt_additional_buff; |
| } |
| |
| bzero(instance->mfi_evt_detail_obj.buffer, |
| sizeof (struct mrsas_evt_detail)); |
| |
| instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; |
| |
| instance->size_map_info = sizeof (MR_FW_RAID_MAP) + |
| (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1)); |
| |
| for (i = 0; i < 2; i++) { |
| /* allocate the data transfer buffer */ |
| instance->ld_map_obj[i].size = instance->size_map_info; |
| instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr; |
| instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->ld_map_obj[i].dma_attr.dma_attr_count_max = |
| 0xFFFFFFFFU; |
| instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1; |
| instance->ld_map_obj[i].dma_attr.dma_attr_align = 1; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i], |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "could not allocate data transfer buffer."); |
| goto fail_tbolt_additional_buff; |
| } |
| |
| instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED; |
| |
| bzero(instance->ld_map_obj[i].buffer, instance->size_map_info); |
| |
| instance->ld_map[i] = |
| (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer; |
| instance->ld_map_phy[i] = (uint32_t)instance-> |
| ld_map_obj[i].dma_cookie[0].dmac_address; |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, |
| "ld_map Addr Phys 0x%x", instance->ld_map_phy[i])); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, |
| "size_map_info 0x%x", instance->size_map_info)); |
| } |
| |
| return (DDI_SUCCESS); |
| |
| fail_tbolt_additional_buff: |
| mrsas_tbolt_free_additional_dma_buffer(instance); |
| |
| return (DDI_FAILURE); |
| } |
| |
| MRSAS_REQUEST_DESCRIPTOR_UNION * |
| mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index) |
| { |
| MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; |
| |
	if (index >= instance->max_fw_cmds) {
| con_log(CL_ANN1, (CE_NOTE, |
| "Invalid SMID 0x%x request for descriptor", index)); |
| con_log(CL_ANN1, (CE_NOTE, |
| "max_fw_cmds : 0x%x", instance->max_fw_cmds)); |
| return (NULL); |
| } |
| |
| req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *) |
| ((char *)instance->request_message_pool + |
| (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index)); |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "request descriptor : 0x%08lx", (unsigned long)req_desc)); |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "request descriptor base phy : 0x%08lx", |
| (unsigned long)instance->request_message_pool_phy)); |
| |
| return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc); |
| } |
| |
| |
| /* |
| * Allocate Request and Reply Queue Descriptors. |
| */ |
| int |
| alloc_req_rep_desc(struct mrsas_instance *instance) |
| { |
| uint32_t request_q_sz, reply_q_sz; |
| int i, max_reply_q_sz; |
| MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; |
| |
| /* |
| * ThunderBolt(TB) There's no longer producer consumer mechanism. |
| * Once we have an interrupt we are supposed to scan through the list of |
| * reply descriptors and process them accordingly. We would be needing |
| * to allocate memory for 1024 reply descriptors |
| */ |
| |
	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be a multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15) / 16) * 16;

	/* 8 bytes per MPI2 reply descriptor */
	reply_q_sz = 8 * max_reply_q_sz;
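	/*
	 * Illustrative sizing: with max_fw_cmds = 1007 the ring rounds up to
	 * ((1007 + 1 + 15) / 16) * 16 = 1008 descriptors, i.e. 8064 bytes.
	 */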
| |
| instance->reply_desc_dma_obj.size = reply_q_sz; |
| instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; |
| instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, "could not alloc reply queue"); |
| return (DDI_FAILURE); |
| } |
| |
| bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz); |
| instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED; |
| |
| /* virtual address of reply queue */ |
| instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)( |
| instance->reply_desc_dma_obj.buffer); |
| |
| instance->reply_q_depth = max_reply_q_sz; |
| |
| con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x", |
| instance->reply_q_depth)); |
| |
| con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p", |
| (void *)instance->reply_frame_pool)); |
| |
	/* initialize every reply descriptor to all ones (no reply posted) */
| reply_desc = instance->reply_frame_pool; |
| |
| for (i = 0; i < instance->reply_q_depth; i++) { |
| reply_desc->Words = (uint64_t)~0; |
| reply_desc++; |
| } |
| |
| |
| instance->reply_frame_pool_phy = |
| (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address; |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy)); |
| |
| |
| instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy + |
| reply_q_sz); |
| |
| con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x", |
| instance->reply_pool_limit_phy)); |
| |
| |
	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* 8 bytes per request descriptor */
	request_q_sz = 8 * instance->max_fw_cmds;
| |
| instance->request_desc_dma_obj.size = request_q_sz; |
| instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = |
| 0xFFFFFFFFU; |
| instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "could not alloc request queue desc"); |
| goto fail_undo_reply_queue; |
| } |
| |
| bzero(instance->request_desc_dma_obj.buffer, request_q_sz); |
| instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED; |
| |
| /* virtual address of request queue desc */ |
| instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *) |
| (instance->request_desc_dma_obj.buffer); |
| |
| instance->request_message_pool_phy = |
| (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address; |
| |
| return (DDI_SUCCESS); |
| |
| fail_undo_reply_queue: |
| if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) { |
| (void) mrsas_free_dma_obj(instance, |
| instance->reply_desc_dma_obj); |
| instance->reply_desc_dma_obj.status = DMA_OBJ_FREED; |
| } |
| |
| return (DDI_FAILURE); |
| } |
| |
| /* |
| * mrsas_alloc_cmd_pool_tbolt |
| * |
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine.
| */ |
| int |
| mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance) |
| { |
| int i; |
| int count; |
| uint32_t max_cmd; |
| uint32_t reserve_cmd; |
| size_t sz; |
| |
| struct mrsas_cmd *cmd; |
| |
| max_cmd = instance->max_fw_cmds; |
| con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: " |
| "max_cmd %x", max_cmd)); |
| |
| |
| sz = sizeof (struct mrsas_cmd *) * max_cmd; |
| |
| /* |
| * instance->cmd_list is an array of struct mrsas_cmd pointers. |
| * Allocate the dynamic array first and then allocate individual |
| * commands. |
| */ |
| instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); |
| |
| /* create a frame pool and assign one frame to each cmd */ |
| for (count = 0; count < max_cmd; count++) { |
| instance->cmd_list[count] = |
| kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP); |
| } |
| |
| /* add all the commands to command pool */ |
| |
| INIT_LIST_HEAD(&instance->cmd_pool_list); |
| INIT_LIST_HEAD(&instance->cmd_pend_list); |
| INIT_LIST_HEAD(&instance->cmd_app_pool_list); |
| |
| reserve_cmd = MRSAS_APP_RESERVED_CMDS; |
| |
	/* cmd index 0 reserved for IOC INIT */
| for (i = 1; i < reserve_cmd; i++) { |
| cmd = instance->cmd_list[i]; |
| cmd->index = i; |
| mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list); |
| } |
| |
| |
| for (i = reserve_cmd; i < max_cmd; i++) { |
| cmd = instance->cmd_list[i]; |
| cmd->index = i; |
| mlist_add_tail(&cmd->list, &instance->cmd_pool_list); |
| } |
| |
| return (DDI_SUCCESS); |
| |
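	/*
	 * Nothing above jumps to these labels today (KM_SLEEP allocations
	 * cannot fail); they remain as unwind paths in case the allocations
	 * above gain failure modes.
	 */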
| mrsas_undo_cmds: |
| if (count > 0) { |
| /* free each cmd */ |
| for (i = 0; i < count; i++) { |
| if (instance->cmd_list[i] != NULL) { |
| kmem_free(instance->cmd_list[i], |
| sizeof (struct mrsas_cmd)); |
| } |
| instance->cmd_list[i] = NULL; |
| } |
| } |
| |
| mrsas_undo_cmd_list: |
| if (instance->cmd_list != NULL) |
| kmem_free(instance->cmd_list, sz); |
| instance->cmd_list = NULL; |
| |
| return (DDI_FAILURE); |
| } |
| |
| |
| /* |
| * free_space_for_mpi2 |
| */ |
| void |
| free_space_for_mpi2(struct mrsas_instance *instance) |
| { |
| /* already freed */ |
| if (instance->cmd_list == NULL) { |
| return; |
| } |
| |
| /* First free the additional DMA buffer */ |
| mrsas_tbolt_free_additional_dma_buffer(instance); |
| |
| /* Free the request/reply descriptor pool */ |
| free_req_rep_desc_pool(instance); |
| |
| /* Free the MPI message pool */ |
| destroy_mpi2_frame_pool(instance); |
| |
| /* Free the MFI frame pool */ |
| destroy_mfi_frame_pool(instance); |
| |
| /* Free all the commands in the cmd_list */ |
| /* Free the cmd_list buffer itself */ |
| mrsas_free_cmd_pool(instance); |
| } |
| |
| |
| /* |
| * ThunderBolt(TB) memory allocations for commands/messages/frames. |
| */ |
| int |
| alloc_space_for_mpi2(struct mrsas_instance *instance) |
| { |
| /* Allocate command pool (memory for cmd_list & individual commands) */ |
| if (mrsas_alloc_cmd_pool_tbolt(instance)) { |
| dev_err(instance->dip, CE_WARN, "Error creating cmd pool"); |
| return (DDI_FAILURE); |
| } |
| |
| /* Initialize single reply size and Message size */ |
| instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE; |
| instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE; |
| |
| instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE - |
| (sizeof (MPI2_RAID_SCSI_IO_REQUEST) - |
| sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION); |
| instance->max_sge_in_chain = (MR_COMMAND_SIZE - |
| MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION); |
| |
	/*
	 * Reduce the SG count to take care of the group cmds feature in FW.
	 * Note the code subtracts 2, not 1; one of the reserved slots
	 * presumably covers the chain element in the main message.
	 */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
| instance->chain_offset_mpt_msg = |
| offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16; |
| instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE - |
| sizeof (MPI2_SGE_IO_UNION)) / 16; |
| instance->reply_read_index = 0; |
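	/*
	 * Illustrative numbers, assuming a 256-byte message frame with the
	 * SGL at offset 128 and 16-byte SGE unions: max_sge_in_main_msg =
	 * (256 - 128) / 16 = 8, max_sge_in_chain = (1280 - 256) / 16 = 64,
	 * chain_offset_mpt_msg = 128 / 16 = 8 and chain_offset_io_req =
	 * (256 - 16) / 16 = 15, both in 16-byte units.
	 */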
| |
| |
| /* Allocate Request and Reply descriptors Array */ |
| /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */ |
| if (alloc_req_rep_desc(instance)) { |
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for descriptor pool");
| goto mpi2_undo_cmd_pool; |
| } |
| con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x", |
| instance->request_message_pool_phy)); |
| |
| |
| /* Allocate MFI Frame pool - for MPI-MFI passthru commands */ |
| if (create_mfi_frame_pool(instance)) { |
| dev_err(instance->dip, CE_WARN, |
| "Error, allocating memory for MFI frame-pool"); |
		goto mpi2_undo_descriptor_pool;
| } |
| |
| |
	/* Allocate MPI2 Message pool */
	/*
	 * Make sure the buffer is aligned to 256 for the raid message packet.
	 * Create an io request pool and assign one frame to each cmd.
	 */
| |
| if (create_mpi2_frame_pool(instance)) { |
| dev_err(instance->dip, CE_WARN, |
| "Error, allocating memory for MPI2 Message-pool"); |
| goto mpi2_undo_mfi_frame_pool; |
| } |
| |
| #ifdef DEBUG |
| con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x", |
| instance->max_sge_in_main_msg)); |
| con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x", |
| instance->max_sge_in_chain)); |
| con_log(CL_ANN1, (CE_CONT, |
| "[max_sge]0x%x", instance->max_num_sge)); |
| con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x", |
| instance->chain_offset_mpt_msg)); |
| con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x", |
| instance->chain_offset_io_req)); |
| #endif |
| |
| |
| /* Allocate additional dma buffer */ |
| if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) { |
| dev_err(instance->dip, CE_WARN, |
| "Error, allocating tbolt additional DMA buffer"); |
| goto mpi2_undo_message_pool; |
| } |
| |
| return (DDI_SUCCESS); |
| |
| mpi2_undo_message_pool: |
| destroy_mpi2_frame_pool(instance); |
| |
| mpi2_undo_mfi_frame_pool: |
| destroy_mfi_frame_pool(instance); |
| |
mpi2_undo_descriptor_pool:
| free_req_rep_desc_pool(instance); |
| |
| mpi2_undo_cmd_pool: |
| mrsas_free_cmd_pool(instance); |
| |
| return (DDI_FAILURE); |
| } |
| |
| |
| /* |
| * mrsas_init_adapter_tbolt - Initialize fusion interface adapter. |
| */ |
| int |
| mrsas_init_adapter_tbolt(struct mrsas_instance *instance) |
| { |
| |
| /* |
| * Reduce the max supported cmds by 1. This is to ensure that the |
| * reply_q_sz (1 more than the max cmd that driver may send) |
| * does not exceed max cmds that the FW can support |
| */ |
| |
| if (instance->max_fw_cmds > 1008) { |
| instance->max_fw_cmds = 1008; |
| instance->max_fw_cmds = instance->max_fw_cmds-1; |
| } |
| |
| con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: " |
| "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds)); |
| |
| |
| /* create a pool of commands */ |
| if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) { |
| dev_err(instance->dip, CE_WARN, |
| "alloc_space_for_mpi2() failed."); |
| |
| return (DDI_FAILURE); |
| } |
| |
| /* Send ioc init message */ |
| /* NOTE: the issue_init call does FMA checking already. */ |
| if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) { |
| dev_err(instance->dip, CE_WARN, |
| "mrsas_issue_init_mpi2() failed."); |
| |
| goto fail_init_fusion; |
| } |
| |
| instance->unroll.alloc_space_mpi2 = 1; |
| |
| con_log(CL_ANN, (CE_NOTE, |
| "mrsas_init_adapter_tbolt: SUCCESSFUL")); |
| |
| return (DDI_SUCCESS); |
| |
| fail_init_fusion: |
| free_space_for_mpi2(instance); |
| |
| return (DDI_FAILURE); |
| } |
| |
| |
| |
| /* |
| * init_mpi2 |
| */ |
| int |
| mrsas_issue_init_mpi2(struct mrsas_instance *instance) |
| { |
| dma_obj_t init2_dma_obj; |
| int ret_val = DDI_SUCCESS; |
| |
| /* allocate DMA buffer for IOC INIT message */ |
| init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t); |
| init2_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; |
| init2_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| init2_dma_obj.dma_attr.dma_attr_align = 256; |
| |
| if (mrsas_alloc_dma_obj(instance, &init2_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
		    "could not allocate data transfer buffer.");
| return (DDI_FAILURE); |
| } |
| (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t)); |
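	/*
	 * The fill pattern above is arbitrary; the buffer is bzero'd and
	 * re-initialized field by field in mrsas_tbolt_ioc_init().
	 */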
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "mrsas_issue_init_mpi2 _phys adr: %x", |
| init2_dma_obj.dma_cookie[0].dmac_address)); |
| |
| |
| /* Initialize and send ioc init message */ |
| ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj); |
| if (ret_val == DDI_FAILURE) { |
| con_log(CL_ANN1, (CE_WARN, |
| "mrsas_issue_init_mpi2: Failed")); |
| goto fail_init_mpi2; |
| } |
| |
| /* free IOC init DMA buffer */ |
| if (mrsas_free_dma_obj(instance, init2_dma_obj) |
| != DDI_SUCCESS) { |
| con_log(CL_ANN1, (CE_WARN, |
| "mrsas_issue_init_mpi2: Free Failed")); |
| return (DDI_FAILURE); |
| } |
| |
| /* Get/Check and sync ld_map info */ |
| instance->map_id = 0; |
| if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS) |
| (void) mrsas_tbolt_sync_map_info(instance); |
| |
| |
| /* No mrsas_cmd to send, so send NULL. */ |
| if (mrsas_common_check(instance, NULL) != DDI_SUCCESS) |
| goto fail_init_mpi2; |
| |
| con_log(CL_ANN, (CE_NOTE, |
| "mrsas_issue_init_mpi2: SUCCESSFUL")); |
| |
| return (DDI_SUCCESS); |
| |
| fail_init_mpi2: |
| (void) mrsas_free_dma_obj(instance, init2_dma_obj); |
| |
| return (DDI_FAILURE); |
| } |
| |
| static int |
| mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj) |
| { |
| int numbytes; |
| uint16_t flags; |
| struct mrsas_init_frame2 *mfiFrameInit2; |
| struct mrsas_header *frame_hdr; |
| Mpi2IOCInitRequest_t *init; |
| struct mrsas_cmd *cmd = NULL; |
| struct mrsas_drv_ver drv_ver_info; |
| MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; |
| uint32_t timeout; |
| |
| con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); |
| |
| |
#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));
#endif
| |
| init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer; |
| numbytes = sizeof (*init); |
| bzero(init, numbytes); |
| |
| ddi_put8(mpi2_dma_obj->acc_handle, &init->Function, |
| MPI2_FUNCTION_IOC_INIT); |
| |
| ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit, |
| MPI2_WHOINIT_HOST_DRIVER); |
| |
| /* set MsgVersion and HeaderVersion host driver was built with */ |
| ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion, |
| MPI2_VERSION); |
| |
| ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion, |
| MPI2_HEADER_VERSION); |
| |
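	/* SystemRequestFrameSize is expressed to the FW in 32-bit words. */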
| ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize, |
| instance->raid_io_msg_size / 4); |
| |
| ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth, |
| 0); |
| |
| ddi_put16(mpi2_dma_obj->acc_handle, |
| &init->ReplyDescriptorPostQueueDepth, |
| instance->reply_q_depth); |
| /* |
| * These addresses are set using the DMA cookie addresses from when the |
| * memory was allocated. Sense buffer hi address should be 0. |
| * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0); |
| */ |
| |
| ddi_put32(mpi2_dma_obj->acc_handle, |
| &init->SenseBufferAddressHigh, 0); |
| |
| ddi_put64(mpi2_dma_obj->acc_handle, |
| (uint64_t *)&init->SystemRequestFrameBaseAddress, |
| instance->io_request_frames_phy); |
| |
| ddi_put64(mpi2_dma_obj->acc_handle, |
| &init->ReplyDescriptorPostQueueAddress, |
| instance->reply_frame_pool_phy); |
| |
| ddi_put64(mpi2_dma_obj->acc_handle, |
| &init->ReplyFreeQueueAddress, 0); |
| |
| cmd = instance->cmd_list[0]; |
| if (cmd == NULL) { |
| return (DDI_FAILURE); |
| } |
| cmd->retry_count_for_ocr = 0; |
| cmd->pkt = NULL; |
| cmd->drv_pkt_time = 0; |
| |
| mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request; |
| con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2)); |
| |
| frame_hdr = &cmd->frame->hdr; |
| |
| ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, |
| MFI_CMD_STATUS_POLL_MODE); |
| |
| flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); |
| |
| flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; |
| |
| ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); |
| |
| con_log(CL_ANN, (CE_CONT, |
| "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID)); |
| |
| /* Init the MFI Header */ |
| ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, |
| &mfiFrameInit2->cmd, MFI_CMD_OP_INIT); |
| |
| con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd)); |
| |
| ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, |
| &mfiFrameInit2->cmd_status, |
| MFI_STAT_INVALID_STATUS); |
| |
| con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status)); |
| |
| ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle, |
| &mfiFrameInit2->queue_info_new_phys_addr_lo, |
| mpi2_dma_obj->dma_cookie[0].dmac_address); |
| |
| ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle, |
| &mfiFrameInit2->data_xfer_len, |
| sizeof (Mpi2IOCInitRequest_t)); |
| |
| con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x", |
| (int)init->ReplyDescriptorPostQueueAddress)); |
| |
| /* fill driver version information */ |
| fill_up_drv_ver(&drv_ver_info); |
| |
| /* allocate the driver version data transfer buffer */ |
| instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver); |
| instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr; |
| instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; |
| instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; |
| instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1; |
| instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1; |
| |
| if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj, |
| (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { |
| dev_err(instance->dip, CE_WARN, |
| "fusion init: Could not allocate driver version buffer."); |
| return (DDI_FAILURE); |
| } |
| /* copy driver version to dma buffer */ |
| bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver)); |
| ddi_rep_put8(cmd->frame_dma_obj.acc_handle, |
| (uint8_t *)drv_ver_info.drv_ver, |
| (uint8_t *)instance->drv_ver_dma_obj.buffer, |
| sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR); |
| |
| /* send driver version physical address to firmware */ |
| ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion, |
| instance->drv_ver_dma_obj.dma_cookie[0].dmac_address); |
| |
| con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x", |
| mfiFrameInit2->queue_info_new_phys_addr_lo, |
| (int)sizeof (Mpi2IOCInitRequest_t))); |
| |
| con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len)); |
| |
| con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x", |
| cmd->scsi_io_request_phys_addr, |
| (int)sizeof (struct mrsas_init_frame2))); |
| |
| /* disable interrupts before sending INIT2 frame */ |
| instance->func_ptr->disable_intr(instance); |
| |
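	/*
	 * The IOC INIT frame is 256-byte aligned, so the low byte of its
	 * physical address in req_desc.Words is free to carry the MFA
	 * request-type flags.
	 */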
| req_desc.Words = cmd->scsi_io_request_phys_addr; |
| req_desc.MFAIo.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| cmd->request_desc = &req_desc; |
| |
| /* issue the init frame */ |
| |
| mutex_enter(&instance->reg_write_mtx); |
| WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance); |
| WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance); |
| mutex_exit(&instance->reg_write_mtx); |
| |
| con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd)); |
| con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ", |
| frame_hdr->cmd_status)); |
| |
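	/*
	 * Poll for completion: the timeout is converted to clock ticks and
	 * each delay(1) sleeps one tick.
	 */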
| timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC); |
| do { |
| if (ddi_get8(cmd->frame_dma_obj.acc_handle, |
| &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE) |
| break; |
| delay(1); |
| timeout--; |
| } while (timeout > 0); |
| |
| if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, |
| &mfiFrameInit2->cmd_status) == 0) { |
| con_log(CL_ANN, (CE_NOTE, "INIT2 Success")); |
| } else { |
| con_log(CL_ANN, (CE_WARN, "INIT2 Fail")); |
| mrsas_dump_reply_desc(instance); |
| goto fail_ioc_init; |
| } |
| |
| mrsas_dump_reply_desc(instance); |
| |
| instance->unroll.verBuff = 1; |
| |
| con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL")); |
| |
| return (DDI_SUCCESS); |
| |
| |
| fail_ioc_init: |
| |
| (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); |
| |
| return (DDI_FAILURE); |
| } |
| |
| int |
| wait_for_outstanding_poll_io(struct mrsas_instance *instance) |
| { |
	int i;
	uint32_t wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
| /* |
| * Check For Outstanding poll Commands |
| * except ldsync command and aen command |
| */ |
| if (instance->fw_outstanding <= 2) { |
| break; |
| } |
| drv_usecwait(MILLISEC); |
| /* complete commands from reply queue */ |
| (void) mr_sas_tbolt_process_outstanding_cmd(instance); |
| } |
| if (instance->fw_outstanding > 2) { |
| return (1); |
| } |
| return (0); |
| } |
| /* |
| * scsi_pkt handling |
| * |
| * Visible to the external world via the transport structure. |
| */ |
| |
| int |
| mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) |
| { |
| struct mrsas_instance *instance = ADDR2MR(ap); |
| struct scsa_cmd *acmd = PKT2CMD(pkt); |
| struct mrsas_cmd *cmd = NULL; |
| uchar_t cmd_done = 0; |
| |
| con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); |
| if (instance->deadadapter == 1) { |
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tran_start: TBOLT returning TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
| if (pkt) { |
| pkt->pkt_reason = CMD_DEV_GONE; |
| pkt->pkt_statistics = STAT_DISCON; |
| } |
| return (TRAN_FATAL_ERROR); |
| } |
| if (instance->adapterresetinprogress) { |
| con_log(CL_ANN, (CE_NOTE, "Reset flag set, " |
| "returning mfi_pkt and setting TRAN_BUSY\n")); |
| return (TRAN_BUSY); |
| } |
| (void) mrsas_tbolt_prepare_pkt(acmd); |
| |
| cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done); |
| |
| /* |
| * Check if the command is already completed by the mrsas_build_cmd() |
| * routine. In which case the busy_flag would be clear and scb will be |
| * NULL and appropriate reason provided in pkt_reason field |
| */ |
| if (cmd_done) { |
| pkt->pkt_reason = CMD_CMPLT; |
| pkt->pkt_scbp[0] = STATUS_GOOD; |
| pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
| | STATE_SENT_CMD; |
| if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) { |
| (*pkt->pkt_comp)(pkt); |
| } |
| |
| return (TRAN_ACCEPT); |
| } |
| |
| if (cmd == NULL) { |
| return (TRAN_BUSY); |
| } |
| |
| |
| if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { |
| if (instance->fw_outstanding > instance->max_fw_cmds) { |
| dev_err(instance->dip, CE_WARN, |
| "Command Queue Full... Returning BUSY"); |
| DTRACE_PROBE2(tbolt_start_tran_err, |
| uint16_t, instance->fw_outstanding, |
| uint16_t, instance->max_fw_cmds); |
| return_raid_msg_pkt(instance, cmd); |
| return (TRAN_BUSY); |
| } |
| |
| /* Synchronize the Cmd frame for the controller */ |
| (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, |
| DDI_DMA_SYNC_FORDEV); |
| |
| con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x " |
| "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0], |
| cmd->index, cmd->SMID)); |
| |
| instance->func_ptr->issue_cmd(cmd, instance); |
| } else { |
| instance->func_ptr->issue_cmd(cmd, instance); |
| (void) wait_for_outstanding_poll_io(instance); |
| (void) mrsas_common_check(instance, cmd); |
| DTRACE_PROBE2(tbolt_start_nointr_done, |
| uint8_t, cmd->frame->hdr.cmd, |
| uint8_t, cmd->frame->hdr.cmd_status); |
| } |
| |
| return (TRAN_ACCEPT); |
| } |
| |
| /* |
| * prepare the pkt: |
| * the pkt may have been resubmitted or just reused so |
| * initialize some fields and do some checks. |
| */ |
| static int |
| mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd) |
| { |
| struct scsi_pkt *pkt = CMD2PKT(acmd); |
| |
| |
| /* |
| * Reinitialize some fields that need it; the packet may |
| * have been resubmitted |
| */ |
| pkt->pkt_reason = CMD_CMPLT; |
| pkt->pkt_state = 0; |
| pkt->pkt_statistics = 0; |
| pkt->pkt_resid = 0; |
| |
| /* |
| * zero status byte. |
| */ |
| *(pkt->pkt_scbp) = 0; |
| |
| return (0); |
| } |
| |
| |
| int |
| mr_sas_tbolt_build_sgl(struct mrsas_instance *instance, |
| struct scsa_cmd *acmd, |
| struct mrsas_cmd *cmd, |
| Mpi2RaidSCSIIORequest_t *scsi_raid_io, |
| uint32_t *datalen) |
| { |
| uint32_t MaxSGEs; |
| int sg_to_process; |
| uint32_t i, j; |
| uint32_t numElements, endElement; |
| Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL; |
| Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL; |
| ddi_acc_handle_t acc_handle = |
| instance->mpi2_frame_pool_dma_obj.acc_handle; |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "chkpnt: Building Chained SGL :%d", __LINE__)); |
| |
	/* Calculate SGE size in number of Words (32bit). */
	/* Clear the datalen before updating it. */
| *datalen = 0; |
| |
| MaxSGEs = instance->max_sge_in_main_msg; |
| |
| ddi_put16(acc_handle, &scsi_raid_io->SGLFlags, |
| MPI2_SGE_FLAGS_64_BIT_ADDRESSING); |
| |
| /* set data transfer flag. */ |
| if (acmd->cmd_flags & CFLAG_DMASEND) { |
| ddi_put32(acc_handle, &scsi_raid_io->Control, |
| MPI2_SCSIIO_CONTROL_WRITE); |
| } else { |
| ddi_put32(acc_handle, &scsi_raid_io->Control, |
| MPI2_SCSIIO_CONTROL_READ); |
| } |
| |
| |
| numElements = acmd->cmd_cookiecnt; |
| |
| con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements)); |
| |
| if (numElements > instance->max_num_sge) { |
| con_log(CL_ANN, (CE_NOTE, |
| "[Max SGE Count Exceeded]:%x", numElements)); |
| return (numElements); |
| } |
| |
| ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE, |
| (uint8_t)numElements); |
| |
| /* set end element in main message frame */ |
| endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1); |
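	/*
	 * When the list fits, all numElements SGEs go in the main frame;
	 * otherwise only MaxSGEs - 1 do, and the last main-frame slot is
	 * taken by the chain element built further below.
	 */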
| |
| /* prepare the scatter-gather list for the firmware */ |
| scsi_raid_io_sgl_ieee = |
| (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain; |
| |
| if (instance->gen3) { |
| Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee; |
| sgl_ptr_end += instance->max_sge_in_main_msg - 1; |
| |
| ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0); |
| } |
| |
| for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) { |
| ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, |
| acmd->cmd_dmacookies[i].dmac_laddress); |
| |
| ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, |
| acmd->cmd_dmacookies[i].dmac_size); |
| |
| ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0); |
| |
| if (instance->gen3) { |
| if (i == (numElements - 1)) { |
| ddi_put8(acc_handle, |
| &scsi_raid_io_sgl_ieee->Flags, |
| IEEE_SGE_FLAGS_END_OF_LIST); |
| } |
| } |
| |
| *datalen += acmd->cmd_dmacookies[i].dmac_size; |
| |
| #ifdef DEBUG |
| con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64, |
| scsi_raid_io_sgl_ieee->Address)); |
| con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x", |
| scsi_raid_io_sgl_ieee->Length)); |
| con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x", |
| scsi_raid_io_sgl_ieee->Flags)); |
| #endif |
| |
| } |
| |
| ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0); |
| |
| /* check if chained SGL required */ |
| if (i < numElements) { |
| |
| con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i)); |
| |
| if (instance->gen3) { |
| uint16_t ioFlags = |
| ddi_get16(acc_handle, &scsi_raid_io->IoFlags); |
| |
| if ((ioFlags & |
| MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != |
| MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) { |
| ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, |
| (U8)instance->chain_offset_io_req); |
| } else { |
| ddi_put8(acc_handle, |
| &scsi_raid_io->ChainOffset, 0); |
| } |
| } else { |
| ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, |
| (U8)instance->chain_offset_io_req); |
| } |
| |
| /* prepare physical chain element */ |
| ieeeChainElement = scsi_raid_io_sgl_ieee; |
| |
| ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0); |
| |
| if (instance->gen3) { |
| ddi_put8(acc_handle, &ieeeChainElement->Flags, |
| IEEE_SGE_FLAGS_CHAIN_ELEMENT); |
| } else { |
| ddi_put8(acc_handle, &ieeeChainElement->Flags, |
| (IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR)); |
| } |
| |
| ddi_put32(acc_handle, &ieeeChainElement->Length, |
| (sizeof (MPI2_SGE_IO_UNION) * (numElements - i))); |
| |
| ddi_put64(acc_handle, &ieeeChainElement->Address, |
| (U64)cmd->sgl_phys_addr); |
| |
| sg_to_process = numElements - i; |
| |
		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", sg_to_process));
| |
| /* point to the chained SGL buffer */ |
| scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl; |
| |
| /* build rest of the SGL in chained buffer */ |
| for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) { |
| con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i)); |
| |
| ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, |
| acmd->cmd_dmacookies[i].dmac_laddress); |
| |
| ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, |
| acmd->cmd_dmacookies[i].dmac_size); |
| |
| ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0); |
| |
| if (instance->gen3) { |
| if (i == (numElements - 1)) { |
| ddi_put8(acc_handle, |
| &scsi_raid_io_sgl_ieee->Flags, |
| IEEE_SGE_FLAGS_END_OF_LIST); |
| } |
| } |
| |
| *datalen += acmd->cmd_dmacookies[i].dmac_size; |
| |
#ifdef DEBUG
| con_log(CL_DLEVEL1, (CE_NOTE, |
| "[SGL Address]: %" PRIx64, |
| scsi_raid_io_sgl_ieee->Address)); |
| con_log(CL_DLEVEL1, (CE_NOTE, |
| "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length)); |
| con_log(CL_DLEVEL1, (CE_NOTE, |
| "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags)); |
| #endif |
| |
| i++; |
| } |
| } |
| |
| return (0); |
} /* end of BuildScatterGather */
| |
| |
| /* |
| * build_cmd |
| */ |
| static struct mrsas_cmd * |
| mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap, |
| struct scsi_pkt *pkt, uchar_t *cmd_done) |
| { |
| uint8_t fp_possible = 0; |
| uint32_t index; |
| uint32_t lba_count = 0; |
| uint32_t start_lba_hi = 0; |
| uint32_t start_lba_lo = 0; |
| ddi_acc_handle_t acc_handle = |
| instance->mpi2_frame_pool_dma_obj.acc_handle; |
| struct mrsas_cmd *cmd = NULL; |
| struct scsa_cmd *acmd = PKT2CMD(pkt); |
| MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion; |
| Mpi2RaidSCSIIORequest_t *scsi_raid_io; |
| uint32_t datalen; |
| struct IO_REQUEST_INFO io_info; |
| MR_FW_RAID_MAP_ALL *local_map_ptr; |
| uint16_t pd_cmd_cdblen; |
| |
| con_log(CL_DLEVEL1, (CE_NOTE, |
| "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__)); |
| |
| /* find out if this is logical or physical drive command. */ |
| acmd->islogical = MRDRV_IS_LOGICAL(ap); |
| acmd->device_id = MAP_DEVICE_ID(instance, ap); |
| |
| *cmd_done = 0; |
| |
| /* get the command packet */ |
| if (!(cmd = get_raid_msg_pkt(instance))) { |
| DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t, |
| instance->fw_outstanding, uint16_t, instance->max_fw_cmds); |
| return (NULL); |
| } |
| |
| index = cmd->index; |
| ReqDescUnion = mr_sas_get_request_descriptor(instance, index); |
| ReqDescUnion->Words = 0; |
| ReqDescUnion->SCSIIO.SMID = cmd->SMID; |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_LD_IO << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| |
| cmd->request_desc = ReqDescUnion; |
| cmd->pkt = pkt; |
| cmd->cmd = acmd; |
| |
| DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0], |
| ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len, |
| uint16_t, acmd->device_id); |
| |
| /* lets get the command directions */ |
| if (acmd->cmd_flags & CFLAG_DMASEND) { |
| if (acmd->cmd_flags & CFLAG_CONSISTENT) { |
| (void) ddi_dma_sync(acmd->cmd_dmahandle, |
| acmd->cmd_dma_offset, acmd->cmd_dma_len, |
| DDI_DMA_SYNC_FORDEV); |
| } |
| } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { |
| if (acmd->cmd_flags & CFLAG_CONSISTENT) { |
| (void) ddi_dma_sync(acmd->cmd_dmahandle, |
| acmd->cmd_dma_offset, acmd->cmd_dma_len, |
| DDI_DMA_SYNC_FORCPU); |
| } |
| } else { |
| con_log(CL_ANN, (CE_NOTE, "NO DMA")); |
| } |
| |
| |
| /* get SCSI_IO raid message frame pointer */ |
| scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; |
| |
| /* zero out SCSI_IO raid message frame */ |
| bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t)); |
| |
| /* Set the ldTargetId set by BuildRaidContext() */ |
| ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId, |
| acmd->device_id); |
| |
| /* Copy CDB to scsi_io_request message frame */ |
| ddi_rep_put8(acc_handle, |
| (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32, |
| acmd->cmd_cdblen, DDI_DEV_AUTOINCR); |
| |
| /* |
| * Just the CDB length, rest of the Flags are zero |
| * This will be modified later. |
| */ |
| ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen); |
| |
| pd_cmd_cdblen = acmd->cmd_cdblen; |
| |
| if (acmd->islogical) { |
| |
| switch (pkt->pkt_cdbp[0]) { |
| case SCMD_READ: |
| case SCMD_WRITE: |
| case SCMD_READ_G1: |
| case SCMD_WRITE_G1: |
| case SCMD_READ_G4: |
| case SCMD_WRITE_G4: |
| case SCMD_READ_G5: |
| case SCMD_WRITE_G5: |
| |
			/* Initialize sense information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
				    "Sense buffer ptr NULL "));
			} else {
				bzero(cmd->sense1, SENSE_LENGTH);
			}
| con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd " |
| "CDB[0] = %x\n", pkt->pkt_cdbp[0])); |
| |
| if (acmd->cmd_cdblen == CDB_GROUP0) { |
| /* 6-byte cdb */ |
| lba_count = (uint16_t)(pkt->pkt_cdbp[4]); |
| start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) | |
| ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | |
| ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) |
| << 16)); |
| } else if (acmd->cmd_cdblen == CDB_GROUP1) { |
| /* 10-byte cdb */ |
| lba_count = |
| (((uint16_t)(pkt->pkt_cdbp[8])) | |
| ((uint16_t)(pkt->pkt_cdbp[7]) << 8)); |
| |
| start_lba_lo = |
| (((uint32_t)(pkt->pkt_cdbp[5])) | |
| ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); |
| |
| } else if (acmd->cmd_cdblen == CDB_GROUP5) { |
| /* 12-byte cdb */ |
| lba_count = ( |
| ((uint32_t)(pkt->pkt_cdbp[9])) | |
| ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); |
| |
| start_lba_lo = |
| (((uint32_t)(pkt->pkt_cdbp[5])) | |
| ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); |
| |
| } else if (acmd->cmd_cdblen == CDB_GROUP4) { |
| /* 16-byte cdb */ |
| lba_count = ( |
| ((uint32_t)(pkt->pkt_cdbp[13])) | |
| ((uint32_t)(pkt->pkt_cdbp[12]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[11]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[10]) << 24)); |
| |
| start_lba_lo = ( |
| ((uint32_t)(pkt->pkt_cdbp[9])) | |
| ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); |
| |
| start_lba_hi = ( |
| ((uint32_t)(pkt->pkt_cdbp[5])) | |
| ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | |
| ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | |
| ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); |
| } |
| |
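			/* lba_count below is in 512-byte sectors */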
| if (instance->tbolt && |
| ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) { |
| dev_err(instance->dip, CE_WARN, |
| "IO SECTOR COUNT exceeds " |
| "controller limit 0x%x sectors", |
| lba_count); |
| } |
| |
| bzero(&io_info, sizeof (struct IO_REQUEST_INFO)); |
| io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | |
| start_lba_lo; |
| io_info.numBlocks = lba_count; |
| io_info.ldTgtId = acmd->device_id; |
| |
| if (acmd->cmd_flags & CFLAG_DMASEND) |
| io_info.isRead = 0; |
| else |
| io_info.isRead = 1; |
| |
| |
| /* Acquire SYNC MAP UPDATE lock */ |
| mutex_enter(&instance->sync_map_mtx); |
| |
| local_map_ptr = |
| instance->ld_map[(instance->map_id & 1)]; |
| |
| if ((MR_TargetIdToLdGet( |
| acmd->device_id, local_map_ptr) >= |
| MAX_LOGICAL_DRIVES) || !instance->fast_path_io) { |
| dev_err(instance->dip, CE_NOTE, |
| "Fast Path NOT Possible, " |
| "targetId >= MAX_LOGICAL_DRIVES || " |
| "!instance->fast_path_io"); |
| fp_possible = 0; |
| /* Set Regionlock flags to BYPASS */ |
| /* io_request->RaidContext.regLockFlags = 0; */ |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags, 0); |
| } else { |
| if (MR_BuildRaidContext(instance, &io_info, |
| &scsi_raid_io->RaidContext, local_map_ptr)) |
| fp_possible = io_info.fpOkForIo; |
| } |
| |
| if (!enable_fp) |
| fp_possible = 0; |
| |
| con_log(CL_ANN1, (CE_NOTE, "enable_fp %d " |
| "instance->fast_path_io %d fp_possible %d", |
| enable_fp, instance->fast_path_io, fp_possible)); |
| |
| if (fp_possible) { |
| |
| /* Check for DIF enabled LD */ |
| if (MR_CheckDIF(acmd->device_id, local_map_ptr)) { |
| /* Prepare 32 Byte CDB for DIF capable Disk */ |
| mrsas_tbolt_prepare_cdb(instance, |
| scsi_raid_io->CDB.CDB32, |
| &io_info, scsi_raid_io, start_lba_lo); |
| } else { |
| mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32, |
| sizeof (scsi_raid_io->CDB.CDB32), |
| (uint8_t *)&pd_cmd_cdblen, |
| io_info.pdBlock, io_info.numBlocks); |
| ddi_put16(acc_handle, |
| &scsi_raid_io->IoFlags, pd_cmd_cdblen); |
| } |
| |
| ddi_put8(acc_handle, &scsi_raid_io->Function, |
| MPI2_FUNCTION_SCSI_IO_REQUEST); |
| |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| if (instance->gen3) { |
| uint8_t regLockFlags = ddi_get8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags); |
| uint16_t IoFlags = ddi_get16(acc_handle, |
| &scsi_raid_io->IoFlags); |
| |
| if (regLockFlags == REGION_TYPE_UNUSED) |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| IoFlags |= |
| MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; |
| regLockFlags |= |
| (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | |
| MR_RL_FLAGS_SEQ_NUM_ENABLE); |
| |
| ddi_put8(acc_handle, |
| &scsi_raid_io->ChainOffset, 0); |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.nsegType, |
| ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | |
| MPI2_TYPE_CUDA)); |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags, |
| regLockFlags); |
| ddi_put16(acc_handle, |
| &scsi_raid_io->IoFlags, IoFlags); |
| } |
| |
| if ((instance->load_balance_info[ |
| acmd->device_id].loadBalanceFlag) && |
| (io_info.isRead)) { |
| io_info.devHandle = |
| get_updated_dev_handle(&instance-> |
| load_balance_info[acmd->device_id], |
| &io_info); |
| cmd->load_balance_flag |= |
| MEGASAS_LOAD_BALANCE_FLAG; |
| } else { |
| cmd->load_balance_flag &= |
| ~MEGASAS_LOAD_BALANCE_FLAG; |
| } |
| |
| ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle; |
| ddi_put16(acc_handle, &scsi_raid_io->DevHandle, |
| io_info.devHandle); |
| |
| } else { /* FP Not Possible */ |
| |
| ddi_put8(acc_handle, &scsi_raid_io->Function, |
| MPI2_FUNCTION_LD_IO_REQUEST); |
| |
| ddi_put16(acc_handle, |
| &scsi_raid_io->DevHandle, acmd->device_id); |
| |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_LD_IO << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| ddi_put16(acc_handle, |
| &scsi_raid_io->RaidContext.timeoutValue, |
| local_map_ptr->raidMap.fpPdIoTimeoutSec); |
| |
| if (instance->gen3) { |
| uint8_t regLockFlags = ddi_get8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags); |
| |
| if (regLockFlags == REGION_TYPE_UNUSED) { |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| } |
| |
| regLockFlags |= |
| (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | |
| MR_RL_FLAGS_SEQ_NUM_ENABLE); |
| |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.nsegType, |
| ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | |
| MPI2_TYPE_CUDA)); |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags, |
| regLockFlags); |
| } |
| } /* Not FP */ |
| |
| /* Release SYNC MAP UPDATE lock */ |
| mutex_exit(&instance->sync_map_mtx); |
| |
| break; |
| |
| case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */ |
| return_raid_msg_pkt(instance, cmd); |
| *cmd_done = 1; |
| return (NULL); |
| } |
| |
| case SCMD_MODE_SENSE: |
| case SCMD_MODE_SENSE_G1: { |
| union scsi_cdb *cdbp; |
| uint16_t page_code; |
| |
| cdbp = (void *)pkt->pkt_cdbp; |
| page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; |
| switch (page_code) { |
| case 0x3: |
| case 0x4: |
| (void) mrsas_mode_sense_build(pkt); |
| return_raid_msg_pkt(instance, cmd); |
| *cmd_done = 1; |
| return (NULL); |
| } |
| return (cmd); |
| } |
| |
| default: |
| /* Pass-through command to logical drive */ |
| ddi_put8(acc_handle, &scsi_raid_io->Function, |
| MPI2_FUNCTION_LD_IO_REQUEST); |
| ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun); |
| ddi_put16(acc_handle, &scsi_raid_io->DevHandle, |
| acmd->device_id); |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| break; |
| } |
| } else { /* Physical */ |
| /* Pass-through command to physical drive */ |
| |
| /* Acquire SYNC MAP UPDATE lock */ |
| mutex_enter(&instance->sync_map_mtx); |
| |
| local_map_ptr = instance->ld_map[instance->map_id & 1]; |
| |
| ddi_put8(acc_handle, &scsi_raid_io->Function, |
| MPI2_FUNCTION_SCSI_IO_REQUEST); |
| |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| ddi_put16(acc_handle, &scsi_raid_io->DevHandle, |
| local_map_ptr->raidMap. |
| devHndlInfo[acmd->device_id].curDevHdl); |
| |
		/* Set regLockFlags to REGION_TYPE_BYPASS */
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.regLockFlags, 0); |
| ddi_put64(acc_handle, |
| &scsi_raid_io->RaidContext.regLockRowLBA, 0); |
| ddi_put32(acc_handle, |
| &scsi_raid_io->RaidContext.regLockLength, 0); |
| ddi_put8(acc_handle, |
| &scsi_raid_io->RaidContext.RAIDFlags, |
| MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << |
| MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); |
| ddi_put16(acc_handle, |
| &scsi_raid_io->RaidContext.timeoutValue, |
| local_map_ptr->raidMap.fpPdIoTimeoutSec); |
| ddi_put16(acc_handle, |
| &scsi_raid_io->RaidContext.ldTargetId, |
| acmd->device_id); |
| ddi_put8(acc_handle, |
| &scsi_raid_io->LUN[1], acmd->lun); |
| |
| if (instance->fast_path_io && instance->gen3) { |
| uint16_t IoFlags = ddi_get16(acc_handle, |
| &scsi_raid_io->IoFlags); |
| IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; |
| ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags); |
| } |
| ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle, |
| local_map_ptr->raidMap. |
| devHndlInfo[acmd->device_id].curDevHdl); |
| |
| /* Release SYNC MAP UPDATE lock */ |
| mutex_exit(&instance->sync_map_mtx); |
| } |
| |
| /* Set sense buffer physical address/length in scsi_io_request. */ |
| ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress, |
| cmd->sense_phys_addr1); |
| ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH); |
| |
| /* Construct SGL */ |
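| /* SGLOffset0 is expressed in 32-bit words, hence the division by 4. */ |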
| ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0, |
| offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4); |
| |
| (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd, |
| scsi_raid_io, &datalen); |
| |
| ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen); |
| |
| con_log(CL_ANN, (CE_CONT, |
| "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n", |
| pkt->pkt_cdbp[0], acmd->device_id)); |
| con_log(CL_DLEVEL1, (CE_CONT, |
| "data length = %x\n", |
| scsi_raid_io->DataLength)); |
| con_log(CL_DLEVEL1, (CE_CONT, |
| "cdb length = %x\n", |
| acmd->cmd_cdblen)); |
| |
| return (cmd); |
| } |
| |
| uint32_t |
| tbolt_read_fw_status_reg(struct mrsas_instance *instance) |
| { |
| return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance)); |
| } |
| |
| void |
| tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance) |
| { |
| MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc; |
| struct scsi_pkt *pkt; |
| |
| atomic_inc_16(&instance->fw_outstanding); |
| |
| con_log(CL_ANN1, |
| (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID)); |
| |
| con_log(CL_DLEVEL1, (CE_CONT, |
| " [req desc Words] %" PRIx64 " \n", req_desc->Words)); |
| con_log(CL_DLEVEL1, (CE_CONT, |
| " [req desc low part] %x \n", |
| (uint_t)(req_desc->Words & 0xffffffff))); |
| con_log(CL_DLEVEL1, (CE_CONT, |
| " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32))); |
| pkt = cmd->pkt; |
| |
| if (pkt) { |
| con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc: " |
| "ISSUED CMD TO FW : called : cmd: %p " |
| "instance: %p pkt: %p pkt_time: %x\n", |
| gethrtime(), (void *)cmd, (void *)instance, |
| (void *)pkt, cmd->drv_pkt_time)); |
| if (instance->adapterresetinprogress) { |
| cmd->drv_pkt_time = (uint16_t)debug_timeout_g; |
| con_log(CL_ANN, (CE_NOTE, |
| "TBOLT Reset the scsi_pkt timer")); |
| } else { |
| push_pending_mfi_pkt(instance, cmd); |
| } |
| |
| } else { |
| con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc: " |
| "ISSUED CMD TO FW : called : cmd : %p, instance: %p " |
| "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance)); |
| } |
| |
| /* Issue the command to the FW */ |
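| /* |
| * The 64-bit request descriptor is written as two 32-bit |
| * queue-port accesses; reg_write_mtx keeps concurrent |
| * submitters from interleaving their low/high halves. |
| */ |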
| mutex_enter(&instance->reg_write_mtx); |
| WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); |
| WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); |
| mutex_exit(&instance->reg_write_mtx); |
| } |
| |
| /* |
| * issue_cmd_in_sync_mode |
| */ |
| int |
| tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance, |
| struct mrsas_cmd *cmd) |
| { |
| int i; |
| uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; |
| MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc; |
| |
| struct mrsas_header *hdr; |
| hdr = (struct mrsas_header *)&cmd->frame->hdr; |
| |
| con_log(CL_ANN, |
| (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", |
| cmd->SMID)); |
| |
| |
| if (instance->adapterresetinprogress) { |
| cmd->drv_pkt_time = ddi_get16( |
| cmd->frame_dma_obj.acc_handle, &hdr->timeout); |
| if (cmd->drv_pkt_time < debug_timeout_g) |
| cmd->drv_pkt_time = (uint16_t)debug_timeout_g; |
| con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: " |
| "RESET-IN-PROGRESS, issue cmd & return.")); |
| |
| mutex_enter(&instance->reg_write_mtx); |
| WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); |
| WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); |
| mutex_exit(&instance->reg_write_mtx); |
| |
| return (DDI_SUCCESS); |
| } else { |
| con_log(CL_ANN1, (CE_NOTE, |
| "tbolt_issue_cmd_in_sync_mode: pushing the pkt")); |
| push_pending_mfi_pkt(instance, cmd); |
| } |
| |
| con_log(CL_DLEVEL2, (CE_NOTE, |
| "HighQport offset :%p", |
| (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT))); |
| con_log(CL_DLEVEL2, (CE_NOTE, |
| "LowQport offset :%p", |
| (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT))); |
| |
| cmd->sync_cmd = MRSAS_TRUE; |
| cmd->cmd_status = ENODATA; |
| |
| |
| mutex_enter(&instance->reg_write_mtx); |
| WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); |
| WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); |
| mutex_exit(&instance->reg_write_mtx); |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| " req desc high part %x", (uint_t)(req_desc->Words >> 32))); |
| con_log(CL_ANN1, (CE_NOTE, " req desc low part %x", |
| (uint_t)(req_desc->Words & 0xffffffff))); |
| |
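| /* |
| * Wait for the interrupt path to set cmd_status and signal |
| * int_cmd_cv; cv_wait() is untimed, so 'i' bounds the number |
| * of wakeups rather than elapsed milliseconds. |
| */ |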
| mutex_enter(&instance->int_cmd_mtx); |
| for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { |
| cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); |
| } |
| mutex_exit(&instance->int_cmd_mtx); |
| |
| if (i < (msecs - 1)) { |
| return (DDI_SUCCESS); |
| } else { |
| return (DDI_FAILURE); |
| } |
| } |
| |
| /* |
| * issue_cmd_in_poll_mode |
| */ |
| int |
| tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance, |
| struct mrsas_cmd *cmd) |
| { |
| int i; |
| uint16_t flags; |
| uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; |
| struct mrsas_header *frame_hdr; |
| MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc; |
| |
| con_log(CL_ANN, |
| (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", |
| cmd->SMID)); |
| |
| frame_hdr = (struct mrsas_header *)&cmd->frame->hdr; |
| ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, |
| MFI_CMD_STATUS_POLL_MODE); |
| flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); |
| flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; |
| ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); |
| |
| con_log(CL_ANN1, (CE_NOTE, " req desc low part %x", |
| (uint_t)(req_desc->Words & 0xffffffff))); |
| con_log(CL_ANN1, (CE_NOTE, |
| " req desc high part %x", (uint_t)(req_desc->Words >> 32))); |
| |
| /* issue the frame using inbound queue port */ |
| mutex_enter(&instance->reg_write_mtx); |
| WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance); |
| WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance); |
| mutex_exit(&instance->reg_write_mtx); |
| |
| for (i = 0; i < msecs && |
| (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) == |
| MFI_CMD_STATUS_POLL_MODE); i++) { |
| /* wait for cmd_status to change from 0xFF */ |
| drv_usecwait(MILLISEC); /* wait for 1000 usecs */ |
| } |
| |
| DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i); |
| |
| if (ddi_get8(cmd->frame_dma_obj.acc_handle, |
| &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) { |
| con_log(CL_ANN1, (CE_NOTE, |
| " cmd failed %" PRIx64, (req_desc->Words))); |
| return (DDI_FAILURE); |
| } |
| |
| return (DDI_SUCCESS); |
| } |
| |
| void |
| tbolt_enable_intr(struct mrsas_instance *instance) |
| { |
| /* TODO: For Thunderbolt/Invader also clear intr on enable */ |
| /* writel(~0, &regs->outbound_intr_status); */ |
| /* readl(&regs->outbound_intr_status); */ |
| |
| WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance); |
| |
| /* dummy read to force PCI flush */ |
| (void) RD_OB_INTR_MASK(instance); |
| } |
| |
| void |
| tbolt_disable_intr(struct mrsas_instance *instance) |
| { |
| uint32_t mask = 0xFFFFFFFF; |
| |
| WR_OB_INTR_MASK(mask, instance); |
| |
| /* Dummy readl to force PCI flush */ |
| (void) RD_OB_INTR_MASK(instance); |
| } |
| |
| |
| int |
| tbolt_intr_ack(struct mrsas_instance *instance) |
| { |
| uint32_t status; |
| |
| /* check if it is our interrupt */ |
| status = RD_OB_INTR_STATUS(instance); |
| con_log(CL_ANN1, (CE_NOTE, |
| "chkpnt: Entered tbolt_intr_ack status = %d", status)); |
| |
| if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) { |
| return (DDI_INTR_UNCLAIMED); |
| } |
| |
| if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { |
| ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); |
| return (DDI_INTR_UNCLAIMED); |
| } |
| |
| if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) { |
| /* clear the interrupt by writing back the same value */ |
| WR_OB_INTR_STATUS(status, instance); |
| /* dummy READ */ |
| (void) RD_OB_INTR_STATUS(instance); |
| } |
| return (DDI_INTR_CLAIMED); |
| } |
| |
| /* |
| * get_raid_msg_pkt : Get a command from the free pool. |
| * After successful allocation, the caller must clear the |
| * frame buffer (bzero it) before using the packet further. |
| * |
| * ***** Note ***** |
| * After clearing the frame buffer, the caller must restore |
| * the frame buffer's context id. A usage sketch follows the |
| * function below. |
| */ |
| |
| struct mrsas_cmd * |
| get_raid_msg_pkt(struct mrsas_instance *instance) |
| { |
| mlist_t *head = &instance->cmd_pool_list; |
| struct mrsas_cmd *cmd = NULL; |
| |
| mutex_enter(&instance->cmd_pool_mtx); |
| ASSERT(mutex_owned(&instance->cmd_pool_mtx)); |
| |
| |
| if (!mlist_empty(head)) { |
| cmd = mlist_entry(head->next, struct mrsas_cmd, list); |
| mlist_del_init(head->next); |
| } |
| if (cmd != NULL) { |
| cmd->pkt = NULL; |
| cmd->retry_count_for_ocr = 0; |
| cmd->drv_pkt_time = 0; |
| } |
| mutex_exit(&instance->cmd_pool_mtx); |
| |
| if (cmd != NULL) |
| bzero(cmd->scsi_io_request, |
| sizeof (Mpi2RaidSCSIIORequest_t)); |
| return (cmd); |
| } |
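| |
| /* |
| * Illustrative usage sketch of the allocate/clear/restore-context |
| * pattern described above; this is not taken verbatim from any |
| * caller in this file, and field names such as hdr.context follow |
| * the MFI frame header layout: |
| * |
| * struct mrsas_cmd *cmd = get_raid_msg_pkt(instance); |
| * if (cmd == NULL) |
| * return (DDI_FAILURE); |
| * bzero(cmd->frame, sizeof (union mrsas_frame)); |
| * ddi_put32(cmd->frame_dma_obj.acc_handle, |
| * &cmd->frame->hdr.context, cmd->index); |
| * (build and issue the MFI command here) |
| * return_raid_msg_pkt(instance, cmd); |
| */ |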
| |
| struct mrsas_cmd * |
| get_raid_msg_mfi_pkt(struct mrsas_instance *instance) |
| { |
| mlist_t *head = &instance->cmd_app_pool_list; |
| struct mrsas_cmd *cmd = NULL; |
| |
| mutex_enter(&instance->cmd_app_pool_mtx); |
| ASSERT(mutex_owned(&instance->cmd_app_pool_mtx)); |
| |
| if (!mlist_empty(head)) { |
| cmd = mlist_entry(head->next, struct mrsas_cmd, list); |
| mlist_del_init(head->next); |
| } |
| if (cmd != NULL) { |
| cmd->retry_count_for_ocr = 0; |
| cmd->drv_pkt_time = 0; |
| cmd->pkt = NULL; |
| cmd->request_desc = NULL; |
| } |
| |
| mutex_exit(&instance->cmd_app_pool_mtx); |
| |
| if (cmd != NULL) { |
| bzero(cmd->scsi_io_request, |
| sizeof (Mpi2RaidSCSIIORequest_t)); |
| } |
| |
| return (cmd); |
| } |
| |
| /* |
| * return_raid_msg_pkt : Return a cmd to free command pool |
| */ |
| void |
| return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) |
| { |
| mutex_enter(&instance->cmd_pool_mtx); |
| ASSERT(mutex_owned(&instance->cmd_pool_mtx)); |
| |
| |
| mlist_add_tail(&cmd->list, &instance->cmd_pool_list); |
| |
| mutex_exit(&instance->cmd_pool_mtx); |
| } |
| |
| void |
| return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) |
| { |
| mutex_enter(&instance->cmd_app_pool_mtx); |
| ASSERT(mutex_owned(&instance->cmd_app_pool_mtx)); |
| |
| mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list); |
| |
| mutex_exit(&instance->cmd_app_pool_mtx); |
| } |
| |
| |
| void |
| mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance, |
| struct mrsas_cmd *cmd) |
| { |
| Mpi2RaidSCSIIORequest_t *scsi_raid_io; |
| Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee; |
| MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion; |
| uint32_t index; |
| ddi_acc_handle_t acc_handle = |
| instance->mpi2_frame_pool_dma_obj.acc_handle; |
| |
| if (!instance->tbolt) { |
| con_log(CL_ANN, (CE_NOTE, "Not a Thunderbolt (MFA) adapter.")); |
| return; |
| } |
| |
| index = cmd->index; |
| |
| ReqDescUnion = mr_sas_get_request_descriptor(instance, index); |
| |
| if (!ReqDescUnion) { |
| con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]")); |
| return; |
| } |
| |
| con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID)); |
| |
| ReqDescUnion->Words = 0; |
| |
| ReqDescUnion->SCSIIO.RequestFlags = |
| (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
| MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| |
| ReqDescUnion->SCSIIO.SMID = cmd->SMID; |
| |
| cmd->request_desc = ReqDescUnion; |
| |
| /* get raid message frame pointer */ |
| scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; |
| |
| if (instance->gen3) { |
| Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *) |
| &scsi_raid_io->SGL.IeeeChain; |
| sgl_ptr_end += instance->max_sge_in_main_msg - 1; |
| ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0); |
| } |
| |
| ddi_put8(acc_handle, &scsi_raid_io->Function, |
| MPI2_FUNCTION_PASSTHRU_IO_REQUEST); |
| |
| ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0, |
| offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4); |
| |
| ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, |
| (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16); |
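| /* |
| * Note: SGLOffset0 above is in 32-bit words, while ChainOffset |
| * is in 16-byte units, hence the different divisors. |
| */ |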
| |
| ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress, |
| cmd->sense_phys_addr1); |
| |
| |
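| /* |
| * The legacy MFI frame is not copied into the MPI2 message; the |
| * single IEEE chain SGE built below points the firmware at the |
| * MFI frame's physical address, so the MPI2 request merely wraps |
| * the MFI command. |
| */ |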
| scsi_raid_io_sgl_ieee = |
| (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain; |
| |
| ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address, |
| (U64)cmd->frame_phys_addr); |
| |
| ddi_put8(acc_handle, |
| &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR)); |
| /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */ |
| ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024); |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "[MFI CMD PHY ADDRESS]:%" PRIx64, |
| scsi_raid_io_sgl_ieee->Address)); |
| con_log(CL_ANN1, (CE_NOTE, |
| "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length)); |
| con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x", |
| scsi_raid_io_sgl_ieee->Flags)); |
| } |
| |
| |
| void |
| tbolt_complete_cmd(struct mrsas_instance *instance, |
| struct mrsas_cmd *cmd) |
| { |
| uint8_t status; |
| uint8_t extStatus; |
| uint8_t function; |
| uint8_t arm; |
| struct scsa_cmd *acmd; |
| struct scsi_pkt *pkt; |
| struct scsi_arq_status *arqstat; |
| Mpi2RaidSCSIIORequest_t *scsi_raid_io; |
| LD_LOAD_BALANCE_INFO *lbinfo; |
| ddi_acc_handle_t acc_handle = |
| instance->mpi2_frame_pool_dma_obj.acc_handle; |
| |
| scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request; |
| |
| status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status); |
| extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus); |
| |
| con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status)); |
| con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus)); |
| |
| if (status != MFI_STAT_OK) { |
| con_log(CL_ANN, (CE_WARN, |
| "IO Cmd Failed SMID %x", cmd->SMID)); |
| } else { |
| con_log(CL_ANN, (CE_NOTE, |
| "IO Cmd Success SMID %x", cmd->SMID)); |
| } |
| |
| /* regular commands */ |
| |
| function = ddi_get8(acc_handle, &scsi_raid_io->Function); |
| DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function, |
| uint8_t, status, uint8_t, extStatus); |
| |
| switch (function) { |
| |
| case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ |
| acmd = (struct scsa_cmd *)cmd->cmd; |
| lbinfo = &instance->load_balance_info[acmd->device_id]; |
| |
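| /* |
| * If this command was load balanced across a RAID-1 pair, |
| * work out which arm it was sent to and drop that arm's |
| * pending-command count. |
| */ |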
| if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) { |
| arm = lbinfo->raid1DevHandle[0] == |
| scsi_raid_io->DevHandle ? 0 : 1; |
| |
| lbinfo->scsi_pending_cmds[arm]--; |
| cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG; |
| } |
| con_log(CL_DLEVEL3, (CE_NOTE, |
| "FastPath IO Completion Success ")); |
| /* FALLTHRU */ |
| |
| case MPI2_FUNCTION_LD_IO_REQUEST: { /* Regular Path IO. */ |
| acmd = (struct scsa_cmd *)cmd->cmd; |
| pkt = (struct scsi_pkt *)CMD2PKT(acmd); |
| |
| if (acmd->cmd_flags & CFLAG_DMAVALID) { |
| if (acmd->cmd_flags & CFLAG_CONSISTENT) { |
| (void) ddi_dma_sync(acmd->cmd_dmahandle, |
| acmd->cmd_dma_offset, acmd->cmd_dma_len, |
| DDI_DMA_SYNC_FORCPU); |
| } |
| } |
| |
| pkt->pkt_reason = CMD_CMPLT; |
| pkt->pkt_statistics = 0; |
| pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | |
| STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; |
| |
| con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: " |
| "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0], |
| ((acmd->islogical) ? "LD" : "PD"), |
| acmd->cmd_dmacount, cmd->SMID, status)); |
| |
| if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { |
| struct scsi_inquiry *inq; |
| |
| if (acmd->cmd_dmacount != 0) { |
| bp_mapin(acmd->cmd_buf); |
| inq = (struct scsi_inquiry *) |
| acmd->cmd_buf->b_un.b_addr; |
| |
| /* don't expose physical drives to OS */ |
| if (acmd->islogical && |
| (status == MFI_STAT_OK)) { |
| display_scsi_inquiry((caddr_t)inq); |
| } else if ((status == MFI_STAT_OK) && |
| inq->inq_dtype == DTYPE_DIRECT) { |
| display_scsi_inquiry((caddr_t)inq); |
| } else { |
| /* for physical disk */ |
| status = MFI_STAT_DEVICE_NOT_FOUND; |
| } |
| } |
| } |
| |
| switch (status) { |
| case MFI_STAT_OK: |
| case MFI_STAT_LD_CC_IN_PROGRESS: |
| case MFI_STAT_LD_RECON_IN_PROGRESS: |
| pkt->pkt_scbp[0] = STATUS_GOOD; |
| break; |
| case MFI_STAT_LD_INIT_IN_PROGRESS: |
| pkt->pkt_reason = CMD_TRAN_ERR; |
| break; |
| case MFI_STAT_SCSI_IO_FAILED: |
| dev_err(instance->dip, CE_WARN, |
| "tbolt_complete_cmd: scsi_io failed"); |
| pkt->pkt_reason = CMD_TRAN_ERR; |
| break; |
| case MFI_STAT_SCSI_DONE_WITH_ERROR: |
| con_log(CL_ANN, (CE_WARN, |
| "tbolt_complete_cmd: scsi_done with error")); |
| |
| pkt->pkt_reason = CMD_CMPLT; |
| ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; |
| |
| if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { |
| con_log(CL_ANN, |
| (CE_WARN, "TEST_UNIT_READY fail")); |
| } else { |
| pkt->pkt_state |= STATE_ARQ_DONE; |
| arqstat = (void *)(pkt->pkt_scbp); |
| arqstat->sts_rqpkt_reason = CMD_CMPLT; |
| arqstat->sts_rqpkt_resid = 0; |
| arqstat->sts_rqpkt_state |= |
| STATE_GOT_BUS | STATE_GOT_TARGET |
| | STATE_SENT_CMD |
| | STATE_XFERRED_DATA; |
| *(uint8_t *)&arqstat->sts_rqpkt_status = |
| STATUS_GOOD; |
| con_log(CL_ANN1, |
| (CE_NOTE, "Copying Sense data %x", |
| cmd->SMID)); |
| |
| ddi_rep_get8(acc_handle, |
| (uint8_t *)&(arqstat->sts_sensedata), |
| cmd->sense1, |
| sizeof (struct scsi_extended_sense), |
| DDI_DEV_AUTOINCR); |
| |
| } |
| break; |
| case MFI_STAT_LD_OFFLINE: |
| dev_err(instance->dip, CE_WARN, |
| "tbolt_complete_cmd: ld offline " |
| "CDB[0]=0x%x targetId=0x%x devhandle=0x%x", |
| ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]), |
| ddi_get16(acc_handle, |
| &scsi_raid_io->RaidContext.ldTargetId), |
| ddi_get16(acc_handle, &scsi_raid_io->DevHandle)); |
| |
| pkt->pkt_reason = CMD_DEV_GONE; |
| pkt->pkt_statistics = STAT_DISCON; |
| break; |
| case MFI_STAT_DEVICE_NOT_FOUND: |
| con_log(CL_ANN, (CE_CONT, |
| "tbolt_complete_cmd: device not found error")); |
| pkt->pkt_reason = CMD_DEV_GONE; |
| pkt->pkt_statistics = STAT_DISCON; |
| break; |
| |
| case MFI_STAT_LD_LBA_OUT_OF_RANGE: |
| pkt->pkt_state |= STATE_ARQ_DONE; |
| pkt->pkt_reason = CMD_CMPLT; |
| ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; |
| |
| arqstat = (void *)(pkt->pkt_scbp); |
| arqstat->sts_rqpkt_reason = CMD_CMPLT; |
| arqstat->sts_rqpkt_resid = 0; |
| arqstat->sts_rqpkt_state |= STATE_GOT_BUS |
| | STATE_GOT_TARGET | STATE_SENT_CMD |
| | STATE_XFERRED_DATA; |
| *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD; |
| |
| arqstat->sts_sensedata.es_valid = 1; |
| arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST; |
| arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE; |
| |
| /* |
| * LOGICAL BLOCK ADDRESS OUT OF RANGE: |
| * ASC: 0x21h; ASCQ: 0x00h; |
| */ |
| arqstat->sts_sensedata.es_add_code = 0x21; |
| arqstat->sts_sensedata.es_qual_code = 0x00; |
| break; |
| case MFI_STAT_INVALID_CMD: |
| case MFI_STAT_INVALID_DCMD: |
| case MFI_STAT_INVALID_PARAMETER: |
| case MFI_STAT_INVALID_SEQUENCE_NUMBER: |
| default: |
| dev_err(instance->dip, CE_WARN, |
| "tbolt_complete_cmd: invalid or unhandled status 0x%x", |
| status); |
| pkt->pkt_reason = CMD_TRAN_ERR; |
| |
| break; |
| } |
| |
| atomic_add_16(&instance->fw_outstanding, (-1)); |
| |
| (void) mrsas_common_check(instance, cmd); |
| if (acmd->cmd_dmahandle) { |
| if (mrsas_check_dma_handle(acmd->cmd_dmahandle) != |
| DDI_SUCCESS) { |
| ddi_fm_service_impact(instance->dip, |
| DDI_SERVICE_UNAFFECTED); |
| pkt->pkt_reason = CMD_TRAN_ERR; |
| pkt->pkt_statistics = 0; |
| } |
| } |
| |
| /* Call the callback routine */ |
| if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) |
| (*pkt->pkt_comp)(pkt); |
| |
| con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID)); |
| |
| ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0); |
| ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0); |
| |
| return_raid_msg_pkt(instance, cmd); |
| break; |
| } |
| case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */ |
| |
| if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO && |
| cmd->frame->dcmd.mbox.b[1] == 1) { |
| |
| mutex_enter(&instance->sync_map_mtx); |
| |
| con_log(CL_ANN, (CE_NOTE, |
| "LDMAP sync command SMID RECEIVED 0x%X", |
| cmd->SMID)); |
| if (cmd->frame->hdr.cmd_status != 0) { |
| dev_err(instance->dip, CE_WARN, |
| "map sync failed, status = 0x%x.", |
| cmd->frame->hdr.cmd_status); |
| } else { |
| instance->map_id++; |
| con_log(CL_ANN1, (CE_NOTE, |
| "map sync received, switched map_id to %" |
| PRIu64, instance->map_id)); |
| } |
| |
| if (MR_ValidateMapInfo( |
| instance->ld_map[instance->map_id & 1], |
| instance->load_balance_info)) { |
| instance->fast_path_io = 1; |
| } else { |
| instance->fast_path_io = 0; |
| } |
| |
| con_log(CL_ANN, (CE_NOTE, |
| "instance->fast_path_io %d", |
| instance->fast_path_io)); |
| |
| instance->unroll.syncCmd = 0; |
| |
| if (instance->map_update_cmd == cmd) { |
| return_raid_msg_pkt(instance, cmd); |
| atomic_add_16(&instance->fw_outstanding, (-1)); |
| (void) mrsas_tbolt_sync_map_info(instance); |
| } |
| |
| con_log(CL_ANN1, (CE_NOTE, |
| "LDMAP sync completed, ldcount=%d", |
| instance->ld_map[instance->map_id & 1] |
| ->raidMap.ldCount)); |
| mutex_exit(&instance->sync_map_mtx); |
| break; |
| } |
| |
| if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) { |
| con_log(CL_ANN1, (CE_CONT, |
| "AEN command SMID RECEIVED 0x%X", |
| cmd->SMID)); |
| if ((instance->aen_cmd == cmd) && |
| (instance->aen_cmd->abort_aen)) { |
| con_log(CL_ANN, (CE_WARN, "mrsas_softintr: " |
| "aborted_aen returned")); |
| } else { |
| atomic_add_16(&instance->fw_outstanding, (-1)); |
| service_mfi_aen(instance, cmd); |
| } |
| } |
| |
| if (cmd->sync_cmd == MRSAS_TRUE) { |
| con_log(CL_ANN1, (CE_CONT, |
| "Sync-mode Command Response SMID RECEIVED 0x%X", |
| cmd->SMID)); |
| |
| tbolt_complete_cmd_in_sync_mode(instance, cmd); |
| } else { |
| con_log(CL_ANN, (CE_CONT, |
| "tbolt_complete_cmd: unexpected completion, SMID 0x%X", |
| cmd->SMID)); |
| } |
| break; |
| default: |
| mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); |
| ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); |
| |
| con_log(CL_ANN, |
| (CE_NOTE, "tbolt_complete_cmd: unknown request function type")); |
| break; |
| } |
| } |
| |
| uint_t |
| mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance) |
| { |
| uint8_t replyType; |
| Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc; |
| Mpi2ReplyDescriptorsUnion_t *desc; |
| uint16_t smid; |
| union desc_value d_val; |
| struct mrsas_cmd *cmd; |
| |
| struct mrsas_header *hdr; |
| struct scsi_pkt *pkt; |
| |
| (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle, |
| 0, 0, DDI_DMA_SYNC_FORDEV); |
| |
| (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle, |
| 0, 0, DDI_DMA_SYNC_FORCPU); |
| |
| desc = instance->reply_frame_pool; |
| desc += instance->reply_read_index; |
| |
| replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; |
| replyType = replyDesc->ReplyFlags & |
| MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; |
| |
| if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) |
| return (DDI_INTR_UNCLAIMED); |
| |
| if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) |
| != DDI_SUCCESS) { |
| mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); |
| ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); |
| con_log(CL_ANN1, |
| (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): " |
| "FMA check, returning DDI_INTR_UNCLAIMED")); |
| return (DDI_INTR_CLAIMED); |
| } |
| |
| con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64, |
| (void *)desc, desc->Words)); |
| |
| d_val.word = desc->Words; |
| |
| |
| /* Read Reply descriptor */ |
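| /* |
| * Descriptors are consumed in ring order until a slot that |
| * still reads all ones (unused) is found; each consumed slot |
| * is reset to all ones below, and reply_read_index wraps to |
| * zero once it reaches reply_q_depth. |
| */ |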
| while ((d_val.u1.low != 0xffffffff) && |
| (d_val.u1.high != 0xffffffff)) { |
| |
| (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle, |
| 0, 0, DDI_DMA_SYNC_FORCPU); |
| |
| smid = replyDesc->SMID; |
| |
| if (!smid || smid > instance->max_fw_cmds + 1) { |
| con_log(CL_ANN1, (CE_NOTE, |
| "Reply Desc at Break = %p Words = %" PRIx64, |
| (void *)desc, desc->Words)); |
| break; |
| } |
| |
| cmd = instance->cmd_list[smid - 1]; |
| if (!cmd) { |
| con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_" |
| "outstanding_cmd: invalid command " |
| "or poll command received in completion path")); |
| } else { |
| mutex_enter(&instance->cmd_pend_mtx); |
| if (cmd->sync_cmd == MRSAS_TRUE) { |
| hdr = (struct mrsas_header *)&cmd->frame->hdr; |
| if (hdr) { |
| con_log(CL_ANN1, (CE_NOTE, "mr_sas_" |
| "tbolt_process_outstanding_cmd:" |
| " mlist_del_init(&cmd->list).")); |
| mlist_del_init(&cmd->list); |
| } |
| } else { |
| pkt = cmd->pkt; |
| if (pkt) { |
| con_log(CL_ANN1, (CE_NOTE, "mr_sas_" |
| "tbolt_process_outstanding_cmd:" |
| "mlist_del_init(&cmd->list).")); |
| mlist_del_init(&cmd->list); |
| } |
| } |
| |
| mutex_exit(&instance->cmd_pend_mtx); |
| |
| tbolt_complete_cmd(instance, cmd); |
| } |
| /* set it back to all 1s. */ |
| desc->Words = -1LL; |
| |
| instance->reply_read_index++; |
| |
| if (instance->reply_read_index >= (instance->reply_q_depth)) { |
| con_log(CL_ANN1, (CE_NOTE, "wrap around")); |
| instance->reply_read_index = 0; |
| } |
| |
| /* Get the next reply descriptor */ |
| if (!instance->reply_read_index) |
| desc = instance->reply_frame_pool; |
| else |
| desc++; |
| |
| replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; |
| |