/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
*
* IEEE 1284 Parallel Port Device Driver
*
*/
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/conf.h> /* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h> /* for modldrv */
#include <sys/stat.h> /* ddi_create_minor_node S_IFCHR */
#include <sys/open.h>
#include <sys/ddi_impldefs.h>
#include <sys/kstat.h>
#include <sys/prnio.h>
#include <sys/ecppreg.h> /* hw description */
#include <sys/ecppio.h> /* ioctl description */
#include <sys/ecppvar.h> /* driver description */
#include <sys/dma_engine.h>
#include <sys/dma_i8237A.h>
/*
* Background
* ==========
* The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
* fully interlocked, bidirectional parallel communications between hosts
* and printers or other peripherals." (1.1) The standard defines 5 modes
* of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
* in direction, bandwidth, pin assignment, DMA capability, etc.
*
* Negotiation is the mechanism for moving between modes. Compatibility mode
* is the default mode, from which negotiations to other modes occur and
* to which both host and peripheral fall back in case of interface errors.
* Compatibility mode provides a unidirectional (forward) channel for
* communicating with old pre-1284 peripherals.
*
* Each mode has a number of phases. A [mode, phase] pair represents the
* interface state. The host initiates all transfers, though the peripheral
* can request a backchannel transfer by asserting the nErr pin.
*
* The ecpp driver implements an IEEE 1284-compliant host using a combination
* of hardware and software. The hardware part is represented by a controller,
* which is a part of the SuperIO chip. Ecpp supports the following SuperIOs:
* PC82332/PC82336 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
* Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
*
* Negotiation is performed in software. Transfer may be performed either
* in software by driving output pins for each byte (PIO method), or with
* hardware assistance - SuperIO has a 16-byte FIFO, which is filled by
* the driver (normally using DMA), while the chip performs the actual xfer.
* PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
*
* Driver currently supports the following modes:
*
* - Compatibility mode: byte-wide forward channel ~50KB/sec;
* pp->io_mode defines PIO or DMA method of transfer;
* - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
* - ECP mode: byte-wide bidirectional channel (~1MB/sec);
*
* Theory of operation
* ===================
* The manner in which ecpp drives the 1284 interface is that of a state machine.
* State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase {ECPP_PHASE_*}
* and transfer method {PIO, DMA}. State is a function of application actions
* {write(2), ioctl(2)} and peripheral reaction.
*
* 1284 interface state is described by the following variables:
* pp->current_mode -- 1284 mode used for forward transfers;
* pp->backchannel -- 1284 mode used for backward transfers;
* pp->current_phase -- 1284 phase;
*
* Bidirectional operation in Compatibility mode is provided by a combination:
* pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
* ECPP_CENTRONICS means no backchannel
*
* Driver internal state is defined by pp->e_busy as follows:
* ECPP_IDLE -- idle, no active transfers;
* ECPP_BUSY -- transfer is in progress;
* ECPP_ERR -- have data to transfer, but peripheral can't receive data;
* ECPP_FLUSH -- flushing the queues;
*
* When opened, the driver is in ECPP_IDLE state, current mode is ECPP_CENTRONICS.
* Default negotiation tries to negotiate to the best mode supported by the
* printer and sets pp->current_mode and pp->backchannel accordingly.
*
* When output data arrives in M_DATA mblks, ecpp_wput() puts them on the queue
* to let ecpp_wsrv() concatenate small blocks into one big transfer
* by copying them into pp->ioblock. If the first mblk's data is bigger than
* pp->ioblock, then that mblk is used instead of the i/o block
* (and is pointed to by pp->msg).
*
* Before starting the transfer the driver checks whether the peripheral is
* ready by calling ecpp_check_status(); if it is not, the driver goes into
* ECPP_ERR state and schedules ecpp_wsrv_timer(), which qenable()s the wq,
* effectively rechecking the peripheral readiness and restarting itself
* until it is ready. The transfer is then started by calling ecpp_start()
* and the driver goes into ECPP_BUSY state.
*
* While a transfer is in progress all arriving messages will be queued up.
* A transfer can end in either of two ways:
* - interrupt occurs, ecpp_isr() checks if all the data was transferred, if so
* cleanup and go ECPP_IDLE, otherwise putback untransferred and qenable();
* - ecpp_xfer_timeout() cancels the transfer and puts back untransferred data;
*
* The PIO transfer method is very CPU intensive: for each sent byte the
* peripheral state is checked, then the byte is transferred and the driver
* waits for an nAck interrupt; ecpp_isr() will then check whether there is
* more data and if so triggers the soft interrupt, which transfers the next
* byte. The PIO method is needed only for legacy printers which are sensitive
* to the strobe problem (Bugid 4192788).
*
* ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
* going idle (ecpp_idle_phase()). Many routines qenable() the write queue,
* meaning "check if there are pending requests, process them and go idle".
*
* In its idle state the driver will always try to listen to the backchannel
* (as advised by 1284).
*
* The mechanism for handling backchannel requests is as follows:
* - when the peripheral has data to send it asserts nErr pin
* (and also nAck in Nibble Mode) which results in an interrupt on the host;
* - ISR creates M_CTL message containing an ECPP_BACKCHANNEL byte and
* puts it back on the write queue;
* - ecpp_wsrv() gets M_CTL and calls ecpp_peripheral2host(), which kicks off
* the transfer;
*
* This is how the Nibble and ECP mode backchannels are implemented.
* If the read queue gets full, the backchannel request is rejected.
* As the application reads data and queue size falls below the low watermark,
* ecpp_rsrv() gets called and enables the backchannel again.
*
* Future enhancements
* ===================
*
* Support new modes: Byte and EPP.
*/
#ifndef ECPP_DEBUG
#define ECPP_DEBUG 0
#endif /* ECPP_DEBUG */
int ecpp_debug = ECPP_DEBUG;
int noecp = 0; /* flag not to use ECP mode */
/* driver entry point fn definitions */
static int ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
static int ecpp_close(queue_t *, int, cred_t *);
static uint_t ecpp_isr(caddr_t);
static uint_t ecpp_softintr(caddr_t);
/* configuration entry point fn definitions */
static int ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
static int ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);
/* isr support routines */
static uint_t ecpp_nErr_ihdlr(struct ecppunit *);
static uint_t ecpp_pio_ihdlr(struct ecppunit *);
static uint_t ecpp_dma_ihdlr(struct ecppunit *);
static uint_t ecpp_M1553_intr(struct ecppunit *);
/* configuration support routines */
static void ecpp_get_props(struct ecppunit *);
/* Streams Routines */
static int ecpp_wput(queue_t *, mblk_t *);
static int ecpp_wsrv(queue_t *);
static int ecpp_rsrv(queue_t *);
static void ecpp_flush(struct ecppunit *, int);
static void ecpp_start(struct ecppunit *, caddr_t, size_t);
/* ioctl handling */
static void ecpp_putioc(queue_t *, mblk_t *);
static void ecpp_srvioc(queue_t *, mblk_t *);
static void ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
static void ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
static void ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
static void ecpp_srvioc_devid(queue_t *, mblk_t *,
struct ecpp_device_id *, int *);
static void ecpp_srvioc_prnif(queue_t *, mblk_t *);
static void ecpp_ack_ioctl(queue_t *, mblk_t *);
static void ecpp_nack_ioctl(queue_t *, mblk_t *, int);
/* kstat routines */
static void ecpp_kstat_init(struct ecppunit *);
static int ecpp_kstat_update(kstat_t *, int);
static int ecpp_kstatintr_update(kstat_t *, int);
/* dma routines */
static void ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
static uint8_t ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
static uint8_t ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);
/* pio routines */
static void ecpp_pio_writeb(struct ecppunit *);
static void ecpp_xfer_cleanup(struct ecppunit *);
static uint8_t ecpp_prep_pio_xfer(struct ecppunit *, caddr_t, size_t);
/* misc */
static uchar_t ecpp_reset_port_regs(struct ecppunit *);
static void ecpp_xfer_timeout(void *);
static void ecpp_fifo_timer(void *);
static void ecpp_wsrv_timer(void *);
static uchar_t dcr_write(struct ecppunit *, uint8_t);
static uchar_t ecr_write(struct ecppunit *, uint8_t);
static uchar_t ecpp_check_status(struct ecppunit *);
static int ecpp_backchan_req(struct ecppunit *);
static void ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *);
static uint_t ecpp_get_prn_ifcap(struct ecppunit *);
/* stubs */
static void empty_config_mode(struct ecppunit *);
static void empty_mask_intr(struct ecppunit *);
/* PC87332 support */
static int pc87332_map_regs(struct ecppunit *);
static void pc87332_unmap_regs(struct ecppunit *);
static int pc87332_config_chip(struct ecppunit *);
static void pc87332_config_mode(struct ecppunit *);
static uint8_t pc87332_read_config_reg(struct ecppunit *, uint8_t);
static void pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
static void cheerio_mask_intr(struct ecppunit *);
static void cheerio_unmask_intr(struct ecppunit *);
static int cheerio_dma_start(struct ecppunit *);
static int cheerio_dma_stop(struct ecppunit *, size_t *);
static size_t cheerio_getcnt(struct ecppunit *);
static void cheerio_reset_dcsr(struct ecppunit *);
/* PC97317 support */
static int pc97317_map_regs(struct ecppunit *);
static void pc97317_unmap_regs(struct ecppunit *);
static int pc97317_config_chip(struct ecppunit *);
static void pc97317_config_mode(struct ecppunit *);
/* M1553 Southbridge support */
static int m1553_map_regs(struct ecppunit *pp);
static void m1553_unmap_regs(struct ecppunit *pp);
static int m1553_config_chip(struct ecppunit *);
static uint8_t m1553_read_config_reg(struct ecppunit *, uint8_t);
static void m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
/* M1553 Southbridge DMAC 8237 support routines */
static int dma8237_dma_start(struct ecppunit *);
static int dma8237_dma_stop(struct ecppunit *, size_t *);
static size_t dma8237_getcnt(struct ecppunit *);
static void dma8237_write_addr(struct ecppunit *, uint32_t);
static void dma8237_write_count(struct ecppunit *, uint32_t);
static uint32_t dma8237_read_count(struct ecppunit *);
static void dma8237_write(struct ecppunit *, int, uint8_t);
static uint8_t dma8237_read(struct ecppunit *, int);
#ifdef INCLUDE_DMA8237_READ_ADDR
static uint32_t dma8237_read_addr(struct ecppunit *);
#endif
/* i86 PC support routines */
#if defined(__x86)
static int x86_dma_start(struct ecppunit *);
static int x86_dma_stop(struct ecppunit *, size_t *);
static int x86_map_regs(struct ecppunit *);
static void x86_unmap_regs(struct ecppunit *);
static int x86_config_chip(struct ecppunit *);
static size_t x86_getcnt(struct ecppunit *);
#endif
/* IEEE 1284 phase transitions */
static void ecpp_1284_init_interface(struct ecppunit *);
static int ecpp_1284_termination(struct ecppunit *);
static uchar_t ecpp_idle_phase(struct ecppunit *);
static int ecp_forward2reverse(struct ecppunit *);
static int ecp_reverse2forward(struct ecppunit *);
static int read_nibble_backchan(struct ecppunit *);
/* reverse transfers */
static uint_t ecpp_peripheral2host(struct ecppunit *);
static uchar_t ecp_peripheral2host(struct ecppunit *);
static uchar_t nibble_peripheral2host(struct ecppunit *pp, uint8_t *);
static int ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int);
static void ecpp_ecp_read_timeout(void *);
static void ecpp_ecp_read_completion(struct ecppunit *);
/* IEEE 1284 mode transitions */
static void ecpp_default_negotiation(struct ecppunit *);
static int ecpp_mode_negotiation(struct ecppunit *, uchar_t);
static int ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *);
static int ecp_negotiation(struct ecppunit *);
static int nibble_negotiation(struct ecppunit *);
static int devidnib_negotiation(struct ecppunit *);
/* IEEE 1284 utility routines */
static int wait_dsr(struct ecppunit *, uint8_t, uint8_t, int);
/* debugging functions */
static void ecpp_error(dev_info_t *, char *, ...);
static uchar_t ecpp_get_error_status(uchar_t);
/*
* Chip-dependent structures
*/
static ddi_dma_attr_t cheerio_dma_attr = {
DMA_ATTR_VERSION, /* version */
0x00000000ull, /* dlim_addr_lo */
0xfffffffeull, /* dlim_addr_hi */
0xffffff, /* DMA counter register */
1, /* DMA address alignment */
0x74, /* burst sizes */
0x0001, /* min effective DMA size */
0xffff, /* maximum transfer size */
0xffff, /* segment boundary */
1, /* s/g list length */
1, /* granularity of device */
0 /* DMA flags */
};
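/*
* Each struct ecpp_hw below is a per-chip ops vector: register mapping,
* chip and mode configuration, interrupt masking and DMA support routines,
* plus a pointer to the corresponding DMA attributes
*/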
static struct ecpp_hw pc87332 = {
pc87332_map_regs,
pc87332_unmap_regs,
pc87332_config_chip,
pc87332_config_mode,
cheerio_mask_intr,
cheerio_unmask_intr,
cheerio_dma_start,
cheerio_dma_stop,
cheerio_getcnt,
&cheerio_dma_attr
};
static struct ecpp_hw pc97317 = {
pc97317_map_regs,
pc97317_unmap_regs,
pc97317_config_chip,
pc97317_config_mode,
cheerio_mask_intr,
cheerio_unmask_intr,
cheerio_dma_start,
cheerio_dma_stop,
cheerio_getcnt,
&cheerio_dma_attr
};
static ddi_dma_attr_t i8237_dma_attr = {
DMA_ATTR_VERSION, /* version */
0x00000000ull, /* dlim_addr_lo */
0xfffffffeull, /* dlim_addr_hi */
0xffff, /* DMA counter register */
1, /* DMA address alignment */
0x01, /* burst sizes */
0x0001, /* min effective DMA size */
0xffff, /* maximum transfer size */
0x7fff, /* segment boundary */
1, /* s/g list length */
1, /* granularity of device */
0 /* DMA flags */
};
static struct ecpp_hw m1553 = {
m1553_map_regs,
m1553_unmap_regs,
m1553_config_chip,
empty_config_mode, /* no config_mode */
empty_mask_intr, /* no mask_intr */
empty_mask_intr, /* no unmask_intr */
dma8237_dma_start,
dma8237_dma_stop,
dma8237_getcnt,
&i8237_dma_attr
};
#if defined(__x86)
static ddi_dma_attr_t sb_dma_attr = {
DMA_ATTR_VERSION, /* version */
0x00000000ull, /* dlim_addr_lo */
0xffffff, /* dlim_addr_hi */
0xffff, /* DMA counter register */
1, /* DMA address alignment */
0x01, /* burst sizes */
0x0001, /* min effective DMA size */
0xffffffff, /* maximum transfer size */
0xffff, /* segment boundary */
1, /* s/g list length */
1, /* granularity of device */
0 /* DMA flags */
};
static struct ecpp_hw x86 = {
x86_map_regs,
x86_unmap_regs,
x86_config_chip,
empty_config_mode, /* no config_mode */
empty_mask_intr, /* no mask_intr */
empty_mask_intr, /* no unmask_intr */
x86_dma_start,
x86_dma_stop,
x86_getcnt,
&sb_dma_attr
};
#endif
/*
* list of supported devices
*/
struct ecpp_hw_bind ecpp_hw_bind[] = {
{ "ns87317-ecpp", &pc97317, "PC97317" },
{ "pnpALI,1533,3", &m1553, "M1553" },
{ "ecpp", &pc87332, "PC87332" },
#if defined(__x86)
{ "lp", &x86, "i86pc"},
#endif
};
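/* device register access attributes: little-endian, strict ordering */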
static ddi_device_acc_attr_t acc_attr = {
DDI_DEVICE_ATTR_V0,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC
};
static struct ecpp_transfer_parms default_xfer_parms = {
FWD_TIMEOUT_DEFAULT, /* write timeout in seconds */
ECPP_CENTRONICS /* supported mode */
};
/* prnio interface info string */
static const char prn_ifinfo[] = PRN_PARALLEL;
/* prnio timeouts */
static const struct prn_timeouts prn_timeouts_default = {
FWD_TIMEOUT_DEFAULT, /* forward timeout */
REV_TIMEOUT_DEFAULT /* reverse timeout */
};
static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY;
static int ecpp_def_timeout = 90; /* left in for 2.7 compatibility */
static void *ecppsoft_statep;
/*
* STREAMS framework manages locks for these structures
*/
_NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", datab))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", queue))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions))
struct module_info ecppinfo = {
/* id, name, min pkt siz, max pkt siz, hi water, low water */
42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT
};
static struct qinit ecpp_rinit = {
putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};
static struct qinit ecpp_wint = {
ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};
struct streamtab ecpp_str_info = {
&ecpp_rinit, &ecpp_wint, NULL, NULL
};
static struct cb_ops ecpp_cb_ops = {
nodev, /* cb_open */
nodev, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
nodev, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
&ecpp_str_info, /* cb_stream */
(D_NEW | D_MP | D_MTPERQ) /* cb_flag */
};
/*
* Declare ops vectors for auto configuration.
*/
struct dev_ops ecpp_ops = {
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
ecpp_getinfo, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
ecpp_attach, /* devo_attach */
ecpp_detach, /* devo_detach */
nodev, /* devo_reset */
&ecpp_cb_ops, /* devo_cb_ops */
(struct bus_ops *)NULL, /* devo_bus_ops */
nulldev, /* devo_power */
ddi_quiesce_not_needed, /* devo_quiesce */
};
extern struct mod_ops mod_driverops;
static struct modldrv ecppmodldrv = {
&mod_driverops, /* type of module - driver */
"parallel port driver",
&ecpp_ops,
};
static struct modlinkage ecppmodlinkage = {
MODREV_1,
&ecppmodldrv,
0
};
/*
*
* DDI/DKI entry points and supplementary routines
*
*/
int
_init(void)
{
int error;
if ((error = mod_install(&ecppmodlinkage)) == 0) {
(void) ddi_soft_state_init(&ecppsoft_statep,
sizeof (struct ecppunit), 1);
}
return (error);
}
int
_fini(void)
{
int error;
if ((error = mod_remove(&ecppmodlinkage)) == 0) {
ddi_soft_state_fini(&ecppsoft_statep);
}
return (error);
}
int
_info(struct modinfo *modinfop)
{
return (mod_info(&ecppmodlinkage, modinfop));
}
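/*
* ecpp_attach() handles DDI_ATTACH and DDI_RESUME.
* For DDI_ATTACH it allocates the soft state, determines the SuperIO type,
* maps registers, sets up the DMA handle and interrupt handlers, creates
* the minor node, allocates the i/o block and configures the chip.
* For DDI_RESUME it reinitializes the chip and, if the port is open,
* renegotiates the mode that was in use before the suspend.
*/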
static int
ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
int instance;
char name[16];
struct ecppunit *pp;
struct ecpp_hw_bind *hw_bind;
instance = ddi_get_instance(dip);
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
return (DDI_FAILURE);
}
mutex_enter(&pp->umutex);
pp->suspended = FALSE;
/*
* Initialize the chip and restore current mode if needed
*/
(void) ECPP_CONFIG_CHIP(pp);
(void) ecpp_reset_port_regs(pp);
if (pp->oflag == TRUE) {
int current_mode = pp->current_mode;
(void) ecpp_1284_termination(pp);
(void) ecpp_mode_negotiation(pp, current_mode);
}
mutex_exit(&pp->umutex);
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) {
ecpp_error(dip, "ddi_soft_state_zalloc failed\n");
goto fail;
}
pp = ddi_get_soft_state(ecppsoft_statep, instance);
pp->dip = dip;
pp->suspended = FALSE;
/*
* Determine SuperIO type and set chip-dependent variables
*/
hw_bind = ecpp_determine_sio_type(pp);
if (hw_bind == NULL) {
cmn_err(CE_NOTE, "parallel port controller not supported");
goto fail_sio;
} else {
pp->hw = hw_bind->hw;
ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info);
}
/*
* Map registers
*/
if (ECPP_MAP_REGS(pp) != SUCCESS) {
goto fail_map;
}
if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT,
NULL, &pp->dma_handle) != DDI_SUCCESS) {
ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n");
goto fail_dma;
}
if (ddi_get_iblock_cookie(dip, 0,
&pp->ecpp_trap_cookie) != DDI_SUCCESS) {
ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n");
goto fail_ibc;
}
mutex_init(&pp->umutex, NULL, MUTEX_DRIVER,
(void *)pp->ecpp_trap_cookie);
cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL);
if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, ecpp_isr,
(caddr_t)pp) != DDI_SUCCESS) {
ecpp_error(dip, "ecpp_attach: failed to add hard intr\n");
goto fail_intr;
}
if (ddi_add_softintr(dip, DDI_SOFTINT_LOW,
&pp->softintr_id, 0, 0, ecpp_softintr,
(caddr_t)pp) != DDI_SUCCESS) {
ecpp_error(dip, "ecpp_attach: failed to add soft intr\n");
goto fail_softintr;
}
(void) sprintf(name, "ecpp%d", instance);
if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
DDI_NT_PRINTER, NULL) == DDI_FAILURE) {
ecpp_error(dip, "ecpp_attach: create_minor_node failed\n");
goto fail_minor;
}
pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP);
if (pp->ioblock == NULL) {
ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n");
goto fail_iob;
} else {
ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock);
}
ecpp_get_props(pp);
#if defined(__x86)
if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) {
if (ddi_dmae_alloc(dip, pp->uh.x86.chn,
DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS)
ecpp_error(pp->dip, "dmae_alloc success!\n");
}
#endif
if (ECPP_CONFIG_CHIP(pp) == FAILURE) {
ecpp_error(pp->dip, "config_chip failed.\n");
goto fail_config;
}
ecpp_kstat_init(pp);
ddi_report_dev(dip);
return (DDI_SUCCESS);
fail_config:
ddi_prop_remove_all(dip);
kmem_free(pp->ioblock, IO_BLOCK_SZ);
fail_iob:
ddi_remove_minor_node(dip, NULL);
fail_minor:
ddi_remove_softintr(pp->softintr_id);
fail_softintr:
ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
fail_intr:
mutex_destroy(&pp->umutex);
cv_destroy(&pp->pport_cv);
fail_ibc:
ddi_dma_free_handle(&pp->dma_handle);
fail_dma:
ECPP_UNMAP_REGS(pp);
fail_map:
fail_sio:
ddi_soft_state_free(ecppsoft_statep, instance);
fail:
ecpp_error(dip, "ecpp_attach: failed.\n");
return (DDI_FAILURE);
}
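/*
* ecpp_detach() handles DDI_DETACH and DDI_SUSPEND.
* For DDI_SUSPEND it blocks new transfers and waits for the current
* transfer to complete; for DDI_DETACH it releases all the resources
* acquired in ecpp_attach().
*/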
static int
ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
int instance;
struct ecppunit *pp;
instance = ddi_get_instance(dip);
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
return (DDI_FAILURE);
}
mutex_enter(&pp->umutex);
ASSERT(pp->suspended == FALSE);
pp->suspended = TRUE; /* prevent new transfers */
/*
* Wait if there's any activity on the port
*/
if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
(void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
SUSPEND_TOUT * drv_usectohz(1000000),
TR_CLOCK_TICK);
if ((pp->e_busy == ECPP_BUSY) ||
(pp->e_busy == ECPP_FLUSH)) {
pp->suspended = FALSE;
mutex_exit(&pp->umutex);
ecpp_error(pp->dip,
"ecpp_detach: suspend timeout\n");
return (DDI_FAILURE);
}
}
mutex_exit(&pp->umutex);
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
pp = ddi_get_soft_state(ecppsoft_statep, instance);
#if defined(__x86)
if (pp->hw == &x86 && pp->uh.x86.chn != 0xff)
(void) ddi_dmae_release(pp->dip, pp->uh.x86.chn);
#endif
if (pp->dma_handle != NULL)
ddi_dma_free_handle(&pp->dma_handle);
ddi_remove_minor_node(dip, NULL);
ddi_remove_softintr(pp->softintr_id);
ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
if (pp->ksp) {
kstat_delete(pp->ksp);
}
if (pp->intrstats) {
kstat_delete(pp->intrstats);
}
cv_destroy(&pp->pport_cv);
mutex_destroy(&pp->umutex);
ECPP_UNMAP_REGS(pp);
kmem_free(pp->ioblock, IO_BLOCK_SZ);
ddi_prop_remove_all(dip);
ddi_soft_state_free(ecppsoft_statep, instance);
return (DDI_SUCCESS);
}
/*
* ecpp_get_props() reads ecpp.conf for user-definable tunables.
* If the file or a particular variable is not there, a default value
* is assigned.
*/
static void
ecpp_get_props(struct ecppunit *pp)
{
char *prop;
#if defined(__x86)
int len;
int value;
#endif
/*
* If fast_centronics is TRUE, non-compliant IEEE 1284
* peripherals (Centronics peripherals) will operate in DMA mode.
* Transfers between main memory and the device will be via DMA;
* peripheral handshaking will be conducted by superio logic.
* If ecpp cannot read the variable correctly, fast_centronics will
* be set to FALSE. In this case, transfers and handshaking
* will be conducted by PIO for Centronics devices.
*/
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
"fast-centronics", &prop) == DDI_PROP_SUCCESS) {
pp->fast_centronics =
(strcmp(prop, "true") == 0) ? TRUE : FALSE;
ddi_prop_free(prop);
} else {
pp->fast_centronics = FALSE;
}
/*
* If fast-1284-compatible is set to TRUE, when ecpp communicates
* with IEEE 1284 compliant peripherals, data transfers between
* main memory and the parallel port will be conducted by DMA.
* Handshaking between the port and peripheral will be conducted
* by superio logic. This is the default characteristic. If
* fast-1284-compatible is set to FALSE, transfers and handshaking
* will be conducted by PIO.
*/
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
"fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
ddi_prop_free(prop);
} else {
pp->fast_compat = TRUE;
}
/*
* Some centronics peripherals require the nInit signal to be
* toggled to reset the device. If centronics_init_seq is set
* to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
* Applications have the opportunity to toggle the nInit signal
* with ioctl(2) calls as well. The default is to set it to FALSE.
*/
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
"centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
ddi_prop_free(prop);
} else {
pp->init_seq = FALSE;
}
/*
* If one of the Centronics status signals is in an erroneous
* state, ecpp_wsrv() will be reinvoked every centronics-retry ms to
* check whether the status is ok to transfer. If the property is not
* found, wsrv_retry will be set to CENTRONICS_RETRY ms.
*/
pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"centronics-retry", CENTRONICS_RETRY);
/*
* In PIO mode, ecpp_isr() will loop waiting for the busy signal
* to be deasserted before transferring the next byte. wait_for_busy
* is specified in microseconds. If the property is not found,
* ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
*/
pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"centronics-wait-for-busy", WAIT_FOR_BUSY);
/*
* In PIO mode, Centronics transfers must hold the data signals
* for data_setup_time milliseconds before the strobe is asserted.
*/
pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"centronics-data-setup-time", DATA_SETUP_TIME);
/*
* In PIO mode, Centronics transfers assert the strobe signal
* for a period of strobe_pulse_width milliseconds.
*/
pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);
/*
* Upon a transfer to the peripheral, ecpp waits write_timeout seconds
* for the transmission to complete.
*/
default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);
pp->xfer_parms = default_xfer_parms;
/*
* Get dma channel for M1553
*/
if (pp->hw == &m1553) {
pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
pp->dip, 0, "dma-channel", 0x1);
ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
}
#if defined(__x86)
len = sizeof (value);
/* Get dma channel for i86 pc */
if (pp->hw == &x86) {
if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
!= DDI_PROP_SUCCESS) {
ecpp_error(pp->dip, "No dma channel found\n");
pp->uh.x86.chn = 0xff;
pp->fast_compat = FALSE;
pp->noecpregs = TRUE;
} else
pp->uh.x86.chn = (uint8_t)value;
}
#endif
/*
* these properties are not yet public
*/
pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"ecp-rev-speed", ECP_REV_SPEED);
pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
"rev-watchdog", REV_WATCHDOG);
ecpp_error(pp->dip,
"ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
"ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
"ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
"ecpp_get_prop: transfer-timeout=%d\n",
pp->fast_centronics, pp->fast_compat,
pp->wsrv_retry, pp->wait_for_busy,
pp->data_setup_time, pp->strobe_pulse_width,
pp->xfer_parms.write_timeout);
}
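/*
* getinfo(9E) entry point: translate a dev_t into the corresponding
* dev_info pointer or instance number
*/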
/*ARGSUSED*/
int
ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
dev_t dev = (dev_t)arg;
struct ecppunit *pp;
int instance, ret;
instance = getminor(dev);
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
pp = ddi_get_soft_state(ecppsoft_statep, instance);
if (pp != NULL) {
*result = pp->dip;
ret = DDI_SUCCESS;
} else {
ret = DDI_FAILURE;
}
break;
case DDI_INFO_DEVT2INSTANCE:
*result = (void *)(uintptr_t)instance;
ret = DDI_SUCCESS;
break;
default:
ret = DDI_FAILURE;
break;
}
return (ret);
}
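/*
* ecpp_open() enforces exclusive access to the port, initializes the state
* variables, resets the port registers, configures the Stream head and
* negotiates into the default 1284 mode
*/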
/*ARGSUSED2*/
static int
ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
{
struct ecppunit *pp;
int instance;
struct stroptions *sop;
mblk_t *mop;
instance = getminor(*dev);
if (instance < 0) {
return (ENXIO);
}
pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);
if (pp == NULL) {
return (ENXIO);
}
mutex_enter(&pp->umutex);
/*
* The parallel port is an exclusive-use device,
* thus providing print job integrity
*/
if (pp->oflag == TRUE) {
ecpp_error(pp->dip, "ecpp open failed");
mutex_exit(&pp->umutex);
return (EBUSY);
}
pp->oflag = TRUE;
/* initialize state variables */
pp->prn_timeouts = prn_timeouts_default;
pp->xfer_parms = default_xfer_parms;
pp->current_mode = ECPP_CENTRONICS;
pp->backchannel = ECPP_CENTRONICS;
pp->current_phase = ECPP_PHASE_PO;
pp->port = ECPP_PORT_DMA;
pp->instance = instance;
pp->timeout_error = 0;
pp->saved_dsr = DSR_READ(pp);
pp->ecpp_drain_counter = 0;
pp->dma_cancelled = FALSE;
pp->io_mode = ECPP_DMA;
pp->joblen = 0;
pp->tfifo_intr = 0;
pp->softintr_pending = 0;
pp->nread = 0;
/* clear the state flag */
pp->e_busy = ECPP_IDLE;
pp->readq = RD(q);
pp->writeq = WR(q);
pp->msg = NULL;
RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp;
/*
* Get ready: check host/peripheral, negotiate into default mode
*/
if (ecpp_reset_port_regs(pp) == FAILURE) {
mutex_exit(&pp->umutex);
return (EIO);
}
mutex_exit(&pp->umutex);
/*
* Configure the Stream head and enable the Stream
*/
if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) {
return (EAGAIN);
}
mop->b_datap->db_type = M_SETOPTS;
mop->b_wptr += sizeof (struct stroptions);
/*
* if device is open with O_NONBLOCK flag set, let read(2) return 0
* if no data waiting to be read. Writes will block on flow control.
*/
sop = (struct stroptions *)mop->b_rptr;
sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON;
sop->so_hiwat = ECPPHIWAT;
sop->so_lowat = ECPPLOWAT;
/* enable the stream */
qprocson(q);
putnext(q, mop);
mutex_enter(&pp->umutex);
ecpp_default_negotiation(pp);
/* go revidle */
(void) ecpp_idle_phase(pp);
ecpp_error(pp->dip,
"ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n",
pp->current_mode, pp->current_phase,
ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
mutex_exit(&pp->umutex);
return (0);
}
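/*
* ecpp_close() waits for queued output to drain (or for a signal),
* cancels outstanding timeouts, terminates the 1284 session back to
* Compatibility mode and detaches the queues
*/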
/*ARGSUSED1*/
static int
ecpp_close(queue_t *q, int flag, cred_t *cred_p)
{
struct ecppunit *pp;
timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id;
pp = (struct ecppunit *)q->q_ptr;
ecpp_error(pp->dip, "ecpp_close: entering ...\n");
mutex_enter(&pp->umutex);
/*
* ecpp_close() will continue to loop until the
* queue has been drained or the thread
* has received a SIG. Typically, when the queue
* has data, the port will be ECPP_BUSY. However,
* after a DMA completes and before the wsrv
* starts the next transfer, the port may be IDLE.
* In this case, ecpp_close() will loop within this
* while(qsize) segment. Since ecpp_wsrv() runs
* at software interrupt level, this shouldn't loop
* very long.
*/
while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) {
if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) {
ecpp_error(pp->dip, "ecpp_close:B: received SIG\n");
/*
* Returning from a signal such as
* SIGTERM or SIGKILL
*/
ecpp_flush(pp, FWRITE);
break;
} else {
ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n");
}
}
ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, "
"qsize(WR(q))=%d, qsize(RD(q))=%d\n",
pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q));
/*
* Cancel all timeouts, disable interrupts
*
* Note that we can't call untimeout(9F) with the mutex held:
* the callout may be blocked on the same mutex, and untimeout() will
* cv_wait() while the callout is executing, thus creating a deadlock.
* So we zero the timeout IDs inside the mutex and call untimeout() later.
*/
timeout_id = pp->timeout_id;
fifo_timer_id = pp->fifo_timer_id;
wsrv_timer_id = pp->wsrv_timer_id;
pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0;
pp->softintr_pending = 0;
pp->dma_cancelled = TRUE;
ECPP_MASK_INTR(pp);
mutex_exit(&pp->umutex);
qprocsoff(q);
if (timeout_id) {
(void) untimeout(timeout_id);
}
if (fifo_timer_id) {
(void) untimeout(fifo_timer_id);
}
if (wsrv_timer_id) {
(void) untimeout(wsrv_timer_id);
}
mutex_enter(&pp->umutex);
/* set link to Compatible mode */
if ((pp->current_mode == ECPP_ECP_MODE) &&
(pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
(void) ecp_reverse2forward(pp);
}
(void) ecpp_1284_termination(pp);
pp->oflag = FALSE;
q->q_ptr = WR(q)->q_ptr = NULL;
pp->readq = pp->writeq = NULL;
pp->msg = NULL;
ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n",
ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
mutex_exit(&pp->umutex);
return (0);
}
/*
* standard put procedure for ecpp
*/
static int
ecpp_wput(queue_t *q, mblk_t *mp)
{
struct msgb *nmp;
struct ecppunit *pp;
pp = (struct ecppunit *)q->q_ptr;
if (!mp) {
return (0);
}
if ((mp->b_wptr - mp->b_rptr) <= 0) {
ecpp_error(pp->dip,
"ecpp_wput:bogus packet recieved mp=%x\n", mp);
freemsg(mp);
return (0);
}
switch (DB_TYPE(mp)) {
case M_DATA:
/*
* This is a quick fix for multiple message block problem,
* it will be changed later with better performance code.
*/
if (mp->b_cont) {
/*
* mblk has scattered data ... do msgpullup
* if it fails, continue with the current mblk
*/
if ((nmp = msgpullup(mp, -1)) != NULL) {
freemsg(mp);
mp = nmp;
ecpp_error(pp->dip,
"ecpp_wput:msgpullup: mp=%p len=%d\n",
mp, mp->b_wptr - mp->b_rptr);
}
}
/* let ecpp_wsrv() concatenate small blocks */
(void) putq(q, mp);
break;
case M_CTL:
(void) putq(q, mp);
break;
case M_IOCTL: {
struct iocblk *iocbp;
iocbp = (struct iocblk *)mp->b_rptr;
ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd);
mutex_enter(&pp->umutex);
/* TESTIO and GET_STATUS can be used during transfer */
if ((pp->e_busy == ECPP_BUSY) &&
(iocbp->ioc_cmd != BPPIOC_TESTIO) &&
(iocbp->ioc_cmd != PRNIOC_GET_STATUS)) {
mutex_exit(&pp->umutex);
(void) putq(q, mp);
} else {
mutex_exit(&pp->umutex);
ecpp_putioc(q, mp);
}
break;
}
case M_IOCDATA: {
struct copyresp *csp;
ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n");
csp = (struct copyresp *)mp->b_rptr;
/*
* If copy request failed, quit now
*/
if (csp->cp_rval != 0) {
freemsg(mp);
return (0);
}
switch (csp->cp_cmd) {
case ECPPIOC_SETPARMS:
case ECPPIOC_SETREGS:
case ECPPIOC_SETPORT:
case ECPPIOC_SETDATA:
case PRNIOC_SET_IFCAP:
case PRNIOC_SET_TIMEOUTS:
/*
* need to retrieve and use the data, but if the
* device is busy, wait.
*/
(void) putq(q, mp);
break;
case ECPPIOC_GETPARMS:
case ECPPIOC_GETREGS:
case ECPPIOC_GETPORT:
case ECPPIOC_GETDATA:
case BPPIOC_GETERR:
case BPPIOC_TESTIO:
case PRNIOC_GET_IFCAP:
case PRNIOC_GET_STATUS:
case PRNIOC_GET_1284_STATUS:
case PRNIOC_GET_TIMEOUTS:
/* data transfered to user space okay */
ecpp_ack_ioctl(q, mp);
break;
case ECPPIOC_GETDEVID:
ecpp_wput_iocdata_devid(q, mp,
offsetof(struct ecpp_device_id, rlen));
break;
case PRNIOC_GET_1284_DEVID:
ecpp_wput_iocdata_devid(q, mp,
offsetof(struct prn_1284_device_id, id_rlen));
break;
case PRNIOC_GET_IFINFO:
ecpp_wput_iocdata_devid(q, mp,
offsetof(struct prn_interface_info, if_rlen));
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
break;
}
case M_FLUSH:
ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n");
if (*mp->b_rptr & FLUSHW) {
mutex_enter(&pp->umutex);
ecpp_flush(pp, FWRITE);
mutex_exit(&pp->umutex);
}
if (*mp->b_rptr & FLUSHR) {
mutex_enter(&pp->umutex);
ecpp_flush(pp, FREAD);
mutex_exit(&pp->umutex);
qreply(q, mp);
} else {
freemsg(mp);
}
break;
case M_READ:
/*
* When the user calls read(2), an M_READ message is sent to us;
* its payload is the number of requested bytes.
* We add up user requests and use the resulting number
* to calculate the reverse transfer block size
*/
mutex_enter(&pp->umutex);
if (pp->e_busy == ECPP_IDLE) {
pp->nread += *(size_t *)mp->b_rptr;
ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread);
freemsg(mp);
} else {
ecpp_error(pp->dip, "ecpp_wput: M_READ queueing");
(void) putq(q, mp);
}
mutex_exit(&pp->umutex);
break;
default:
ecpp_error(pp->dip, "ecpp_wput: bad messagetype 0x%x\n",
DB_TYPE(mp));
freemsg(mp);
break;
}
return (0);
}
/*
* Process ECPPIOC_GETDEVID-like ioctls
*/
static void
ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset)
{
struct copyresp *csp;
struct ecpp_copystate *stp;
mblk_t *datamp;
csp = (struct copyresp *)mp->b_rptr;
stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
/* determine the state of copyin/copyout process */
switch (stp->state) {
case ECPP_STRUCTIN:
/* user structure has arrived */
(void) putq(q, mp);
break;
case ECPP_ADDROUT:
/*
* data transferred to user space okay
* now update user structure
*/
datamp = allocb(sizeof (int), BPRI_MED);
if (datamp == NULL) {
ecpp_nack_ioctl(q, mp, ENOSR);
break;
}
*(int *)datamp->b_rptr =
*(int *)((char *)&stp->un + rlen_offset);
stp->state = ECPP_STRUCTOUT;
mcopyout(mp, csp->cp_private, sizeof (int),
(char *)stp->uaddr + rlen_offset, datamp);
qreply(q, mp);
break;
case ECPP_STRUCTOUT:
/* user structure was updated okay */
freemsg(csp->cp_private);
ecpp_ack_ioctl(q, mp);
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
}
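/*
* translate DSR contents into the BPP_*_ERR pin status bits
* reported by BPPIOC_GETERR
*/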
static uchar_t
ecpp_get_error_status(uchar_t status)
{
uchar_t pin_status = 0;
if (!(status & ECPP_nERR)) {
pin_status |= BPP_ERR_ERR;
}
if (status & ECPP_PE) {
pin_status |= BPP_PE_ERR;
}
if (!(status & ECPP_SLCT)) {
pin_status |= BPP_SLCT_ERR;
}
if (!(status & ECPP_nBUSY)) {
pin_status |= BPP_SLCT_ERR;
}
return (pin_status);
}
/*
* ioctl handler for output PUT procedure.
*/
static void
ecpp_putioc(queue_t *q, mblk_t *mp)
{
struct iocblk *iocbp;
struct ecppunit *pp;
pp = (struct ecppunit *)q->q_ptr;
iocbp = (struct iocblk *)mp->b_rptr;
/* I_STR ioctls are invalid */
if (iocbp->ioc_count != TRANSPARENT) {
ecpp_nack_ioctl(q, mp, EINVAL);
return;
}
switch (iocbp->ioc_cmd) {
case ECPPIOC_SETPARMS: {
mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL);
qreply(q, mp);
break;
}
case ECPPIOC_GETPARMS: {
struct ecpp_transfer_parms xfer_parms;
mutex_enter(&pp->umutex);
pp->xfer_parms.mode = pp->current_mode;
xfer_parms = pp->xfer_parms;
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms));
break;
}
case ECPPIOC_SETREGS: {
mutex_enter(&pp->umutex);
if (pp->current_mode != ECPP_DIAG_MODE) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
mutex_exit(&pp->umutex);
mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL);
qreply(q, mp);
break;
}
case ECPPIOC_GETREGS: {
struct ecpp_regs rg;
mutex_enter(&pp->umutex);
if (pp->current_mode != ECPP_DIAG_MODE) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
rg.dsr = DSR_READ(pp);
rg.dcr = DCR_READ(pp);
mutex_exit(&pp->umutex);
ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n",
rg.dsr, rg.dcr);
/* these bits must be 1 */
rg.dsr |= ECPP_SETREGS_DSR_MASK;
rg.dcr |= ECPP_SETREGS_DCR_MASK;
ecpp_putioc_copyout(q, mp, &rg, sizeof (rg));
break;
}
case ECPPIOC_SETPORT:
case ECPPIOC_SETDATA: {
mutex_enter(&pp->umutex);
if (pp->current_mode != ECPP_DIAG_MODE) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
mutex_exit(&pp->umutex);
/*
* each of the commands fetches a byte quantity.
*/
mcopyin(mp, NULL, sizeof (uchar_t), NULL);
qreply(q, mp);
break;
}
case ECPPIOC_GETDATA:
case ECPPIOC_GETPORT: {
uchar_t byte;
mutex_enter(&pp->umutex);
/* must be in diagnostic mode for these commands to work */
if (pp->current_mode != ECPP_DIAG_MODE) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
if (iocbp->ioc_cmd == ECPPIOC_GETPORT) {
byte = pp->port;
} else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) {
switch (pp->port) {
case ECPP_PORT_PIO:
byte = DATAR_READ(pp);
break;
case ECPP_PORT_TDMA:
byte = TFIFO_READ(pp);
ecpp_error(pp->dip, "GETDATA=0x%x\n", byte);
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
} else {
mutex_exit(&pp->umutex);
ecpp_error(pp->dip, "weird command");
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &byte, sizeof (byte));
break;
}
case BPPIOC_GETERR: {
struct bpp_error_status bpp_status;
mutex_enter(&pp->umutex);
bpp_status.timeout_occurred = pp->timeout_error;
bpp_status.bus_error = 0; /* not used */
bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr);
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status));
break;
}
case BPPIOC_TESTIO: {
mutex_enter(&pp->umutex);
if (!((pp->current_mode == ECPP_CENTRONICS) ||
(pp->current_mode == ECPP_COMPAT_MODE))) {
ecpp_nack_ioctl(q, mp, EINVAL);
} else {
pp->saved_dsr = DSR_READ(pp);
if ((pp->saved_dsr & ECPP_PE) ||
!(pp->saved_dsr & ECPP_SLCT) ||
!(pp->saved_dsr & ECPP_nERR)) {
ecpp_nack_ioctl(q, mp, EIO);
} else {
ecpp_ack_ioctl(q, mp);
}
}
mutex_exit(&pp->umutex);
break;
}
case PRNIOC_RESET:
/*
* Initialize interface only if no transfer is in progress
*/
mutex_enter(&pp->umutex);
if (pp->e_busy == ECPP_BUSY) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EIO);
} else {
(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
DCR_WRITE(pp, ECPP_SLCTIN);
drv_usecwait(2);
DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
ecpp_default_negotiation(pp);
mutex_exit(&pp->umutex);
ecpp_ack_ioctl(q, mp);
}
break;
case PRNIOC_GET_IFCAP: {
uint_t ifcap;
mutex_enter(&pp->umutex);
ifcap = ecpp_get_prn_ifcap(pp);
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap));
break;
}
case PRNIOC_SET_IFCAP: {
mcopyin(mp, NULL, sizeof (uint_t), NULL);
qreply(q, mp);
break;
}
case PRNIOC_GET_TIMEOUTS: {
struct prn_timeouts timeouts;
mutex_enter(&pp->umutex);
timeouts = pp->prn_timeouts;
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts));
break;
}
case PRNIOC_SET_TIMEOUTS:
mcopyin(mp, NULL, sizeof (struct prn_timeouts),
*(caddr_t *)(void *)mp->b_cont->b_rptr);
qreply(q, mp);
break;
case PRNIOC_GET_STATUS: {
uint8_t dsr;
uint_t status;
mutex_enter(&pp->umutex);
/* DSR only makes sense in Centronics & Compat mode */
if (pp->current_mode == ECPP_CENTRONICS ||
pp->current_mode == ECPP_COMPAT_MODE) {
dsr = DSR_READ(pp);
if ((dsr & ECPP_PE) ||
!(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) {
status = PRN_ONLINE;
} else {
status = PRN_ONLINE | PRN_READY;
}
} else {
status = PRN_ONLINE | PRN_READY;
}
mutex_exit(&pp->umutex);
ecpp_putioc_copyout(q, mp, &status, sizeof (status));
break;
}
case PRNIOC_GET_1284_STATUS: {
uint8_t dsr;
uchar_t status;
mutex_enter(&pp->umutex);
/* status only makes sense in Centronics & Compat mode */
if (pp->current_mode != ECPP_COMPAT_MODE &&
pp->current_mode != ECPP_CENTRONICS) {
mutex_exit(&pp->umutex);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
dsr = DSR_READ(pp); /* read status */
mutex_exit(&pp->umutex);
ecpp_error(pp->dip, "PRNIOC_GET_STATUS: %x\n", dsr);
status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) |
(~dsr & ECPP_nBUSY);
ecpp_putioc_copyout(q, mp, &status, sizeof (status));
break;
}
case ECPPIOC_GETDEVID:
ecpp_putioc_stateful_copyin(q, mp,
sizeof (struct ecpp_device_id));
break;
case PRNIOC_GET_1284_DEVID:
ecpp_putioc_stateful_copyin(q, mp,
sizeof (struct prn_1284_device_id));
break;
case PRNIOC_GET_IFINFO:
ecpp_putioc_stateful_copyin(q, mp,
sizeof (struct prn_interface_info));
break;
default:
ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n",
iocbp->ioc_cmd);
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
}
/*
* allocate mblk and copyout the requested number of bytes
*/
static void
ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len)
{
mblk_t *tmp;
if ((tmp = allocb(len, BPRI_MED)) == NULL) {
ecpp_nack_ioctl(q, mp, ENOSR);
return;
}
bcopy(buf, tmp->b_wptr, len);
mcopyout(mp, NULL, len, NULL, tmp);
qreply(q, mp);
}
/*
* copyin the structure using struct ecpp_copystate
*/
static void
ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size)
{
mblk_t *tmp;
struct ecpp_copystate *stp;
if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) {
ecpp_nack_ioctl(q, mp, EAGAIN);
return;
}
stp = (struct ecpp_copystate *)tmp->b_rptr;
stp->state = ECPP_STRUCTIN;
stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr;
tmp->b_wptr += sizeof (struct ecpp_copystate);
mcopyin(mp, tmp, size, stp->uaddr);
qreply(q, mp);
}
/*
* the read queue is only used when the peripheral sends data faster
* than the application consumes it;
* once the low water mark is reached, this routine will be scheduled
*/
static int
ecpp_rsrv(queue_t *q)
{
struct msgb *mp;
/*
* send data upstream until next queue is full or the queue is empty
*/
while (canputnext(q) && (mp = getq(q))) {
putnext(q, mp);
}
/*
* if there is still space on the queue, enable backchannel
*/
if (canputnext(q)) {
struct ecppunit *pp = (struct ecppunit *)q->q_ptr;
mutex_enter(&pp->umutex);
if (pp->e_busy == ECPP_IDLE) {
(void) ecpp_idle_phase(pp);
cv_signal(&pp->pport_cv); /* signal ecpp_close() */
}
mutex_exit(&pp->umutex);
}
return (0);
}
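/*
* write service procedure: gathers queued M_DATA into one transfer
* (using pp->ioblock or a single large mblk), processes postponed
* ioctls, backchannel and M_READ requests, then either starts the
* transfer or goes into the idle phase
*/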
static int
ecpp_wsrv(queue_t *q)
{
struct ecppunit *pp = (struct ecppunit *)q->q_ptr;
struct msgb *mp;
size_t len, total_len;
size_t my_ioblock_sz;
caddr_t my_ioblock;
caddr_t start_addr;
mutex_enter(&pp->umutex);
ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy);
/* if channel is actively doing work, wait till completed */
if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) {
mutex_exit(&pp->umutex);
return (0);
} else if (pp->suspended == TRUE) {
/*
* if the system is about to suspend and ecpp_detach()
* is blocked due to active transfers, wake it up and exit
*/
cv_signal(&pp->pport_cv);
mutex_exit(&pp->umutex);
return (0);
}
/* peripheral status should be okay before starting transfer */
if (pp->e_busy == ECPP_ERR) {
if (ecpp_check_status(pp) == FAILURE) {
if (pp->wsrv_timer_id == 0) {
ecpp_error(pp->dip, "wsrv: start wrsv_timer\n");
pp->wsrv_timer_id = timeout(ecpp_wsrv_timer,
(caddr_t)pp,
drv_usectohz(pp->wsrv_retry * 1000));
} else {
ecpp_error(pp->dip,
"ecpp_wsrv: wrsv_timer is active\n");
}
mutex_exit(&pp->umutex);
return (0);
} else {
pp->e_busy = ECPP_IDLE;
}
}
my_ioblock = pp->ioblock;
my_ioblock_sz = IO_BLOCK_SZ;
/*
* it's important to null pp->msg here,
* cleaning up from previous transfer attempts
*/
pp->msg = NULL;
start_addr = NULL;
len = total_len = 0;
/*
* The following loop is implemented to gather the
* many small writes that the lp subsystem makes and
* compile them into one large DMA transfer. The len and
* total_len variables are a running count of the number of
* bytes that have been gathered. They are bcopied into the
* ioblock buffer. pp->e_busy is set to ECPP_BUSY as soon as
* we start gathering packets, to indicate the upcoming transfer.
*/
while (mp = getq(q)) {
switch (DB_TYPE(mp)) {
case M_DATA:
pp->e_busy = ECPP_BUSY;
len = mp->b_wptr - mp->b_rptr;
if ((total_len == 0) && (len >= my_ioblock_sz)) {
/*
* if the first M_DATA is bigger than ioblock,
* just use this mblk and start the transfer
*/
total_len = len;
start_addr = (caddr_t)mp->b_rptr;
pp->msg = mp;
goto breakout;
} else if (total_len + len > my_ioblock_sz) {
/*
* current M_DATA does not fit in ioblock,
* put it back and start the transfer
*/
(void) putbq(q, mp);
goto breakout;
} else {
/*
* otherwise add data to ioblock and free mblk
*/
bcopy(mp->b_rptr, my_ioblock, len);
my_ioblock += len;
total_len += len;
start_addr = (caddr_t)pp->ioblock;
freemsg(mp);
}
break;
case M_IOCTL:
/*
* Assume a simple loopback test: an application
* writes data into the TFIFO, reads it using
* ECPPIOC_GETDATA and compares. If the transfer
* times out (which is only possible on Grover),
* the ioctl might be processed before the data
* got to the TFIFO, which leads to miscompare.
* So if we encounter an ioctl, postpone it until after the transfer.
*/
if (total_len > 0) {
(void) putbq(q, mp);
goto breakout;
}
ecpp_error(pp->dip, "M_IOCTL.\n");
mutex_exit(&pp->umutex);
ecpp_putioc(q, mp);
mutex_enter(&pp->umutex);
break;
case M_IOCDATA: {
struct copyresp *csp = (struct copyresp *)mp->b_rptr;
ecpp_error(pp->dip, "M_IOCDATA\n");
/*
* If copy request failed, quit now
*/
if (csp->cp_rval != 0) {
freemsg(mp);
break;
}
switch (csp->cp_cmd) {
case ECPPIOC_SETPARMS:
case ECPPIOC_SETREGS:
case ECPPIOC_SETPORT:
case ECPPIOC_SETDATA:
case ECPPIOC_GETDEVID:
case PRNIOC_SET_IFCAP:
case PRNIOC_GET_1284_DEVID:
case PRNIOC_SET_TIMEOUTS:
case PRNIOC_GET_IFINFO:
ecpp_srvioc(q, mp);
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
break;
}
case M_CTL:
if (pp->e_busy != ECPP_IDLE) {
ecpp_error(pp->dip, "wsrv: M_CTL postponed\n");
(void) putbq(q, mp);
goto breakout;
} else {
ecpp_error(pp->dip, "wsrv: M_CTL\n");
}
/* sanity check */
if ((mp->b_wptr - mp->b_rptr != sizeof (int)) ||
(*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) {
ecpp_error(pp->dip, "wsrv: bogus M_CTL");
freemsg(mp);
break;
} else {
freemsg(mp);
}
/* This was a backchannel request */
(void) ecpp_peripheral2host(pp);
/* exit if a transfer has been initiated */
if (pp->e_busy == ECPP_BUSY) {
goto breakout;
}
break;
case M_READ:
pp->nread += *(size_t *)mp->b_rptr;
freemsg(mp);
ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread);
break;
default:
ecpp_error(pp->dip, "wsrv: should never get here\n");
freemsg(mp);
break;
}
}
breakout:
/*
* If total_len > 0 then start the transfer, otherwise goto idle state
*/
if (total_len > 0) {
ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len);
pp->e_busy = ECPP_BUSY;
ecpp_start(pp, start_addr, total_len);
} else {
ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy);
/* IDLE if xfer_timeout, or FIFO_EMPTY */
if (pp->e_busy == ECPP_IDLE) {
(void) ecpp_idle_phase(pp);
cv_signal(&pp->pport_cv); /* signal ecpp_close() */
}
}
mutex_exit(&pp->umutex);
return (1);
}
/*
* Ioctl processor for queued ioctl data transfer messages.
*/
static void
ecpp_srvioc(queue_t *q, mblk_t *mp)
{
struct iocblk *iocbp;
struct ecppunit *pp;
iocbp = (struct iocblk *)mp->b_rptr;
pp = (struct ecppunit *)q->q_ptr;
switch (iocbp->ioc_cmd) {
case ECPPIOC_SETPARMS: {
struct ecpp_transfer_parms *xferp;
xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr;
if (xferp->write_timeout <= 0 ||
xferp->write_timeout >= ECPP_MAX_TIMEOUT) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
if (!((xferp->mode == ECPP_CENTRONICS) ||
(xferp->mode == ECPP_COMPAT_MODE) ||
(xferp->mode == ECPP_NIBBLE_MODE) ||
(xferp->mode == ECPP_ECP_MODE) ||
(xferp->mode == ECPP_DIAG_MODE))) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
pp->xfer_parms = *xferp;
pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout;
ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n",
pp->current_mode, pp->xfer_parms.mode);
if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) {
ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
} else {
/*
* mode negotiation was a success. If nibble mode, check the
* backchannel and set into REVIDLE.
*/
if ((pp->current_mode == ECPP_NIBBLE_MODE) &&
(read_nibble_backchan(pp) == FAILURE)) {
/*
* problems reading the backchannel;
* we returned to Centronics and the
* ioctl fails.
*/
ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
break;
}
ecpp_ack_ioctl(q, mp);
}
if (pp->current_mode != ECPP_DIAG_MODE) {
pp->port = ECPP_PORT_DMA;
} else {
pp->port = ECPP_PORT_PIO;
}
pp->xfer_parms.mode = pp->current_mode;
break;
}
case ECPPIOC_SETREGS: {
struct ecpp_regs *rg;
uint8_t dcr;
rg = (struct ecpp_regs *)mp->b_cont->b_rptr;
/* must be in diagnostic mode for these commands to work */
if (pp->current_mode != ECPP_DIAG_MODE) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
/* bits 4-7 must be 1 or return EINVAL */
if ((rg->dcr & ECPP_SETREGS_DCR_MASK) !=
ECPP_SETREGS_DCR_MASK) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
/* get the old dcr */
dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
/* get the new dcr */
dcr = (dcr & ECPP_SETREGS_DCR_MASK) |
(rg->dcr & ~ECPP_SETREGS_DCR_MASK);
DCR_WRITE(pp, dcr);
ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr);
ecpp_ack_ioctl(q, mp);
break;
}
case ECPPIOC_SETPORT: {
uchar_t *port;
port = (uchar_t *)mp->b_cont->b_rptr;
/* must be in diagnostic mode for these commands to work */
if (pp->current_mode != ECPP_DIAG_MODE) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
switch (*port) {
case ECPP_PORT_PIO:
/* put superio into PIO mode */
ECR_WRITE(pp,
ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
pp->port = *port;
ecpp_ack_ioctl(q, mp);
break;
case ECPP_PORT_TDMA:
ecpp_error(pp->dip, "SETPORT: to TDMA\n");
pp->tfifo_intr = 1;
/* change to mode 110 */
ECR_WRITE(pp,
ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV);
pp->port = *port;
ecpp_ack_ioctl(q, mp);
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
}
break;
}
case ECPPIOC_SETDATA: {
uchar_t *data;
data = (uchar_t *)mp->b_cont->b_rptr;
/* must be in diagnostic mode for these commands to work */
if (pp->current_mode != ECPP_DIAG_MODE) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
switch (pp->port) {
case ECPP_PORT_PIO:
DATAR_WRITE(pp, *data);
ecpp_ack_ioctl(q, mp);
break;
case ECPP_PORT_TDMA:
TFIFO_WRITE(pp, *data);
ecpp_ack_ioctl(q, mp);
break;
default:
ecpp_nack_ioctl(q, mp, EINVAL);
}
break;
}
case ECPPIOC_GETDEVID: {
struct copyresp *csp;
struct ecpp_copystate *stp;
struct ecpp_device_id *dp;
struct ecpp_device_id id;
csp = (struct copyresp *)mp->b_rptr;
stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
dp = (struct ecpp_device_id *)mp->b_cont->b_rptr;
#ifdef _MULTI_DATAMODEL
if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
struct ecpp_device_id32 *dp32;
dp32 = (struct ecpp_device_id32 *)dp;
id.mode = dp32->mode;
id.len = dp32->len;
id.addr = (char *)(uintptr_t)dp32->addr;
} else {
#endif /* _MULTI_DATAMODEL */
id = *dp;
#ifdef _MULTI_DATAMODEL
}
#endif /* _MULTI_DATAMODEL */
ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen);
break;
}
case PRNIOC_GET_1284_DEVID: {
struct copyresp *csp;
struct ecpp_copystate *stp;
struct prn_1284_device_id *dp;
struct ecpp_device_id id;
csp = (struct copyresp *)mp->b_rptr;
stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr;
/* imitate struct ecpp_device_id */
id.mode = ECPP_NIBBLE_MODE;
#ifdef _MULTI_DATAMODEL
if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
struct prn_1284_device_id32 *dp32;
dp32 = (struct prn_1284_device_id32 *)dp;
id.len = dp32->id_len;
id.addr = (char *)(uintptr_t)dp32->id_data;
} else {
#endif /* _MULTI_DATAMODEL */
id.len = dp->id_len;
id.addr = (char *)dp->id_data;
#ifdef _MULTI_DATAMODEL
}
#endif /* _MULTI_DATAMODEL */
ecpp_srvioc_devid(q, mp, &id,
(int *)&stp->un.prn_devid.id_rlen);
break;
}
case PRNIOC_SET_IFCAP: {
uint_t ifcap, new_ifcap;
ifcap = ecpp_get_prn_ifcap(pp);
new_ifcap = *(uint_t *)mp->b_cont->b_rptr;
if (ifcap == new_ifcap) {
ecpp_ack_ioctl(q, mp);
break;
}
/* only changing PRN_BIDI is supported */
if ((ifcap ^ new_ifcap) & ~PRN_BIDI) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
if (new_ifcap & PRN_BIDI) { /* go bidirectional */
ecpp_default_negotiation(pp);
} else { /* go unidirectional */
(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
}
ecpp_ack_ioctl(q, mp);
break;
}
case PRNIOC_SET_TIMEOUTS: {
struct prn_timeouts *prn_timeouts;
prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr;
if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) {
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
pp->prn_timeouts = *prn_timeouts;
pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward;
ecpp_ack_ioctl(q, mp);
break;
}
case PRNIOC_GET_IFINFO:
ecpp_srvioc_prnif(q, mp);
break;
default: /* unexpected ioctl type */
ecpp_nack_ioctl(q, mp, EINVAL);
break;
}
}
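/*
* Common worker for ECPPIOC_GETDEVID and PRNIOC_GET_1284_DEVID:
* validates the requested mode (only Nibble is currently supported),
* reads the device ID length, then reads the ID string limited to
* the user buffer size and schedules the copyout
*/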
static void
ecpp_srvioc_devid(queue_t *q, mblk_t *mp, struct ecpp_device_id *id, int *rlen)
{
struct ecppunit *pp;
struct copyresp *csp;
struct ecpp_copystate *stp;
int error;
int len;
int mode;
mblk_t *datamp;
pp = (struct ecppunit *)q->q_ptr;
csp = (struct copyresp *)mp->b_rptr;
stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
mode = id->mode;
/* check arguments */
if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) {
ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n",
mode, id->len);
ecpp_nack_ioctl(q, mp, EINVAL);
return;
}
/* Currently only Nibble mode is supported */
if (mode != ECPP_NIBBLE_MODE) {
ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
return;
}
if ((id->addr == NULL) && (id->len != 0)) {
ecpp_nack_ioctl(q, mp, EFAULT);
return;
}
/* first query the device ID length only (no data buffer) */
if (error = ecpp_getdevid(pp, NULL, &len, mode)) {
ecpp_nack_ioctl(q, mp, error);
goto breakout;
}
/* the ID string is prefixed by a two-byte length field; don't count it */
len -= 2;
*rlen = len;
/* limit transfer to user buffer length */
if (id->len < len) {
len = id->len;
}
if (len == 0) {
/* just return rlen */
stp->state = ECPP_ADDROUT;
ecpp_wput_iocdata_devid(q, mp,
(uintptr_t)rlen - (uintptr_t)&stp->un);
goto breakout;
}
if ((datamp = allocb(len, BPRI_MED)) == NULL) {
ecpp_nack_ioctl(q, mp, ENOSR);
goto breakout;
}
/* read ID string */
error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode);
if (error) {
freemsg(datamp);
ecpp_nack_ioctl(q, mp, error);
goto breakout;
} else {
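/* hand the ID string back: request a copyout into the user's id->addr buffer */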
datamp->b_wptr += len;
stp->state = ECPP_ADDROUT;
mcopyout(mp, csp->cp_private, len, id->addr, datamp);
qreply(q, mp);
}
return;
breakout:
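/* return the interface to Compatibility mode */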
(void) ecpp_1284_termination(pp);
}
/*
* PRNIOC_GET_IFINFO: return prnio interface info string
*/
static void
ecpp_srvioc_prnif(queue_t *q, mblk_t *mp)
{
struct copyresp *csp;
struct ecpp_copystate *stp;
uint_t len;
struct prn_interface_info *ip;
struct prn_interface_info info;
mblk_t *datamp;
#ifdef _MULTI_DATAMODEL
struct iocblk *iocbp = (struct iocblk *)mp->b_rptr;
#endif
csp = (struct copyresp *)mp->b_rptr;
stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
ip = (struct prn_interface_info *)mp->b_cont->b_rptr;
#ifdef _MULTI_DATAMODEL
if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
struct prn_interface_info32 *ip32;
ip32 = (struct prn_interface_info32 *)ip;
info.if_len = ip32->if_len;
info.if_data = (char *)(uintptr_t)ip32->if_data;
} else {
#endif /* _MULTI_DATAMODEL */
info = *ip;
#ifdef _MULTI_DATAMODEL
}
#endif /* _MULTI_DATAMODEL */
len = strlen(prn_ifinfo);
stp->un.prn_if.if_rlen = len;
stp->state = ECPP_ADDROUT;
/* check arguments */
if ((info.if_data == NULL) && (info.if_len != 0)) {
ecpp_nack_ioctl(q, mp, EFAULT);
return;
}
if (info.if_len == 0) {
/* just copyout rlen */
ecpp_wput_iocdata_devid(q, mp,
offsetof(struct prn_interface_info, if_rlen));
return;
}
/* if needed, trim to the buffer size */
if (len > info.if_len) {
len = info.if_len;
}
if ((datamp = allocb(len, BPRI_MED)) == NULL) {
ecpp_nack_ioctl(q, mp, ENOSR);
return;
}
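/* copy the interface info string into the mblk and request a copyout to the user's buffer */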
bcopy(&prn_ifinfo[0], datamp->b_wptr, len);
datamp->b_wptr += len;
mcopyout(mp, csp->cp_private, len, info.if_data, datamp);
qreply(q, mp);
}
static void
ecpp_flush(struct ecppunit *pp, int cmd)
{
queue_t *q;
uint8_t ecr, dcr;
timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id;
ASSERT(mutex_owned(&pp->umutex));
if (!(cmd & FWRITE)) {
return;
}
q = pp->writeq;
timeout_id = fifo_timer_id = wsrv_timer_id = 0;
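/* timeout ids are snapshotted under the mutex and cancelled after it is dropped below */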
ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy);
/* if there is an ongoing DMA, it needs to be turned off. */
switch (pp->e_busy) {
case ECPP_BUSY:
/*
* Change the port status to ECPP_FLUSH to
* indicate to ecpp_wsrv that the wq is being flushed.
*/
pp->e_busy = ECPP_FLUSH;
/*
* dma_cancelled tells ecpp_isr() that we have turned off the DMA.
* Since the mutex is held here, ecpp_isr() may be blocked on it.
* By the time ecpp_flush() finishes and ecpp_isr() acquires the
* mutex, the DMAC has already been reset, even though the
* interrupt was raised earlier. The dma_cancelled flag records
* that this condition has occurred: when ecpp_isr() sees it set,
* it ignores the DMAC csr and simply claims the interrupt.
*/
pp->dma_cancelled = TRUE;
/* either DMA or PIO transfer */
if (COMPAT_DMA(pp) ||
(pp->current_mode == ECPP_ECP_MODE) ||
(pp->current_mode == ECPP_DIAG_MODE)) {
/*
* if the bcr is zero, then DMA is complete and
* we are waiting for the fifo to drain. Therefore,
* turn off dma.
*/
if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
ecpp_error(pp->dip,
"ecpp_flush: dma_stop failed.\n");
}
/*
* If the port status is ECPP_BUSY, the DMA was stopped either
* explicitly above or by ecpp_isr(), but the FIFO hasn't
* drained yet. In either case, we need to unbind the DMA
* mappings.
*/
if (ddi_dma_unbind_handle(
pp->dma_handle) != DDI_SUCCESS)
ecpp_error(pp->dip,
"ecpp_flush: unbind failed.\n");
if (pp->msg != NULL) {
freemsg(pp->msg);
pp->msg = NULL;
}
} else {
/*
* PIO transfer: disable nAck interrupts
*/
dcr = DCR_READ(pp);
dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN);
DCR_WRITE(pp, dcr);
ECPP_MASK_INTR(pp);
}
/*
* The transfer is now cleaned up. There may or may not be data
* left in the FIFO; we don't care at this point whether the
* SuperIO transfers the remaining bytes or not. All that matters
* at this stage is that no more FIFO timers are started.
*/
timeout_id = pp->timeout_id;
fifo_timer_id = pp->fifo_timer_id;
pp->timeout_id = pp->fifo_timer_id = 0;
pp->softintr_pending = 0;
break;
case ECPP_ERR:
/*
* Change the port status to ECPP_FLUSH to
* indicate to ecpp_wsrv that the wq is being flushed.
*/
pp->e_busy = ECPP_FLUSH;
/*
* Most likely there are mblks in the queue, but the driver
* cannot transmit because of the bad port status. In this
* case, ecpp_flush() should make sure ecpp_wsrv_timer()
* is turned off.
*/
wsrv_timer_id = pp->wsrv_timer_id;
pp->wsrv_timer_id = 0;
break;
case ECPP_IDLE:
/* No work to do. Ready to flush */
break;
default:
ecpp_error(pp->dip,
"ecpp_flush: illegal state %x\n", pp->e_busy);
}
/* in DIAG mode clear TFIFO if needed */
if (pp->current_mode == ECPP_DIAG_MODE) {
ecr = ECR_READ(pp);
if (!(ecr & ECPP_FIFO_EMPTY)) {
ECR_WRITE(pp,
ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
ECR_WRITE(pp, ecr);
}
}
/* Discard all messages on the output queue. */
flushq(q, FLUSHDATA);
/* The port is no longer flushing or dma'ing for that matter. */
pp->e_busy = ECPP_IDLE;
/* Set the right phase */
if (pp->current_mode == ECPP_ECP_MODE) {
if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
} else {
pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
}
}
/* cancel pending timeouts; drop the mutex first, since untimeout() may wait for a running callback that needs it */
mutex_exit(&pp->umutex);
if (timeout_id) {
(void) untimeout(timeout_id);
}
if (fifo_timer_id) {
(void) untimeout(fifo_timer_id);
}
if (wsrv_timer_id) {
(void) untimeout(wsrv_timer_id);
}
mutex_enter(&pp->umutex);
cv_signal(&pp->pport_cv); /* wake up ecpp_close() */
}
static void
ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len)
{
ASSERT(mutex_owned(&pp->umutex));
ASSERT(pp->e_busy == ECPP_BUSY);
ecpp_error(pp->dip,
"ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n",
pp->current_mode, pp->current_phase, ECR_READ(pp), len);
pp->dma_dir = DDI_DMA_WRITE; /* this is a forward transfer */
switch (pp->current_mode) {
case ECPP_NIBBLE_MODE:
(void) ecpp_1284_termination(pp);
/* After termination we are in either Compat or Centronics mode */
/* FALLTHRU */
case ECPP_CENTRONICS:
case ECPP_COMPAT_MODE:
if (pp->io_mode == ECPP_DMA) {
if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
return;
}
} else {
/* PIO mode */
if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) {
return;
}
(void) ecpp_pio_writeb(pp);
}
break;
case ECPP_DIAG_MODE: {
int oldlen;
/* put superio into TFIFO mode, if not already */
ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
/*
* DMA would block if the TFIFO is not empty;
* if nobody has read these bytes by now, they're gone
*/
drv_usecwait(1);
if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
ecpp_error(pp->dip,
"ecpp_start: TFIFO not empty, clearing\n");
ECR_WRITE(pp,
ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
ECR_WRITE(pp,
ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
}
/* we can DMA at most 16 bytes into TFIFO */
oldlen = len;
if (len > ECPP_FIFO_SZ) {
len = ECPP_FIFO_SZ;
}
if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
return;
}
/* put the rest of data back on the queue */
if (oldlen > len) {
ecpp_putback_untransfered(pp, addr + len, oldlen - len);
}
break;
}
case ECPP_ECP_MODE:
ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
/* if in Reverse Phase negotiate to Forward */
if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
if (ecp_reverse2forward(pp) == FAILURE) {
if (pp->msg) {
(void) putbq(pp->writeq, pp->msg);
} else {
ecpp_putback_untransfered(pp,
addr, len);
}
}
}
if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
return;
}
break;
}
/* schedule transfer timeout */
pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
pp->xfer_parms.write_timeout * drv_usectohz(1000000));
}
/*
* Transfer a PIO "block" a byte at a time.
* The block starts at addr and ends at pp->last_byte
*/
static uint8_t
ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
{
pp->next_byte = addr;
pp->last_byte = (caddr_t)((ulong_t)addr + len);
if (ecpp_check_status(pp) == FAILURE) {
/*
* if status signals are bad, do not start PIO,
* put everything back on the queue.
*/
ecpp_error(pp->dip,
"ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
if (pp->msg != NULL) {
/*
* In this case we want to copy the untransferred
* section of msg to a new mblk, then free the
* original one.
*/
ecpp_putback_untransfered(pp,
(void *)pp->msg->b_rptr, len);
ecpp_error(pp->dip,
"ecpp_prep_pio_xfer: len1=%d\n", len);
freemsg(pp->msg);
pp->msg = NULL;
} else {
ecpp_putback_untransfered(pp, pp->ioblock, len);
ecpp_error(pp->dip,
"ecpp_prep_pio_xfer: len2=%d\n", len);
}
qenable(pp->writeq);
return (FAILURE);
}
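/* starting a fresh transfer: clear any DMA-cancel flag left over from a flush */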
pp->dma_cancelled = FALSE;
/* pport must be in PIO mode */
if (ecr_write(pp, ECR_mode_001 |
ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) {
ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n");
}
ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n",
DCR_READ(pp), ECR_READ(pp));
return (SUCCESS);
}
static uint8_t
ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
{
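/* ECR mode bits indexed by ECPP mode; zero entries are modes that do not use DMA */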
uint8_t ecr_mode[] = {
0,
ECR_mode_010, /* Centronics */
ECR_mode_010, /* Compat */
0, /* Byte */
0, /* Nibble */
ECR_mode_011, /* ECP */
0, /* Failure */
ECR_mode_110, /* Diag */