| /* |
| * CDDL HEADER START |
| * |
| * The contents of this file are subject to the terms of the |
| * Common Development and Distribution License (the "License"). |
| * You may not use this file except in compliance with the License. |
| * |
| * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
| * or http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| * |
| * When distributing Covered Code, include this CDDL HEADER in each |
| * file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
| * If applicable, add the following below this CDDL HEADER, with the |
| * fields enclosed by brackets "[]" replaced with your own identifying |
| * information: Portions Copyright [yyyy] [name of copyright owner] |
| * |
| * CDDL HEADER END |
| */ |
| |
| /* |
| * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved. |
| * Copyright (c) 2017, Joyent, Inc. |
| * Copyright (c) 2017 by Delphix. All rights reserved. |
| */ |
| |
| /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ |
| /* All Rights Reserved */ |
| |
| #include <sys/types.h> |
| #include <sys/param.h> |
| #include <sys/time.h> |
| #include <sys/cred.h> |
| #include <sys/policy.h> |
| #include <sys/debug.h> |
| #include <sys/dirent.h> |
| #include <sys/errno.h> |
| #include <sys/file.h> |
| #include <sys/inline.h> |
| #include <sys/kmem.h> |
| #include <sys/pathname.h> |
| #include <sys/proc.h> |
| #include <sys/brand.h> |
| #include <sys/signal.h> |
| #include <sys/stat.h> |
| #include <sys/sysmacros.h> |
| #include <sys/systm.h> |
| #include <sys/zone.h> |
| #include <sys/uio.h> |
| #include <sys/var.h> |
| #include <sys/mode.h> |
| #include <sys/poll.h> |
| #include <sys/user.h> |
| #include <sys/vfs.h> |
| #include <sys/vfs_opreg.h> |
| #include <sys/gfs.h> |
| #include <sys/vnode.h> |
| #include <sys/fault.h> |
| #include <sys/syscall.h> |
| #include <sys/procfs.h> |
| #include <sys/atomic.h> |
| #include <sys/cmn_err.h> |
| #include <sys/contract_impl.h> |
| #include <sys/ctfs.h> |
| #include <sys/avl.h> |
| #include <fs/fs_subr.h> |
| #include <vm/rm.h> |
| #include <vm/as.h> |
| #include <vm/seg.h> |
| #include <vm/seg_vn.h> |
| #include <vm/hat.h> |
| #include <fs/proc/prdata.h> |
| #if defined(__sparc) |
| #include <sys/regset.h> |
| #endif |
| #if defined(__x86) |
| #include <sys/sysi86.h> |
| #endif |
| |
| /* |
| * Created by prinit. |
| */ |
| vnodeops_t *prvnodeops; |
| |
| /* |
| * Directory characteristics (patterned after the s5 file system). |
| */ |
| #define PRROOTINO 2 |
| |
| #define PRDIRSIZE 14 |
| struct prdirect { |
| ushort_t d_ino; |
| char d_name[PRDIRSIZE]; |
| }; |
| |
| #define PRSDSIZE (sizeof (struct prdirect)) |
| |
| /* |
| * Directory characteristics. |
| */ |
| typedef struct prdirent { |
| ino64_t d_ino; /* "inode number" of entry */ |
| off64_t d_off; /* offset of disk directory entry */ |
| unsigned short d_reclen; /* length of this record */ |
| char d_name[14]; /* name of file */ |
| } prdirent_t; |
| |
| /* |
| * Contents of a /proc/<pid> directory. |
| * Reuse d_ino field for the /proc file type. |
| */ |
| static prdirent_t piddir[] = { |
| { PR_PIDDIR, 1 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "." }, |
| { PR_PROCDIR, 2 * sizeof (prdirent_t), sizeof (prdirent_t), |
| ".." }, |
| { PR_AS, 3 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "as" }, |
| { PR_CTL, 4 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "ctl" }, |
| { PR_STATUS, 5 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "status" }, |
| { PR_LSTATUS, 6 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lstatus" }, |
| { PR_PSINFO, 7 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "psinfo" }, |
| { PR_LPSINFO, 8 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lpsinfo" }, |
| { PR_MAP, 9 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "map" }, |
| { PR_RMAP, 10 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "rmap" }, |
| { PR_XMAP, 11 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "xmap" }, |
| { PR_CRED, 12 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "cred" }, |
| { PR_SIGACT, 13 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "sigact" }, |
| { PR_AUXV, 14 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "auxv" }, |
| { PR_USAGE, 15 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "usage" }, |
| { PR_LUSAGE, 16 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lusage" }, |
| { PR_PAGEDATA, 17 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "pagedata" }, |
| { PR_WATCH, 18 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "watch" }, |
| { PR_CURDIR, 19 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "cwd" }, |
| { PR_ROOTDIR, 20 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "root" }, |
| { PR_FDDIR, 21 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "fd" }, |
| { PR_OBJECTDIR, 22 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "object" }, |
| { PR_LWPDIR, 23 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lwp" }, |
| { PR_PRIV, 24 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "priv" }, |
| { PR_PATHDIR, 25 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "path" }, |
| { PR_CTDIR, 26 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "contracts" }, |
| { PR_SECFLAGS, 27 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "secflags" }, |
| #if defined(__x86) |
| { PR_LDT, 28 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "ldt" }, |
| #endif |
| }; |
| |
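| /* number of piddir entries, not counting "." and ".." */ |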
| #define NPIDDIRFILES (sizeof (piddir) / sizeof (piddir[0]) - 2) |
| |
| /* |
| * Contents of a /proc/<pid>/lwp/<lwpid> directory. |
| */ |
| static prdirent_t lwpiddir[] = { |
| { PR_LWPIDDIR, 1 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "." }, |
| { PR_LWPDIR, 2 * sizeof (prdirent_t), sizeof (prdirent_t), |
| ".." }, |
| { PR_LWPCTL, 3 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lwpctl" }, |
| { PR_LWPSTATUS, 4 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lwpstatus" }, |
| { PR_LWPSINFO, 5 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lwpsinfo" }, |
| { PR_LWPUSAGE, 6 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "lwpusage" }, |
| { PR_XREGS, 7 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "xregs" }, |
| { PR_TMPLDIR, 8 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "templates" }, |
| { PR_SPYMASTER, 9 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "spymaster" }, |
| #if defined(__sparc) |
| { PR_GWINDOWS, 10 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "gwindows" }, |
| { PR_ASRS, 11 * sizeof (prdirent_t), sizeof (prdirent_t), |
| "asrs" }, |
| #endif |
| }; |
| |
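| /* number of lwpiddir entries, not counting "." and ".." */ |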
| #define NLWPIDDIRFILES (sizeof (lwpiddir) / sizeof (lwpiddir[0]) - 2) |
| |
| /* |
| * Span of entries in the array files (lstatus, lpsinfo, lusage). |
| * We make the span larger than the size of the structure on purpose, |
| * to make sure that programs cannot use the structure size by mistake. |
| * Align _ILP32 structures at 8 bytes, _LP64 structures at 16 bytes. |
| */ |
| #ifdef _LP64 |
| #define LSPAN(type) (round16(sizeof (type)) + 16) |
| #define LSPAN32(type) (round8(sizeof (type)) + 8) |
| #else |
| #define LSPAN(type) (round8(sizeof (type)) + 8) |
| #endif |
| |
| static void rebuild_objdir(struct as *); |
| static void prfreecommon(prcommon_t *); |
| static int praccess(vnode_t *, int, int, cred_t *, caller_context_t *); |
| |
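| /* |
| * VOP_OPEN() entry point for /proc vnodes. |
| */ |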
| static int |
| propen(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) |
| { |
| vnode_t *vp = *vpp; |
| prnode_t *pnp = VTOP(vp); |
| prcommon_t *pcp = pnp->pr_pcommon; |
| prnodetype_t type = pnp->pr_type; |
| vnode_t *rvp; |
| vtype_t vtype; |
| proc_t *p; |
| int error = 0; |
| prnode_t *npnp = NULL; |
| |
| /* |
| * Nothing to do for the /proc directory itself. |
| */ |
| if (type == PR_PROCDIR) |
| return (0); |
| |
| /* |
| * If we are opening an underlying mapped object, reject opens |
| * for writing regardless of the object's access modes. |
| * If we are opening a file in the /proc/pid/fd directory, |
| * reject the open for any but a regular file or directory. |
| * Just do it if we are opening the current or root directory. |
| */ |
| switch (type) { |
| case PR_OBJECT: |
| case PR_FD: |
| case PR_CURDIR: |
| case PR_ROOTDIR: |
| rvp = pnp->pr_realvp; |
| vtype = rvp->v_type; |
| if ((type == PR_OBJECT && (flag & FWRITE)) || |
| (type == PR_FD && vtype != VREG && vtype != VDIR)) |
| error = EACCES; |
| else { |
| /* |
| * Need to hold rvp since VOP_OPEN() may release it. |
| */ |
| VN_HOLD(rvp); |
| error = VOP_OPEN(&rvp, flag, cr, ct); |
| if (error) { |
| VN_RELE(rvp); |
| } else { |
| *vpp = rvp; |
| VN_RELE(vp); |
| } |
| } |
| return (error); |
| default: |
| break; |
| } |
| |
| /* |
| * If we are opening the pagedata file, allocate a prnode now |
| * to avoid calling kmem_alloc() while holding p->p_lock. |
| */ |
| if (type == PR_PAGEDATA || type == PR_OPAGEDATA) |
| npnp = prgetnode(vp, type); |
| |
| /* |
| * If the process exists, lock it now. |
| * Otherwise we have a race condition with prclose(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) { |
| if (npnp != NULL) |
| prfreenode(npnp); |
| return (ENOENT); |
| } |
| ASSERT(p == pcp->prc_proc); |
| ASSERT(p->p_proc_flag & P_PR_LOCK); |
| |
| /* |
| * Maintain a count of opens for write. Allow exactly one |
| * O_WRITE|O_EXCL request and fail subsequent ones. |
| * Don't fail opens of old (bletch!) /proc lwp files. |
| * Special case for open by the process itself: |
| * Always allow the open by self and do not count it |
| * against exclusive opens for writing by others. |
| */ |
| if (flag & FWRITE) { |
| if (p == curproc) { |
| pcp->prc_selfopens++; |
| pnp->pr_flags |= PR_ISSELF; |
| } else if (type == PR_LWPIDFILE) { |
| /* EMPTY */; |
| } else if (flag & FEXCL) { |
| if (pcp->prc_writers > pcp->prc_selfopens) { |
| error = EBUSY; |
| goto out; |
| } |
| /* semantic for old /proc interface */ |
| if (type == PR_PIDDIR) |
| pcp->prc_flags |= PRC_EXCL; |
| } else if (pcp->prc_flags & PRC_EXCL) { |
| ASSERT(pcp->prc_writers > pcp->prc_selfopens); |
| error = secpolicy_proc_excl_open(cr); |
| if (error) |
| goto out; |
| } |
| pcp->prc_writers++; |
| /* |
| * The vnode may have become invalid between the |
| * VOP_LOOKUP() of the /proc vnode and the VOP_OPEN(). |
| * If so, do now what prinvalidate() should have done. |
| */ |
| if ((pnp->pr_flags & PR_INVAL) || |
| (type == PR_PIDDIR && |
| (VTOP(pnp->pr_pidfile)->pr_flags & PR_INVAL))) { |
| if (p != curproc) |
| pcp->prc_selfopens++; |
| ASSERT(pcp->prc_selfopens <= pcp->prc_writers); |
| if (pcp->prc_selfopens == pcp->prc_writers) |
| pcp->prc_flags &= ~PRC_EXCL; |
| } |
| } |
| |
| /* |
| * If this is a large file open, indicate that in our flags -- some |
| * procfs structures are not off_t-neutral (e.g., priovec_t), and |
| * the open will need to be differentiated where 32-bit processes |
| * pass these structures across the user/kernel boundary. |
| */ |
| if (flag & FOFFMAX) |
| pnp->pr_flags |= PR_OFFMAX; |
| |
| /* |
| * Do file-specific things. |
| */ |
| switch (type) { |
| default: |
| break; |
| case PR_PAGEDATA: |
| case PR_OPAGEDATA: |
| /* |
| * Enable data collection for page data file; |
| * get unique id from the hat layer. |
| */ |
| { |
| int id; |
| |
| /* |
| * Drop p->p_lock to call hat_startstat() |
| */ |
| mutex_exit(&p->p_lock); |
| if ((p->p_flag & SSYS) || p->p_as == &kas || |
| (id = hat_startstat(p->p_as)) == -1) { |
| mutex_enter(&p->p_lock); |
| error = ENOMEM; |
| } else if (pnp->pr_hatid == 0) { |
| mutex_enter(&p->p_lock); |
| pnp->pr_hatid = (uint_t)id; |
| } else { |
| mutex_enter(&p->p_lock); |
| /* |
| * Use our newly allocated prnode. |
| */ |
| npnp->pr_hatid = (uint_t)id; |
| /* |
| * prgetnode() initialized most of the prnode. |
| * Duplicate the remainder. |
| */ |
| npnp->pr_ino = pnp->pr_ino; |
| npnp->pr_common = pnp->pr_common; |
| npnp->pr_pcommon = pnp->pr_pcommon; |
| npnp->pr_parent = pnp->pr_parent; |
| VN_HOLD(npnp->pr_parent); |
| npnp->pr_index = pnp->pr_index; |
| |
| npnp->pr_next = p->p_plist; |
| p->p_plist = PTOV(npnp); |
| |
| VN_RELE(PTOV(pnp)); |
| pnp = npnp; |
| npnp = NULL; |
| *vpp = PTOV(pnp); |
| } |
| } |
| break; |
| } |
| |
| out: |
| prunlock(pnp); |
| |
| if (npnp != NULL) |
| prfreenode(npnp); |
| return (error); |
| } |
| |
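| /* |
| * VOP_CLOSE() entry point for /proc vnodes. |
| */ |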
| /* ARGSUSED */ |
| static int |
| prclose(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, |
| caller_context_t *ct) |
| { |
| prnode_t *pnp = VTOP(vp); |
| prcommon_t *pcp = pnp->pr_pcommon; |
| prnodetype_t type = pnp->pr_type; |
| proc_t *p; |
| kthread_t *t; |
| user_t *up; |
| |
| /* |
| * Nothing to do for the /proc directory itself. |
| */ |
| if (type == PR_PROCDIR) |
| return (0); |
| |
| ASSERT(type != PR_OBJECT && type != PR_FD && |
| type != PR_CURDIR && type != PR_ROOTDIR); |
| |
| /* |
| * If the process exists, lock it now. |
| * Otherwise we have a race condition with propen(). |
| * Hold pr_pidlock across the reference to prc_selfopens, |
| * and prc_writers in case there is no process anymore, |
| * to cover the case of concurrent calls to prclose() |
| * after the process has been reaped by freeproc(). |
| */ |
| p = pr_p_lock(pnp); |
| |
| /* |
| * There is nothing more to do until the last close of |
| * the file table entry except to clear the pr_owner |
| * field of the prnode and notify any waiters |
| * (their file descriptor may have just been closed). |
| */ |
| if (count > 1) { |
| mutex_exit(&pr_pidlock); |
| if (pnp->pr_owner == curproc && !fisopen(vp)) |
| pnp->pr_owner = NULL; |
| if (p != NULL) { |
| prnotify(vp); |
| prunlock(pnp); |
| } |
| return (0); |
| } |
| |
| /* |
| * Decrement the count of self-opens for writing. |
| * Decrement the total count of opens for writing. |
| * Cancel exclusive opens when only self-opens remain. |
| */ |
| if (flag & FWRITE) { |
| /* |
| * prc_selfopens also contains the count of |
| * invalid writers. See prinvalidate(). |
| */ |
| if ((pnp->pr_flags & (PR_ISSELF|PR_INVAL)) || |
| (type == PR_PIDDIR && |
| (VTOP(pnp->pr_pidfile)->pr_flags & PR_INVAL))) { |
| ASSERT(pcp->prc_selfopens != 0); |
| --pcp->prc_selfopens; |
| } |
| ASSERT(pcp->prc_writers != 0); |
| if (--pcp->prc_writers == pcp->prc_selfopens) |
| pcp->prc_flags &= ~PRC_EXCL; |
| } |
| ASSERT(pcp->prc_writers >= pcp->prc_selfopens); |
| mutex_exit(&pr_pidlock); |
| if (pnp->pr_owner == curproc && !fisopen(vp)) |
| pnp->pr_owner = NULL; |
| |
| /* |
| * If there is no process, there is nothing more to do. |
| */ |
| if (p == NULL) |
| return (0); |
| |
| ASSERT(p == pcp->prc_proc); |
| prnotify(vp); /* notify waiters */ |
| |
| /* |
| * Do file-specific things. |
| */ |
| switch (type) { |
| default: |
| break; |
| case PR_PAGEDATA: |
| case PR_OPAGEDATA: |
| /* |
| * This is a page data file. |
| * Free the hat level statistics. |
| * Drop p->p_lock before calling hat_freestat(). |
| */ |
| mutex_exit(&p->p_lock); |
| if (p->p_as != &kas && pnp->pr_hatid != 0) |
| hat_freestat(p->p_as, pnp->pr_hatid); |
| mutex_enter(&p->p_lock); |
| pnp->pr_hatid = 0; |
| break; |
| } |
| |
| /* |
| * On last close of all writable file descriptors, |
| * perform run-on-last-close and/or kill-on-last-close logic. |
| * Can't do this if the /proc agent lwp still exists. |
| */ |
| if (pcp->prc_writers == 0 && |
| p->p_agenttp == NULL && |
| !(pcp->prc_flags & PRC_DESTROY) && |
| p->p_stat != SZOMB && |
| (p->p_proc_flag & (P_PR_RUNLCL|P_PR_KILLCL))) { |
| int killproc; |
| |
| /* |
| * Cancel any watchpoints currently in effect. |
| * The process might disappear during this operation. |
| */ |
| if (pr_cancel_watch(pnp) == NULL) |
| return (0); |
| /* |
| * If any tracing flags are set, clear them. |
| */ |
| if (p->p_proc_flag & P_PR_TRACE) { |
| up = PTOU(p); |
| premptyset(&up->u_entrymask); |
| premptyset(&up->u_exitmask); |
| up->u_systrap = 0; |
| } |
| premptyset(&p->p_sigmask); |
| premptyset(&p->p_fltmask); |
| killproc = (p->p_proc_flag & P_PR_KILLCL); |
| p->p_proc_flag &= ~(P_PR_RUNLCL|P_PR_KILLCL|P_PR_TRACE); |
| /* |
| * Cancel any outstanding single-step requests. |
| */ |
| if ((t = p->p_tlist) != NULL) { |
| /* |
| * Drop p_lock because prnostep() touches the stack. |
| * The loop is safe because the process is P_PR_LOCK'd. |
| */ |
| mutex_exit(&p->p_lock); |
| do { |
| prnostep(ttolwp(t)); |
| } while ((t = t->t_forw) != p->p_tlist); |
| mutex_enter(&p->p_lock); |
| } |
| /* |
| * Make runnable all lwps stopped by /proc, or kill |
| * the process if kill-on-last-close was requested. |
| */ |
| if (killproc) |
| sigtoproc(p, NULL, SIGKILL); |
| else |
| allsetrun(p); |
| } |
| |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* |
| * Array of read functions, indexed by /proc file type. |
| */ |
| static int pr_read_inval(), pr_read_as(), pr_read_status(), |
| pr_read_lstatus(), pr_read_psinfo(), pr_read_lpsinfo(), |
| pr_read_map(), pr_read_rmap(), pr_read_xmap(), |
| pr_read_cred(), pr_read_sigact(), pr_read_auxv(), |
| #if defined(__x86) |
| pr_read_ldt(), |
| #endif |
| pr_read_usage(), pr_read_lusage(), pr_read_pagedata(), |
| pr_read_watch(), pr_read_lwpstatus(), pr_read_lwpsinfo(), |
| pr_read_lwpusage(), pr_read_xregs(), pr_read_priv(), |
| pr_read_spymaster(), pr_read_secflags(), |
| #if defined(__sparc) |
| pr_read_gwindows(), pr_read_asrs(), |
| #endif |
| pr_read_piddir(), pr_read_pidfile(), pr_read_opagedata(); |
| |
| static int (*pr_read_function[PR_NFILES])() = { |
| pr_read_inval, /* /proc */ |
| pr_read_inval, /* /proc/self */ |
| pr_read_piddir, /* /proc/<pid> (old /proc read()) */ |
| pr_read_as, /* /proc/<pid>/as */ |
| pr_read_inval, /* /proc/<pid>/ctl */ |
| pr_read_status, /* /proc/<pid>/status */ |
| pr_read_lstatus, /* /proc/<pid>/lstatus */ |
| pr_read_psinfo, /* /proc/<pid>/psinfo */ |
| pr_read_lpsinfo, /* /proc/<pid>/lpsinfo */ |
| pr_read_map, /* /proc/<pid>/map */ |
| pr_read_rmap, /* /proc/<pid>/rmap */ |
| pr_read_xmap, /* /proc/<pid>/xmap */ |
| pr_read_cred, /* /proc/<pid>/cred */ |
| pr_read_sigact, /* /proc/<pid>/sigact */ |
| pr_read_auxv, /* /proc/<pid>/auxv */ |
| #if defined(__x86) |
| pr_read_ldt, /* /proc/<pid>/ldt */ |
| #endif |
| pr_read_usage, /* /proc/<pid>/usage */ |
| pr_read_lusage, /* /proc/<pid>/lusage */ |
| pr_read_pagedata, /* /proc/<pid>/pagedata */ |
| pr_read_watch, /* /proc/<pid>/watch */ |
| pr_read_inval, /* /proc/<pid>/cwd */ |
| pr_read_inval, /* /proc/<pid>/root */ |
| pr_read_inval, /* /proc/<pid>/fd */ |
| pr_read_inval, /* /proc/<pid>/fd/nn */ |
| pr_read_inval, /* /proc/<pid>/object */ |
| pr_read_inval, /* /proc/<pid>/object/xxx */ |
| pr_read_inval, /* /proc/<pid>/lwp */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid> */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/lwpctl */ |
| pr_read_lwpstatus, /* /proc/<pid>/lwp/<lwpid>/lwpstatus */ |
| pr_read_lwpsinfo, /* /proc/<pid>/lwp/<lwpid>/lwpsinfo */ |
| pr_read_lwpusage, /* /proc/<pid>/lwp/<lwpid>/lwpusage */ |
| pr_read_xregs, /* /proc/<pid>/lwp/<lwpid>/xregs */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/templates */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/templates/<id> */ |
| pr_read_spymaster, /* /proc/<pid>/lwp/<lwpid>/spymaster */ |
| #if defined(__sparc) |
| pr_read_gwindows, /* /proc/<pid>/lwp/<lwpid>/gwindows */ |
| pr_read_asrs, /* /proc/<pid>/lwp/<lwpid>/asrs */ |
| #endif |
| pr_read_priv, /* /proc/<pid>/priv */ |
| pr_read_inval, /* /proc/<pid>/path */ |
| pr_read_inval, /* /proc/<pid>/path/xxx */ |
| pr_read_inval, /* /proc/<pid>/contracts */ |
| pr_read_inval, /* /proc/<pid>/contracts/<ctid> */ |
| pr_read_secflags, /* /proc/<pid>/secflags */ |
| pr_read_pidfile, /* old process file */ |
| pr_read_pidfile, /* old lwp file */ |
| pr_read_opagedata, /* old pagedata file */ |
| }; |
| |
| /* ARGSUSED */ |
| static int |
| pr_read_inval(prnode_t *pnp, uio_t *uiop) |
| { |
| /* |
| * No read() on any /proc directory, use getdents(2) instead. |
| * Cannot read a control file either. |
| * An underlying mapped object file cannot get here. |
| */ |
| return (EINVAL); |
| } |
| |
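| /* |
| * Copy out the part of a fixed-size kernel buffer that lies beyond |
| * the current file offset.  A read at or past the end of the buffer |
| * transfers nothing and returns 0 (end-of-file). |
| */ |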
| static int |
| pr_uioread(void *base, long count, uio_t *uiop) |
| { |
| int error = 0; |
| |
| ASSERT(count >= 0); |
| count -= uiop->uio_offset; |
| if (count > 0 && uiop->uio_offset >= 0) { |
| error = uiomove((char *)base + uiop->uio_offset, |
| count, UIO_READ, uiop); |
| } |
| |
| return (error); |
| } |
| |
| static int |
| pr_read_as(prnode_t *pnp, uio_t *uiop) |
| { |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_AS); |
| |
| if ((error = prlock(pnp, ZNO)) == 0) { |
| proc_t *p = pnp->pr_common->prc_proc; |
| struct as *as = p->p_as; |
| |
| /* |
| * /proc I/O cannot be done to a system process. |
| * A 32-bit process cannot read a 64-bit process. |
| */ |
| if ((p->p_flag & SSYS) || as == &kas) { |
| error = 0; |
| #ifdef _SYSCALL32_IMPL |
| } else if (curproc->p_model == DATAMODEL_ILP32 && |
| PROCESS_NOT_32BIT(p)) { |
| error = EOVERFLOW; |
| #endif |
| } else { |
| /* |
| * We don't hold p_lock over an i/o operation because |
| * that could lead to deadlock with the clock thread. |
| */ |
| mutex_exit(&p->p_lock); |
| error = prusrio(p, UIO_READ, uiop, 0); |
| mutex_enter(&p->p_lock); |
| } |
| prunlock(pnp); |
| } |
| |
| return (error); |
| } |
| |
| static int |
| pr_read_status(prnode_t *pnp, uio_t *uiop) |
| { |
| pstatus_t *sp; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_STATUS); |
| |
| /* |
| * We kmem_alloc() the pstatus structure because |
| * it is so big it might blow the kernel stack. |
| */ |
| sp = kmem_alloc(sizeof (*sp), KM_SLEEP); |
| if ((error = prlock(pnp, ZNO)) == 0) { |
| prgetstatus(pnp->pr_common->prc_proc, sp, VTOZONE(PTOV(pnp))); |
| prunlock(pnp); |
| error = pr_uioread(sp, sizeof (*sp), uiop); |
| } |
| kmem_free(sp, sizeof (*sp)); |
| return (error); |
| } |
| |
| static int |
| pr_read_lstatus(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| size_t size; |
| prheader_t *php; |
| lwpstatus_t *sp; |
| int error; |
| int nlwp; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LSTATUS); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| p = pnp->pr_common->prc_proc; |
| nlwp = p->p_lwpcnt; |
| size = sizeof (prheader_t) + nlwp * LSPAN(lwpstatus_t); |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| php = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt); |
| |
| php->pr_nent = nlwp; |
| php->pr_entsize = LSPAN(lwpstatus_t); |
| |
| sp = (lwpstatus_t *)(php + 1); |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if (ldp->ld_entry == NULL || |
| (t = ldp->ld_entry->le_thread) == NULL) |
| continue; |
| prgetlwpstatus(t, sp, VTOZONE(PTOV(pnp))); |
| sp = (lwpstatus_t *)((caddr_t)sp + LSPAN(lwpstatus_t)); |
| } |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(php, size); |
| return (error); |
| } |
| |
| static int |
| pr_read_psinfo(prnode_t *pnp, uio_t *uiop) |
| { |
| psinfo_t psinfo; |
| proc_t *p; |
| int error = 0; |
| |
| ASSERT(pnp->pr_type == PR_PSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| error = ENOENT; |
| else { |
| ASSERT(p == pnp->pr_common->prc_proc); |
| prgetpsinfo(p, &psinfo); |
| prunlock(pnp); |
| error = pr_uioread(&psinfo, sizeof (psinfo), uiop); |
| } |
| return (error); |
| } |
| |
| static int |
| pr_read_lpsinfo(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| lwpent_t *lep; |
| size_t size; |
| prheader_t *php; |
| lwpsinfo_t *sp; |
| int error; |
| int nlwp; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LPSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if ((nlwp = p->p_lwpcnt + p->p_zombcnt) == 0) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| size = sizeof (prheader_t) + nlwp * LSPAN(lwpsinfo_t); |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| php = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt + p->p_zombcnt); |
| |
| php->pr_nent = nlwp; |
| php->pr_entsize = LSPAN(lwpsinfo_t); |
| |
| sp = (lwpsinfo_t *)(php + 1); |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if ((lep = ldp->ld_entry) == NULL) |
| continue; |
| if ((t = lep->le_thread) != NULL) |
| prgetlwpsinfo(t, sp); |
| else { |
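| /* zombie lwp: synthesize an entry from its lwp directory entry */ |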
| bzero(sp, sizeof (*sp)); |
| sp->pr_lwpid = lep->le_lwpid; |
| sp->pr_state = SZOMB; |
| sp->pr_sname = 'Z'; |
| sp->pr_start.tv_sec = lep->le_start; |
| sp->pr_bindpro = PBIND_NONE; |
| sp->pr_bindpset = PS_NONE; |
| } |
| sp = (lwpsinfo_t *)((caddr_t)sp + LSPAN(lwpsinfo_t)); |
| } |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(php, size); |
| return (error); |
| } |
| |
| static int |
| pr_read_map_common(prnode_t *pnp, uio_t *uiop, prnodetype_t type) |
| { |
| proc_t *p; |
| struct as *as; |
| list_t iolhead; |
| int error; |
| |
| readmap_common: |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| as = p->p_as; |
| |
| if ((p->p_flag & SSYS) || as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
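| /* |
| * Take the address space lock without blocking; if it is busy, |
| * drop the process lock, pause briefly and retry from the top. |
| */ |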
| if (!AS_LOCK_TRYENTER(as, RW_WRITER)) { |
| prunlock(pnp); |
| delay(1); |
| goto readmap_common; |
| } |
| mutex_exit(&p->p_lock); |
| |
| switch (type) { |
| case PR_XMAP: |
| error = prgetxmap(p, &iolhead); |
| break; |
| case PR_RMAP: |
| error = prgetmap(p, 1, &iolhead); |
| break; |
| case PR_MAP: |
| error = prgetmap(p, 0, &iolhead); |
| break; |
| } |
| |
| AS_LOCK_EXIT(as); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_iol_uiomove_and_free(&iolhead, uiop, error); |
| |
| return (error); |
| } |
| |
| static int |
| pr_read_map(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_MAP); |
| return (pr_read_map_common(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_rmap(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_RMAP); |
| return (pr_read_map_common(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_xmap(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_XMAP); |
| return (pr_read_map_common(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_cred(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| prcred_t *pcrp; |
| int error; |
| size_t count; |
| |
| ASSERT(pnp->pr_type == PR_CRED); |
| |
| /* |
| * We kmem_alloc() the prcred_t structure because |
| * the number of supplementary groups is variable. |
| */ |
| pcrp = |
| kmem_alloc(sizeof (prcred_t) + sizeof (gid_t) * (ngroups_max - 1), |
| KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| p = pnp->pr_common->prc_proc; |
| ASSERT(p != NULL); |
| |
| prgetcred(p, pcrp); |
| prunlock(pnp); |
| |
| count = sizeof (prcred_t); |
| if (pcrp->pr_ngroups > 1) |
| count += sizeof (gid_t) * (pcrp->pr_ngroups - 1); |
| error = pr_uioread(pcrp, count, uiop); |
| out: |
| kmem_free(pcrp, sizeof (prcred_t) + sizeof (gid_t) * (ngroups_max - 1)); |
| return (error); |
| } |
| |
| static int |
| pr_read_priv(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| size_t psize = prgetprivsize(); |
| prpriv_t *ppriv = kmem_alloc(psize, KM_SLEEP); |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_PRIV); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| p = pnp->pr_common->prc_proc; |
| ASSERT(p != NULL); |
| |
| prgetpriv(p, ppriv); |
| prunlock(pnp); |
| |
| error = pr_uioread(ppriv, psize, uiop); |
| out: |
| kmem_free(ppriv, psize); |
| return (error); |
| } |
| |
| static int |
| pr_read_sigact(prnode_t *pnp, uio_t *uiop) |
| { |
| int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG; |
| proc_t *p; |
| struct sigaction *sap; |
| int sig; |
| int error; |
| user_t *up; |
| |
| ASSERT(pnp->pr_type == PR_SIGACT); |
| |
| /* |
| * We kmem_alloc() the sigaction array because |
| * it is so big it might blow the kernel stack. |
| */ |
| sap = kmem_alloc((nsig-1) * sizeof (struct sigaction), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| p = pnp->pr_common->prc_proc; |
| ASSERT(p != NULL); |
| |
| if (uiop->uio_offset >= (nsig-1)*sizeof (struct sigaction)) { |
| prunlock(pnp); |
| goto out; |
| } |
| |
| up = PTOU(p); |
| for (sig = 1; sig < nsig; sig++) |
| prgetaction(p, up, sig, &sap[sig-1]); |
| prunlock(pnp); |
| |
| error = pr_uioread(sap, (nsig - 1) * sizeof (struct sigaction), uiop); |
| out: |
| kmem_free(sap, (nsig-1) * sizeof (struct sigaction)); |
| return (error); |
| } |
| |
| static int |
| pr_read_auxv(prnode_t *pnp, uio_t *uiop) |
| { |
| auxv_t auxv[__KERN_NAUXV_IMPL]; |
| proc_t *p; |
| user_t *up; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_AUXV); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| if (uiop->uio_offset >= sizeof (auxv)) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| p = pnp->pr_common->prc_proc; |
| up = PTOU(p); |
| bcopy(up->u_auxv, auxv, sizeof (auxv)); |
| prunlock(pnp); |
| |
| return (pr_uioread(auxv, sizeof (auxv), uiop)); |
| } |
| |
| #if defined(__x86) |
| /* |
| * XX64 |
| * This is almost certainly broken for the amd64 kernel, because |
| * we have two kinds of LDT structures to export -- one for compatibility |
| * mode, and one for long mode, sigh. |
| * |
| * For now, let's just have an LDT of size 0 for 64-bit processes. |
| */ |
| static int |
| pr_read_ldt(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| struct ssd *ssd; |
| size_t size; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_LDT); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| p = pnp->pr_common->prc_proc; |
| |
| mutex_exit(&p->p_lock); |
| mutex_enter(&p->p_ldtlock); |
| size = prnldt(p) * sizeof (struct ssd); |
| if (uiop->uio_offset >= size) { |
| mutex_exit(&p->p_ldtlock); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| return (0); |
| } |
| |
| ssd = kmem_alloc(size, KM_SLEEP); |
| prgetldt(p, ssd); |
| mutex_exit(&p->p_ldtlock); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_uioread(ssd, size, uiop); |
| kmem_free(ssd, size); |
| return (error); |
| } |
| #endif /* __x86 */ |
| |
| static int |
| pr_read_usage(prnode_t *pnp, uio_t *uiop) |
| { |
| prhusage_t *pup; |
| prusage_t *upup; |
| proc_t *p; |
| kthread_t *t; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_USAGE); |
| |
| /* allocate now, before locking the process */ |
| pup = kmem_zalloc(sizeof (*pup), KM_SLEEP); |
| upup = kmem_alloc(sizeof (*upup), KM_SLEEP); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) { |
| error = ENOENT; |
| goto out; |
| } |
| ASSERT(p == pnp->pr_common->prc_proc); |
| |
| if (uiop->uio_offset >= sizeof (prusage_t)) { |
| prunlock(pnp); |
| error = 0; |
| goto out; |
| } |
| |
| pup->pr_tstamp = gethrtime(); |
| |
| pup->pr_count = p->p_defunct; |
| pup->pr_create = p->p_mstart; |
| pup->pr_term = p->p_mterm; |
| |
| pup->pr_rtime = p->p_mlreal; |
| pup->pr_utime = p->p_acct[LMS_USER]; |
| pup->pr_stime = p->p_acct[LMS_SYSTEM]; |
| pup->pr_ttime = p->p_acct[LMS_TRAP]; |
| pup->pr_tftime = p->p_acct[LMS_TFAULT]; |
| pup->pr_dftime = p->p_acct[LMS_DFAULT]; |
| pup->pr_kftime = p->p_acct[LMS_KFAULT]; |
| pup->pr_ltime = p->p_acct[LMS_USER_LOCK]; |
| pup->pr_slptime = p->p_acct[LMS_SLEEP]; |
| pup->pr_wtime = p->p_acct[LMS_WAIT_CPU]; |
| pup->pr_stoptime = p->p_acct[LMS_STOPPED]; |
| |
| pup->pr_minf = p->p_ru.minflt; |
| pup->pr_majf = p->p_ru.majflt; |
| pup->pr_nswap = p->p_ru.nswap; |
| pup->pr_inblk = p->p_ru.inblock; |
| pup->pr_oublk = p->p_ru.oublock; |
| pup->pr_msnd = p->p_ru.msgsnd; |
| pup->pr_mrcv = p->p_ru.msgrcv; |
| pup->pr_sigs = p->p_ru.nsignals; |
| pup->pr_vctx = p->p_ru.nvcsw; |
| pup->pr_ictx = p->p_ru.nivcsw; |
| pup->pr_sysc = p->p_ru.sysc; |
| pup->pr_ioch = p->p_ru.ioch; |
| |
| /* |
| * Add the usage information for each active lwp. |
| */ |
| if ((t = p->p_tlist) != NULL && |
| !(pnp->pr_pcommon->prc_flags & PRC_DESTROY)) { |
| do { |
| if (t->t_proc_flag & TP_LWPEXIT) |
| continue; |
| pup->pr_count++; |
| praddusage(t, pup); |
| } while ((t = t->t_forw) != p->p_tlist); |
| } |
| |
| prunlock(pnp); |
| |
| prcvtusage(pup, upup); |
| |
| error = pr_uioread(upup, sizeof (prusage_t), uiop); |
| out: |
| kmem_free(pup, sizeof (*pup)); |
| kmem_free(upup, sizeof (*upup)); |
| return (error); |
| } |
| |
| static int |
| pr_read_lusage(prnode_t *pnp, uio_t *uiop) |
| { |
| int nlwp; |
| prhusage_t *pup; |
| prheader_t *php; |
| prusage_t *upup; |
| size_t size; |
| hrtime_t curtime; |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| int error; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LUSAGE); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if ((nlwp = p->p_lwpcnt) == 0) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| |
| size = sizeof (prheader_t) + (nlwp + 1) * LSPAN(prusage_t); |
| if (uiop->uio_offset >= size) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| pup = kmem_zalloc(size + sizeof (prhusage_t), KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt); |
| |
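| /* |
| * The allocation holds a scratch prhusage_t followed by the |
| * prheader_t and prusage_t entries that are copied out. |
| */ |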
| php = (prheader_t *)(pup + 1); |
| upup = (prusage_t *)(php + 1); |
| |
| php->pr_nent = nlwp + 1; |
| php->pr_entsize = LSPAN(prusage_t); |
| |
| curtime = gethrtime(); |
| |
| /* |
| * First the summation over defunct lwps. |
| */ |
| pup->pr_count = p->p_defunct; |
| pup->pr_tstamp = curtime; |
| pup->pr_create = p->p_mstart; |
| pup->pr_term = p->p_mterm; |
| |
| pup->pr_rtime = p->p_mlreal; |
| pup->pr_utime = p->p_acct[LMS_USER]; |
| pup->pr_stime = p->p_acct[LMS_SYSTEM]; |
| pup->pr_ttime = p->p_acct[LMS_TRAP]; |
| pup->pr_tftime = p->p_acct[LMS_TFAULT]; |
| pup->pr_dftime = p->p_acct[LMS_DFAULT]; |
| pup->pr_kftime = p->p_acct[LMS_KFAULT]; |
| pup->pr_ltime = p->p_acct[LMS_USER_LOCK]; |
| pup->pr_slptime = p->p_acct[LMS_SLEEP]; |
| pup->pr_wtime = p->p_acct[LMS_WAIT_CPU]; |
| pup->pr_stoptime = p->p_acct[LMS_STOPPED]; |
| |
| pup->pr_minf = p->p_ru.minflt; |
| pup->pr_majf = p->p_ru.majflt; |
| pup->pr_nswap = p->p_ru.nswap; |
| pup->pr_inblk = p->p_ru.inblock; |
| pup->pr_oublk = p->p_ru.oublock; |
| pup->pr_msnd = p->p_ru.msgsnd; |
| pup->pr_mrcv = p->p_ru.msgrcv; |
| pup->pr_sigs = p->p_ru.nsignals; |
| pup->pr_vctx = p->p_ru.nvcsw; |
| pup->pr_ictx = p->p_ru.nivcsw; |
| pup->pr_sysc = p->p_ru.sysc; |
| pup->pr_ioch = p->p_ru.ioch; |
| |
| prcvtusage(pup, upup); |
| |
| /* |
| * Fill one prusage struct for each active lwp. |
| */ |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if (ldp->ld_entry == NULL || |
| (t = ldp->ld_entry->le_thread) == NULL) |
| continue; |
| ASSERT(!(t->t_proc_flag & TP_LWPEXIT)); |
| ASSERT(nlwp > 0); |
| --nlwp; |
| upup = (prusage_t *)((caddr_t)upup + LSPAN(prusage_t)); |
| prgetusage(t, pup); |
| prcvtusage(pup, upup); |
| } |
| ASSERT(nlwp == 0); |
| |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(pup, size + sizeof (prhusage_t)); |
| return (error); |
| } |
| |
| static int |
| pr_read_pagedata(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_PAGEDATA); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| if ((p->p_flag & SSYS) || p->p_as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| mutex_exit(&p->p_lock); |
| error = prpdread(p, pnp->pr_hatid, uiop); |
| mutex_enter(&p->p_lock); |
| |
| prunlock(pnp); |
| return (error); |
| } |
| |
| static int |
| pr_read_opagedata(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| struct as *as; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_OPAGEDATA); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| as = p->p_as; |
| if ((p->p_flag & SSYS) || as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| mutex_exit(&p->p_lock); |
| error = oprpdread(as, pnp->pr_hatid, uiop); |
| mutex_enter(&p->p_lock); |
| |
| prunlock(pnp); |
| return (error); |
| } |
| |
| static int |
| pr_read_watch(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| int error; |
| prwatch_t *Bpwp; |
| size_t size; |
| prwatch_t *pwp; |
| int nwarea; |
| struct watched_area *pwarea; |
| |
| ASSERT(pnp->pr_type == PR_WATCH); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| nwarea = avl_numnodes(&p->p_warea); |
| size = nwarea * sizeof (prwatch_t); |
| if (uiop->uio_offset >= size) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| Bpwp = pwp = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* the number of watched areas can't change while the process is locked */ |
| ASSERT(nwarea == avl_numnodes(&p->p_warea)); |
| |
| /* gather the watched areas */ |
| for (pwarea = avl_first(&p->p_warea); pwarea != NULL; |
| pwarea = AVL_NEXT(&p->p_warea, pwarea), pwp++) { |
| pwp->pr_vaddr = (uintptr_t)pwarea->wa_vaddr; |
| pwp->pr_size = pwarea->wa_eaddr - pwarea->wa_vaddr; |
| pwp->pr_wflags = (int)pwarea->wa_flags; |
| } |
| |
| prunlock(pnp); |
| |
| error = pr_uioread(Bpwp, size, uiop); |
| kmem_free(Bpwp, size); |
| return (error); |
| } |
| |
| static int |
| pr_read_lwpstatus(prnode_t *pnp, uio_t *uiop) |
| { |
| lwpstatus_t *sp; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_LWPSTATUS); |
| |
| /* |
| * We kmem_alloc() the lwpstatus structure because |
| * it is so big it might blow the kernel stack. |
| */ |
| sp = kmem_alloc(sizeof (*sp), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| |
| if (uiop->uio_offset >= sizeof (*sp)) { |
| prunlock(pnp); |
| goto out; |
| } |
| |
| prgetlwpstatus(pnp->pr_common->prc_thread, sp, VTOZONE(PTOV(pnp))); |
| prunlock(pnp); |
| |
| error = pr_uioread(sp, sizeof (*sp), uiop); |
| out: |
| kmem_free(sp, sizeof (*sp)); |
| return (error); |
| } |
| |
| static int |
| pr_read_lwpsinfo(prnode_t *pnp, uio_t *uiop) |
| { |
| lwpsinfo_t lwpsinfo; |
| proc_t *p; |
| kthread_t *t; |
| lwpent_t *lep; |
| |
| ASSERT(pnp->pr_type == PR_LWPSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if (pnp->pr_common->prc_tslot == -1) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| |
| if (uiop->uio_offset >= sizeof (lwpsinfo)) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| if ((t = pnp->pr_common->prc_thread) != NULL) |
| prgetlwpsinfo(t, &lwpsinfo); |
| else { |
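| /* zombie lwp: synthesize an entry from its lwp directory slot */ |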
| lep = p->p_lwpdir[pnp->pr_common->prc_tslot].ld_entry; |
| bzero(&lwpsinfo, sizeof (lwpsinfo)); |
| lwpsinfo.pr_lwpid = lep->le_lwpid; |
| lwpsinfo.pr_state = SZOMB; |
| lwpsinfo.pr_sname = 'Z'; |
| lwpsinfo.pr_start.tv_sec = lep->le_start; |
| lwpsinfo.pr_bindpro = PBIND_NONE; |
| lwpsinfo.pr_bindpset = PS_NONE; |
| } |
| prunlock(pnp); |
| |
| return (pr_uioread(&lwpsinfo, sizeof (lwpsinfo), uiop)); |
| } |
| |
| static int |
| pr_read_lwpusage(prnode_t *pnp, uio_t *uiop) |
| { |
| prhusage_t *pup; |
| prusage_t *upup; |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_LWPUSAGE); |
| |
| /* allocate now, before locking the process */ |
| pup = kmem_zalloc(sizeof (*pup), KM_SLEEP); |
| upup = kmem_alloc(sizeof (*upup), KM_SLEEP); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) { |
| error = ENOENT; |
| goto out; |
| } |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if (pnp->pr_common->prc_thread == NULL) { |
| prunlock(pnp); |
| error = ENOENT; |
| goto out; |
| } |
| if (uiop->uio_offset >= sizeof (prusage_t)) { |
| prunlock(pnp); |
| error = 0; |
| goto out; |
| } |
| |
| pup->pr_tstamp = gethrtime(); |
| prgetusage(pnp->pr_common->prc_thread, pup); |
| |
| prunlock(pnp); |
| |
| prcvtusage(pup, upup); |
| |
| error = pr_uioread(upup, sizeof (prusage_t), uiop); |
| out: |
| kmem_free(pup, sizeof (*pup)); |
| kmem_free(upup, sizeof (*upup)); |
| return (error); |
| } |
| |
| /* ARGSUSED */ |
| static int |
| pr_read_xregs(prnode_t *pnp, uio_t *uiop) |
| { |
| #if defined(__sparc) |
| proc_t *p; |
| kthread_t *t; |
| int error; |
| char *xreg; |
| size_t size; |
| |
| ASSERT(pnp->pr_type == PR_XREGS); |
| |
| xreg = kmem_zalloc(sizeof (prxregset_t), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| |
| p = pnp->pr_common->prc_proc; |
| t = pnp->pr_common->prc_thread; |
| |
| size = prhasx(p)? prgetprxregsize(p) : 0; |
| if (uiop->uio_offset >= size) { |
| prunlock(pnp); |
| goto out; |
| } |
| |
| /* drop p->p_lock while (possibly) touching the stack */ |
| mutex_exit(&p->p_lock); |
| prgetprxregs(ttolwp(t), xreg); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_uioread(xreg, size, uiop); |
| out: |
| kmem_free(xreg, sizeof (prxregset_t)); |
| return (error); |
| #else |
| return (0); |
| #endif |
| } |
| |
| static int |
| pr_read_spymaster(prnode_t *pnp, uio_t *uiop) |
| { |
| psinfo_t psinfo; |
| int error; |
| klwp_t *lwp; |
| |
| ASSERT(pnp->pr_type == PR_SPYMASTER); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| if (pnp->pr_common->prc_thread == NULL) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| lwp = pnp->pr_common->prc_thread->t_lwp; |
| |
| if (lwp->lwp_spymaster == NULL) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| bcopy(lwp->lwp_spymaster, &psinfo, sizeof (psinfo_t)); |
| prunlock(pnp); |
| |
| return (pr_uioread(&psinfo, sizeof (psinfo), uiop)); |
| } |
| |
| static int |
| pr_read_secflags(prnode_t *pnp, uio_t *uiop) |
| { |
| prsecflags_t ret; |
| int error; |
| proc_t *p; |
| |
| ASSERT(pnp->pr_type == PR_SECFLAGS); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| prgetsecflags(p, &ret); |
| prunlock(pnp); |
| |
| return (pr_uioread(&ret, sizeof (ret), uiop)); |
| } |
| |
| #if defined(__sparc) |
| |
| static int |
| pr_read_gwindows(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| gwindows_t *gwp; |
| int error; |
| size_t size; |
| |
| ASSERT(pnp->pr_type == PR_GWINDOWS); |
| |
| gwp = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| |
| p = pnp->pr_common->prc_proc; |
| t = pnp->pr_common->prc_thread; |
| |
| /* |
| * Drop p->p_lock while touching the stack. |
| * The P_PR_LOCK flag prevents the lwp from |
| * disappearing while we do this. |
| */ |
| mutex_exit(&p->p_lock); |
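| /* copy out only the register windows actually in use */ |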
| if ((size = prnwindows(ttolwp(t))) != 0) |
| size = sizeof (gwindows_t) - |
| (SPARC_MAXREGWINDOW - size) * sizeof (struct rwindow); |
| if (uiop->uio_offset >= size) { |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| goto out; |
| } |
| prgetwindows(ttolwp(t), gwp); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_uioread(gwp, size, uiop); |
| out: |
| kmem_free(gwp, sizeof (gwindows_t)); |
| return (error); |
| } |
| |
| /* ARGSUSED */ |
| static int |
| pr_read_asrs(prnode_t *pnp, uio_t *uiop) |
| { |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_ASRS); |
| |
| /* the asrs file exists only for sparc v9 _LP64 processes */ |
| if ((error = prlock(pnp, ZNO)) == 0) { |
| proc_t *p = pnp->pr_common->prc_proc; |
| kthread_t *t = pnp->pr_common->prc_thread; |
| asrset_t asrset; |
| |
| if (p->p_model != DATAMODEL_LP64 || |
| uiop->uio_offset >= sizeof (asrset_t)) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* |
| * Drop p->p_lock while touching the stack. |
| * The P_PR_LOCK flag prevents the lwp from |
| * disappearing while we do this. |
| */ |
| mutex_exit(&p->p_lock); |
| prgetasregs(ttolwp(t), asrset); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_uioread(&asrset[0], sizeof (asrset_t), uiop); |
| } |
| |
| return (error); |
| } |
| |
| #endif /* __sparc */ |
| |
| static int |
| pr_read_piddir(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_PIDDIR); |
| ASSERT(pnp->pr_pidfile != NULL); |
| |
| /* use the underlying PR_PIDFILE to read the process */ |
| pnp = VTOP(pnp->pr_pidfile); |
| ASSERT(pnp->pr_type == PR_PIDFILE); |
| |
| return (pr_read_pidfile(pnp, uiop)); |
| } |
| |
| static int |
| pr_read_pidfile(prnode_t *pnp, uio_t *uiop) |
| { |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_PIDFILE || pnp->pr_type == PR_LWPIDFILE); |
| |
| if ((error = prlock(pnp, ZNO)) == 0) { |
| proc_t *p = pnp->pr_common->prc_proc; |
| struct as *as = p->p_as; |
| |
| if ((p->p_flag & SSYS) || as == &kas) { |
| /* |
| * /proc I/O cannot be done to a system process. |
| */ |
| error = EIO; /* old /proc semantics */ |
| } else { |
| /* |
| * We drop p_lock because we don't want to hold |
| * it over an I/O operation because that could |
| * lead to deadlock with the clock thread. |
| * The process will not disappear and its address |
| * space will not change because it is marked P_PR_LOCK. |
| */ |
| mutex_exit(&p->p_lock); |
| error = prusrio(p, UIO_READ, uiop, 1); |
| mutex_enter(&p->p_lock); |
| } |
| prunlock(pnp); |
| } |
| |
| return (error); |
| } |
| |
| #ifdef _SYSCALL32_IMPL |
| |
| /* |
| * Array of ILP32 read functions, indexed by /proc file type. |
| */ |
| static int pr_read_status_32(), |
| pr_read_lstatus_32(), pr_read_psinfo_32(), pr_read_lpsinfo_32(), |
| pr_read_map_32(), pr_read_rmap_32(), pr_read_xmap_32(), |
| pr_read_sigact_32(), pr_read_auxv_32(), |
| pr_read_usage_32(), pr_read_lusage_32(), pr_read_pagedata_32(), |
| pr_read_watch_32(), pr_read_lwpstatus_32(), pr_read_lwpsinfo_32(), |
| pr_read_lwpusage_32(), pr_read_spymaster_32(), |
| #if defined(__sparc) |
| pr_read_gwindows_32(), |
| #endif |
| pr_read_opagedata_32(); |
| |
| static int (*pr_read_function_32[PR_NFILES])() = { |
| pr_read_inval, /* /proc */ |
| pr_read_inval, /* /proc/self */ |
| pr_read_piddir, /* /proc/<pid> (old /proc read()) */ |
| pr_read_as, /* /proc/<pid>/as */ |
| pr_read_inval, /* /proc/<pid>/ctl */ |
| pr_read_status_32, /* /proc/<pid>/status */ |
| pr_read_lstatus_32, /* /proc/<pid>/lstatus */ |
| pr_read_psinfo_32, /* /proc/<pid>/psinfo */ |
| pr_read_lpsinfo_32, /* /proc/<pid>/lpsinfo */ |
| pr_read_map_32, /* /proc/<pid>/map */ |
| pr_read_rmap_32, /* /proc/<pid>/rmap */ |
| pr_read_xmap_32, /* /proc/<pid>/xmap */ |
| pr_read_cred, /* /proc/<pid>/cred */ |
| pr_read_sigact_32, /* /proc/<pid>/sigact */ |
| pr_read_auxv_32, /* /proc/<pid>/auxv */ |
| #if defined(__x86) |
| pr_read_ldt, /* /proc/<pid>/ldt */ |
| #endif |
| pr_read_usage_32, /* /proc/<pid>/usage */ |
| pr_read_lusage_32, /* /proc/<pid>/lusage */ |
| pr_read_pagedata_32, /* /proc/<pid>/pagedata */ |
| pr_read_watch_32, /* /proc/<pid>/watch */ |
| pr_read_inval, /* /proc/<pid>/cwd */ |
| pr_read_inval, /* /proc/<pid>/root */ |
| pr_read_inval, /* /proc/<pid>/fd */ |
| pr_read_inval, /* /proc/<pid>/fd/nn */ |
| pr_read_inval, /* /proc/<pid>/object */ |
| pr_read_inval, /* /proc/<pid>/object/xxx */ |
| pr_read_inval, /* /proc/<pid>/lwp */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid> */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/lwpctl */ |
| pr_read_lwpstatus_32, /* /proc/<pid>/lwp/<lwpid>/lwpstatus */ |
| pr_read_lwpsinfo_32, /* /proc/<pid>/lwp/<lwpid>/lwpsinfo */ |
| pr_read_lwpusage_32, /* /proc/<pid>/lwp/<lwpid>/lwpusage */ |
| pr_read_xregs, /* /proc/<pid>/lwp/<lwpid>/xregs */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/templates */ |
| pr_read_inval, /* /proc/<pid>/lwp/<lwpid>/templates/<id> */ |
| pr_read_spymaster_32, /* /proc/<pid>/lwp/<lwpid>/spymaster */ |
| #if defined(__sparc) |
| pr_read_gwindows_32, /* /proc/<pid>/lwp/<lwpid>/gwindows */ |
| pr_read_asrs, /* /proc/<pid>/lwp/<lwpid>/asrs */ |
| #endif |
| pr_read_priv, /* /proc/<pid>/priv */ |
| pr_read_inval, /* /proc/<pid>/path */ |
| pr_read_inval, /* /proc/<pid>/path/xxx */ |
| pr_read_inval, /* /proc/<pid>/contracts */ |
| pr_read_inval, /* /proc/<pid>/contracts/<ctid> */ |
| pr_read_secflags, /* /proc/<pid>/secflags */ |
| pr_read_pidfile, /* old process file */ |
| pr_read_pidfile, /* old lwp file */ |
| pr_read_opagedata_32, /* old pagedata file */ |
| }; |
| |
| static int |
| pr_read_status_32(prnode_t *pnp, uio_t *uiop) |
| { |
| pstatus32_t *sp; |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_STATUS); |
| |
| /* |
| * We kmem_alloc() the pstatus structure because |
| * it is so big it might blow the kernel stack. |
| */ |
| sp = kmem_alloc(sizeof (*sp), KM_SLEEP); |
| if ((error = prlock(pnp, ZNO)) == 0) { |
| /* |
| * A 32-bit process cannot get the status of a 64-bit process. |
| * The fields for the 64-bit quantities are not large enough. |
| */ |
| p = pnp->pr_common->prc_proc; |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| error = EOVERFLOW; |
| } else { |
| prgetstatus32(pnp->pr_common->prc_proc, sp, |
| VTOZONE(PTOV(pnp))); |
| prunlock(pnp); |
| error = pr_uioread(sp, sizeof (*sp), uiop); |
| } |
| } |
| kmem_free((caddr_t)sp, sizeof (*sp)); |
| return (error); |
| } |
| |
| static int |
| pr_read_lstatus_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| size_t size; |
| prheader32_t *php; |
| lwpstatus32_t *sp; |
| int error; |
| int nlwp; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LSTATUS); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| p = pnp->pr_common->prc_proc; |
| /* |
| * A 32-bit process cannot get the status of a 64-bit process. |
| * The fields for the 64-bit quantities are not large enough. |
| */ |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| nlwp = p->p_lwpcnt; |
| size = sizeof (prheader32_t) + nlwp * LSPAN32(lwpstatus32_t); |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| php = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt); |
| |
| php->pr_nent = nlwp; |
| php->pr_entsize = LSPAN32(lwpstatus32_t); |
| |
| sp = (lwpstatus32_t *)(php + 1); |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if (ldp->ld_entry == NULL || |
| (t = ldp->ld_entry->le_thread) == NULL) |
| continue; |
| prgetlwpstatus32(t, sp, VTOZONE(PTOV(pnp))); |
| sp = (lwpstatus32_t *)((caddr_t)sp + LSPAN32(lwpstatus32_t)); |
| } |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(php, size); |
| return (error); |
| } |
| |
| static int |
| pr_read_psinfo_32(prnode_t *pnp, uio_t *uiop) |
| { |
| psinfo32_t psinfo; |
| proc_t *p; |
| int error = 0; |
| |
| ASSERT(pnp->pr_type == PR_PSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| error = ENOENT; |
| else { |
| ASSERT(p == pnp->pr_common->prc_proc); |
| prgetpsinfo32(p, &psinfo); |
| prunlock(pnp); |
| error = pr_uioread(&psinfo, sizeof (psinfo), uiop); |
| } |
| return (error); |
| } |
| |
| static int |
| pr_read_lpsinfo_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| lwpent_t *lep; |
| size_t size; |
| prheader32_t *php; |
| lwpsinfo32_t *sp; |
| int error; |
| int nlwp; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LPSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if ((nlwp = p->p_lwpcnt + p->p_zombcnt) == 0) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| size = sizeof (prheader32_t) + nlwp * LSPAN32(lwpsinfo32_t); |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| php = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt + p->p_zombcnt); |
| |
| php->pr_nent = nlwp; |
| php->pr_entsize = LSPAN32(lwpsinfo32_t); |
| |
| sp = (lwpsinfo32_t *)(php + 1); |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if ((lep = ldp->ld_entry) == NULL) |
| continue; |
| if ((t = lep->le_thread) != NULL) |
| prgetlwpsinfo32(t, sp); |
| else { |
| bzero(sp, sizeof (*sp)); |
| sp->pr_lwpid = lep->le_lwpid; |
| sp->pr_state = SZOMB; |
| sp->pr_sname = 'Z'; |
| sp->pr_start.tv_sec = (time32_t)lep->le_start; |
| } |
| sp = (lwpsinfo32_t *)((caddr_t)sp + LSPAN32(lwpsinfo32_t)); |
| } |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(php, size); |
| return (error); |
| } |
| |
| static int |
| pr_read_map_common_32(prnode_t *pnp, uio_t *uiop, prnodetype_t type) |
| { |
| proc_t *p; |
| struct as *as; |
| list_t iolhead; |
| int error; |
| |
| readmap32_common: |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| as = p->p_as; |
| |
| if ((p->p_flag & SSYS) || as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| |
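| /* don't block waiting for the address space lock; back off and retry */ |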
| if (!AS_LOCK_TRYENTER(as, RW_WRITER)) { |
| prunlock(pnp); |
| delay(1); |
| goto readmap32_common; |
| } |
| mutex_exit(&p->p_lock); |
| |
| switch (type) { |
| case PR_XMAP: |
| error = prgetxmap32(p, &iolhead); |
| break; |
| case PR_RMAP: |
| error = prgetmap32(p, 1, &iolhead); |
| break; |
| case PR_MAP: |
| error = prgetmap32(p, 0, &iolhead); |
| break; |
| } |
| AS_LOCK_EXIT(as); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_iol_uiomove_and_free(&iolhead, uiop, error); |
| |
| return (error); |
| } |
| |
| static int |
| pr_read_map_32(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_MAP); |
| return (pr_read_map_common_32(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_rmap_32(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_RMAP); |
| return (pr_read_map_common_32(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_xmap_32(prnode_t *pnp, uio_t *uiop) |
| { |
| ASSERT(pnp->pr_type == PR_XMAP); |
| return (pr_read_map_common_32(pnp, uiop, pnp->pr_type)); |
| } |
| |
| static int |
| pr_read_sigact_32(prnode_t *pnp, uio_t *uiop) |
| { |
| int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG; |
| proc_t *p; |
| struct sigaction32 *sap; |
| int sig; |
| int error; |
| user_t *up; |
| |
| ASSERT(pnp->pr_type == PR_SIGACT); |
| |
| /* |
| * We kmem_alloc() the sigaction32 array because |
| * it is so big it might blow the kernel stack. |
| */ |
| sap = kmem_alloc((nsig-1) * sizeof (struct sigaction32), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| p = pnp->pr_common->prc_proc; |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| error = EOVERFLOW; |
| goto out; |
| } |
| |
| if (uiop->uio_offset >= (nsig-1) * sizeof (struct sigaction32)) { |
| prunlock(pnp); |
| goto out; |
| } |
| |
| up = PTOU(p); |
| for (sig = 1; sig < nsig; sig++) |
| prgetaction32(p, up, sig, &sap[sig-1]); |
| prunlock(pnp); |
| |
| error = pr_uioread(sap, (nsig - 1) * sizeof (struct sigaction32), uiop); |
| out: |
| kmem_free(sap, (nsig-1) * sizeof (struct sigaction32)); |
| return (error); |
| } |
| |
| static int |
| pr_read_auxv_32(prnode_t *pnp, uio_t *uiop) |
| { |
| auxv32_t auxv[__KERN_NAUXV_IMPL]; |
| proc_t *p; |
| user_t *up; |
| int error; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_AUXV); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| p = pnp->pr_common->prc_proc; |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| |
| if (uiop->uio_offset >= sizeof (auxv)) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| up = PTOU(p); |
| for (i = 0; i < __KERN_NAUXV_IMPL; i++) { |
| auxv[i].a_type = (int32_t)up->u_auxv[i].a_type; |
| auxv[i].a_un.a_val = (int32_t)up->u_auxv[i].a_un.a_val; |
| } |
| prunlock(pnp); |
| |
| return (pr_uioread(auxv, sizeof (auxv), uiop)); |
| } |
| |
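| /* |
| * Return a single prusage32_t summarizing resource usage for the |
| * whole process: the totals accumulated from defunct lwps plus the |
| * current usage of every live lwp. |
| */ |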
| static int |
| pr_read_usage_32(prnode_t *pnp, uio_t *uiop) |
| { |
| prhusage_t *pup; |
| prusage32_t *upup; |
| proc_t *p; |
| kthread_t *t; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_USAGE); |
| |
| /* allocate now, before locking the process */ |
| pup = kmem_zalloc(sizeof (*pup), KM_SLEEP); |
| upup = kmem_alloc(sizeof (*upup), KM_SLEEP); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) { |
| error = ENOENT; |
| goto out; |
| } |
| ASSERT(p == pnp->pr_common->prc_proc); |
| |
| if (uiop->uio_offset >= sizeof (prusage32_t)) { |
| prunlock(pnp); |
| error = 0; |
| goto out; |
| } |
| |
| pup->pr_tstamp = gethrtime(); |
| |
| pup->pr_count = p->p_defunct; |
| pup->pr_create = p->p_mstart; |
| pup->pr_term = p->p_mterm; |
| |
| pup->pr_rtime = p->p_mlreal; |
| pup->pr_utime = p->p_acct[LMS_USER]; |
| pup->pr_stime = p->p_acct[LMS_SYSTEM]; |
| pup->pr_ttime = p->p_acct[LMS_TRAP]; |
| pup->pr_tftime = p->p_acct[LMS_TFAULT]; |
| pup->pr_dftime = p->p_acct[LMS_DFAULT]; |
| pup->pr_kftime = p->p_acct[LMS_KFAULT]; |
| pup->pr_ltime = p->p_acct[LMS_USER_LOCK]; |
| pup->pr_slptime = p->p_acct[LMS_SLEEP]; |
| pup->pr_wtime = p->p_acct[LMS_WAIT_CPU]; |
| pup->pr_stoptime = p->p_acct[LMS_STOPPED]; |
| |
| pup->pr_minf = p->p_ru.minflt; |
| pup->pr_majf = p->p_ru.majflt; |
| pup->pr_nswap = p->p_ru.nswap; |
| pup->pr_inblk = p->p_ru.inblock; |
| pup->pr_oublk = p->p_ru.oublock; |
| pup->pr_msnd = p->p_ru.msgsnd; |
| pup->pr_mrcv = p->p_ru.msgrcv; |
| pup->pr_sigs = p->p_ru.nsignals; |
| pup->pr_vctx = p->p_ru.nvcsw; |
| pup->pr_ictx = p->p_ru.nivcsw; |
| pup->pr_sysc = p->p_ru.sysc; |
| pup->pr_ioch = p->p_ru.ioch; |
| |
| /* |
| * Add the usage information for each active lwp. |
| */ |
| if ((t = p->p_tlist) != NULL && |
| !(pnp->pr_pcommon->prc_flags & PRC_DESTROY)) { |
| do { |
| if (t->t_proc_flag & TP_LWPEXIT) |
| continue; |
| pup->pr_count++; |
| praddusage(t, pup); |
| } while ((t = t->t_forw) != p->p_tlist); |
| } |
| |
| prunlock(pnp); |
| |
| prcvtusage32(pup, upup); |
| |
| error = pr_uioread(upup, sizeof (prusage32_t), uiop); |
| out: |
| kmem_free(pup, sizeof (*pup)); |
| kmem_free(upup, sizeof (*upup)); |
| return (error); |
| } |
| |
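| /* |
| * Like pr_read_usage_32(), but returns a prheader32_t followed by |
| * one prusage32_t entry for the defunct-lwp totals and one entry |
| * for each live lwp. |
| */ |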
| static int |
| pr_read_lusage_32(prnode_t *pnp, uio_t *uiop) |
| { |
| int nlwp; |
| prhusage_t *pup; |
| prheader32_t *php; |
| prusage32_t *upup; |
| size_t size; |
| hrtime_t curtime; |
| proc_t *p; |
| kthread_t *t; |
| lwpdir_t *ldp; |
| int error; |
| int i; |
| |
| ASSERT(pnp->pr_type == PR_LUSAGE); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if ((nlwp = p->p_lwpcnt) == 0) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| |
| size = sizeof (prheader32_t) + (nlwp + 1) * LSPAN32(prusage32_t); |
| if (uiop->uio_offset >= size) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| pup = kmem_zalloc(size + sizeof (prhusage_t), KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* p->p_lwpcnt can't change while process is locked */ |
| ASSERT(nlwp == p->p_lwpcnt); |
| |
| php = (prheader32_t *)(pup + 1); |
| upup = (prusage32_t *)(php + 1); |
| |
| php->pr_nent = nlwp + 1; |
| php->pr_entsize = LSPAN32(prusage32_t); |
| |
| curtime = gethrtime(); |
| |
| /* |
| * First the summation over defunct lwps. |
| */ |
| pup->pr_count = p->p_defunct; |
| pup->pr_tstamp = curtime; |
| pup->pr_create = p->p_mstart; |
| pup->pr_term = p->p_mterm; |
| |
| pup->pr_rtime = p->p_mlreal; |
| pup->pr_utime = p->p_acct[LMS_USER]; |
| pup->pr_stime = p->p_acct[LMS_SYSTEM]; |
| pup->pr_ttime = p->p_acct[LMS_TRAP]; |
| pup->pr_tftime = p->p_acct[LMS_TFAULT]; |
| pup->pr_dftime = p->p_acct[LMS_DFAULT]; |
| pup->pr_kftime = p->p_acct[LMS_KFAULT]; |
| pup->pr_ltime = p->p_acct[LMS_USER_LOCK]; |
| pup->pr_slptime = p->p_acct[LMS_SLEEP]; |
| pup->pr_wtime = p->p_acct[LMS_WAIT_CPU]; |
| pup->pr_stoptime = p->p_acct[LMS_STOPPED]; |
| |
| pup->pr_minf = p->p_ru.minflt; |
| pup->pr_majf = p->p_ru.majflt; |
| pup->pr_nswap = p->p_ru.nswap; |
| pup->pr_inblk = p->p_ru.inblock; |
| pup->pr_oublk = p->p_ru.oublock; |
| pup->pr_msnd = p->p_ru.msgsnd; |
| pup->pr_mrcv = p->p_ru.msgrcv; |
| pup->pr_sigs = p->p_ru.nsignals; |
| pup->pr_vctx = p->p_ru.nvcsw; |
| pup->pr_ictx = p->p_ru.nivcsw; |
| pup->pr_sysc = p->p_ru.sysc; |
| pup->pr_ioch = p->p_ru.ioch; |
| |
| prcvtusage32(pup, upup); |
| |
| /* |
| * Fill one prusage struct for each active lwp. |
| */ |
| for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { |
| if (ldp->ld_entry == NULL || |
| (t = ldp->ld_entry->le_thread) == NULL) |
| continue; |
| ASSERT(!(t->t_proc_flag & TP_LWPEXIT)); |
| ASSERT(nlwp > 0); |
| --nlwp; |
| upup = (prusage32_t *) |
| ((caddr_t)upup + LSPAN32(prusage32_t)); |
| prgetusage(t, pup); |
| prcvtusage32(pup, upup); |
| } |
| ASSERT(nlwp == 0); |
| |
| prunlock(pnp); |
| |
| error = pr_uioread(php, size, uiop); |
| kmem_free(pup, size + sizeof (prhusage_t)); |
| return (error); |
| } |
| |
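| /* |
| * Return 32-bit page data for the target process. System processes |
| * read back empty and a 64-bit target fails with EOVERFLOW; the |
| * real work is done by prpdread32() with p->p_lock dropped. |
| */ |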
| static int |
| pr_read_pagedata_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_PAGEDATA); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| if ((p->p_flag & SSYS) || p->p_as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| |
| mutex_exit(&p->p_lock); |
| error = prpdread32(p, pnp->pr_hatid, uiop); |
| mutex_enter(&p->p_lock); |
| |
| prunlock(pnp); |
| return (error); |
| } |
| |
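| /* |
| * Like pr_read_pagedata_32(), but for the old-format page data |
| * file; the work is done by oprpdread32() against the target's |
| * address space. |
| */ |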
| static int |
| pr_read_opagedata_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| struct as *as; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_OPAGEDATA); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| as = p->p_as; |
| |
| if ((p->p_flag & SSYS) || as == &kas) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| |
| mutex_exit(&p->p_lock); |
| error = oprpdread32(as, pnp->pr_hatid, uiop); |
| mutex_enter(&p->p_lock); |
| |
| prunlock(pnp); |
| return (error); |
| } |
| |
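| /* |
| * Return the process's watched areas as an array of prwatch32_t, |
| * one entry per node in the p_warea AVL tree. |
| */ |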
| static int |
| pr_read_watch_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| int error; |
| prwatch32_t *Bpwp; |
| size_t size; |
| prwatch32_t *pwp; |
| int nwarea; |
| struct watched_area *pwarea; |
| |
| ASSERT(pnp->pr_type == PR_WATCH); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| p = pnp->pr_common->prc_proc; |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| return (EOVERFLOW); |
| } |
| nwarea = avl_numnodes(&p->p_warea); |
| size = nwarea * sizeof (prwatch32_t); |
| if (uiop->uio_offset >= size) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| /* drop p->p_lock to do kmem_alloc(KM_SLEEP) */ |
| mutex_exit(&p->p_lock); |
| Bpwp = pwp = kmem_zalloc(size, KM_SLEEP); |
| mutex_enter(&p->p_lock); |
| /* the count of watched areas can't change while process is locked */ |
| ASSERT(nwarea == avl_numnodes(&p->p_warea)); |
| |
| /* gather the watched areas */ |
| for (pwarea = avl_first(&p->p_warea); pwarea != NULL; |
| pwarea = AVL_NEXT(&p->p_warea, pwarea), pwp++) { |
| pwp->pr_vaddr = (caddr32_t)(uintptr_t)pwarea->wa_vaddr; |
| pwp->pr_size = (size32_t)(pwarea->wa_eaddr - pwarea->wa_vaddr); |
| pwp->pr_wflags = (int)pwarea->wa_flags; |
| } |
| |
| prunlock(pnp); |
| |
| error = pr_uioread(Bpwp, size, uiop); |
| kmem_free(Bpwp, size); |
| return (error); |
| } |
| |
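| /* |
| * Return the lwpstatus32_t for a single lwp. |
| */ |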
| static int |
| pr_read_lwpstatus_32(prnode_t *pnp, uio_t *uiop) |
| { |
| lwpstatus32_t *sp; |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_LWPSTATUS); |
| |
| /* |
| * We kmem_alloc() the lwpstatus structure because |
| * it is so big it might blow the kernel stack. |
| */ |
| sp = kmem_alloc(sizeof (*sp), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| |
| /* |
| * A 32-bit process cannot get the status of a 64-bit process. |
| * The fields for the 64-bit quantities are not large enough. |
| */ |
| p = pnp->pr_common->prc_proc; |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| error = EOVERFLOW; |
| goto out; |
| } |
| |
| if (uiop->uio_offset >= sizeof (*sp)) { |
| prunlock(pnp); |
| goto out; |
| } |
| |
| prgetlwpstatus32(pnp->pr_common->prc_thread, sp, VTOZONE(PTOV(pnp))); |
| prunlock(pnp); |
| |
| error = pr_uioread(sp, sizeof (*sp), uiop); |
| out: |
| kmem_free(sp, sizeof (*sp)); |
| return (error); |
| } |
| |
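| /* |
| * Return the lwpsinfo32_t for a single lwp. Unlike the lwpstatus |
| * file, this also works for zombie lwps, which read back as a |
| * zeroed entry marked SZOMB. |
| */ |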
| static int |
| pr_read_lwpsinfo_32(prnode_t *pnp, uio_t *uiop) |
| { |
| lwpsinfo32_t lwpsinfo; |
| proc_t *p; |
| kthread_t *t; |
| lwpent_t *lep; |
| |
| ASSERT(pnp->pr_type == PR_LWPSINFO); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) |
| return (ENOENT); |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if (pnp->pr_common->prc_tslot == -1) { |
| prunlock(pnp); |
| return (ENOENT); |
| } |
| |
| if (uiop->uio_offset >= sizeof (lwpsinfo)) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| if ((t = pnp->pr_common->prc_thread) != NULL) |
| prgetlwpsinfo32(t, &lwpsinfo); |
| else { |
| lep = p->p_lwpdir[pnp->pr_common->prc_tslot].ld_entry; |
| bzero(&lwpsinfo, sizeof (lwpsinfo)); |
| lwpsinfo.pr_lwpid = lep->le_lwpid; |
| lwpsinfo.pr_state = SZOMB; |
| lwpsinfo.pr_sname = 'Z'; |
| lwpsinfo.pr_start.tv_sec = (time32_t)lep->le_start; |
| } |
| prunlock(pnp); |
| |
| return (pr_uioread(&lwpsinfo, sizeof (lwpsinfo), uiop)); |
| } |
| |
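| /* |
| * Return a prusage32_t for a single lwp, converted from the |
| * internal prhusage_t representation. |
| */ |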
| static int |
| pr_read_lwpusage_32(prnode_t *pnp, uio_t *uiop) |
| { |
| prhusage_t *pup; |
| prusage32_t *upup; |
| proc_t *p; |
| int error; |
| |
| ASSERT(pnp->pr_type == PR_LWPUSAGE); |
| |
| /* allocate now, before locking the process */ |
| pup = kmem_zalloc(sizeof (*pup), KM_SLEEP); |
| upup = kmem_alloc(sizeof (*upup), KM_SLEEP); |
| |
| /* |
| * We don't want the full treatment of prlock(pnp) here. |
| * This file is world-readable and never goes invalid. |
| * It doesn't matter if we are in the middle of an exec(). |
| */ |
| p = pr_p_lock(pnp); |
| mutex_exit(&pr_pidlock); |
| if (p == NULL) { |
| error = ENOENT; |
| goto out; |
| } |
| ASSERT(p == pnp->pr_common->prc_proc); |
| if (pnp->pr_common->prc_thread == NULL) { |
| prunlock(pnp); |
| error = ENOENT; |
| goto out; |
| } |
| if (uiop->uio_offset >= sizeof (prusage32_t)) { |
| prunlock(pnp); |
| error = 0; |
| goto out; |
| } |
| |
| pup->pr_tstamp = gethrtime(); |
| prgetusage(pnp->pr_common->prc_thread, pup); |
| |
| prunlock(pnp); |
| |
| prcvtusage32(pup, upup); |
| |
| error = pr_uioread(upup, sizeof (prusage32_t), uiop); |
| out: |
| kmem_free(pup, sizeof (*pup)); |
| kmem_free(upup, sizeof (*upup)); |
| return (error); |
| } |
| |
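| /* |
| * Return, in 32-bit form, the psinfo snapshot recorded in |
| * lwp_spymaster for this lwp, or nothing if none was recorded. |
| */ |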
| static int |
| pr_read_spymaster_32(prnode_t *pnp, uio_t *uiop) |
| { |
| psinfo32_t psinfo; |
| int error; |
| klwp_t *lwp; |
| |
| ASSERT(pnp->pr_type == PR_SPYMASTER); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| return (error); |
| |
| if (pnp->pr_common->prc_thread == NULL) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| lwp = pnp->pr_common->prc_thread->t_lwp; |
| |
| if (lwp->lwp_spymaster == NULL) { |
| prunlock(pnp); |
| return (0); |
| } |
| |
| psinfo_kto32(lwp->lwp_spymaster, &psinfo); |
| prunlock(pnp); |
| |
| return (pr_uioread(&psinfo, sizeof (psinfo), uiop)); |
| } |
| |
| #if defined(__sparc) |
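| /* |
| * Return the 32-bit register window data for the lwp. Only as |
| * much of the gwindows32_t as is actually populated (as reported |
| * by prnwindows()) is returned; a 64-bit target fails with |
| * EOVERFLOW. |
| */ |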
| static int |
| pr_read_gwindows_32(prnode_t *pnp, uio_t *uiop) |
| { |
| proc_t *p; |
| kthread_t *t; |
| gwindows32_t *gwp; |
| int error; |
| size_t size; |
| |
| ASSERT(pnp->pr_type == PR_GWINDOWS); |
| |
| gwp = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP); |
| |
| if ((error = prlock(pnp, ZNO)) != 0) |
| goto out; |
| |
| p = pnp->pr_common->prc_proc; |
| t = pnp->pr_common->prc_thread; |
| |
| if (PROCESS_NOT_32BIT(p)) { |
| prunlock(pnp); |
| error = EOVERFLOW; |
| goto out; |
| } |
| |
| /* |
| * Drop p->p_lock while touching the stack. |
| * The P_PR_LOCK flag prevents the lwp from |
| * disappearing while we do this. |
| */ |
| mutex_exit(&p->p_lock); |
| if ((size = prnwindows(ttolwp(t))) != 0) |
| size = sizeof (gwindows32_t) - |
| (SPARC_MAXREGWINDOW - size) * sizeof (struct rwindow32); |
| if (uiop->uio_offset >= size) { |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| goto out; |
| } |
| prgetwindows32(ttolwp(t), gwp); |
| mutex_enter(&p->p_lock); |
| prunlock(pnp); |
| |
| error = pr_uioread(gwp, size, uiop); |
| out: |
| kmem_free(gwp, sizeof (gwindows32_t)); |
| return (error); |
| } |
| #endif /* __sparc */ |
| |
| #endif /* _SYSCALL32_IMPL */ |
| |
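| /* |
| * prread() is the read entry point for all /proc files. It |
| * dispatches through the pr_read_function[] table (or its _32 |
| * counterpart) indexed by the prnode type. |
| * |
| * A minimal userland sketch (illustrative only, hypothetical pid): |
| * an ILP32 program that reads /proc/<pid>/psinfo simply sees the |
| * ILP32 psinfo_t layout, with no translation needed in userland: |
| * |
| * int fd = open("/proc/1234/psinfo", O_RDONLY); |
| * psinfo_t psinfo; |
| * (void) read(fd, &psinfo, sizeof (psinfo)); |
| */ |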
| /* ARGSUSED */ |
| static int |
| prread(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr, caller_context_t *ct) |
| { |
| prnode_t *pnp = VTOP(vp); |
| |
| ASSERT(pnp->pr_type < PR_NFILES); |
| |
| #ifdef _SYSCALL32_IMPL |
| /* |
| * What is read from the /proc files depends on the data |
| * model of the caller. An LP64 process will see LP64 |
| * data. An ILP32 process will see ILP32 data. |
| */ |
| if (curproc->p_model == DATAMODEL_LP64) |
| return (pr_read_function[pnp->pr_type](pnp, uiop)); |
| else |
| return (pr_read_function_32[pnp->pr_type](pnp, uiop)); |
| #else |
| return (pr_read_function[pnp->pr_type](pnp, uiop)); |
| #endif |
| } |
|