| /* |
| * CDDL HEADER START |
| * |
| * The contents of this file are subject to the terms of the |
| * Common Development and Distribution License (the "License"). |
| * You may not use this file except in compliance with the License. |
| * |
| * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
| * or http://www.opensolaris.org/os/licensing. |
| * See the License for the specific language governing permissions |
| * and limitations under the License. |
| * |
| * When distributing Covered Code, include this CDDL HEADER in each |
| * file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
| * If applicable, add the following below this CDDL HEADER, with the |
| * fields enclosed by brackets "[]" replaced with your own identifying |
| * information: Portions Copyright [yyyy] [name of copyright owner] |
| * |
| * CDDL HEADER END |
| */ |
| |
| /* |
| * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
| * Copyright (c) 2012, 2017 by Delphix. All rights reserved. |
| * Copyright (c) 2014 Integros [integros.com] |
| * Copyright 2015 Joyent, Inc. |
| * Copyright 2017 Nexenta Systems, Inc. |
| */ |
| |
| /* Portions Copyright 2007 Jeremy Teo */ |
| /* Portions Copyright 2010 Robert Milkowski */ |
| |
| #include <sys/types.h> |
| #include <sys/param.h> |
| #include <sys/time.h> |
| #include <sys/systm.h> |
| #include <sys/sysmacros.h> |
| #include <sys/resource.h> |
| #include <sys/vfs.h> |
| #include <sys/vfs_opreg.h> |
| #include <sys/vnode.h> |
| #include <sys/file.h> |
| #include <sys/stat.h> |
| #include <sys/kmem.h> |
| #include <sys/taskq.h> |
| #include <sys/uio.h> |
| #include <sys/vmsystm.h> |
| #include <sys/atomic.h> |
| #include <sys/vm.h> |
| #include <vm/seg_vn.h> |
| #include <vm/pvn.h> |
| #include <vm/as.h> |
| #include <vm/kpm.h> |
| #include <vm/seg_kpm.h> |
| #include <sys/mman.h> |
| #include <sys/pathname.h> |
| #include <sys/cmn_err.h> |
| #include <sys/errno.h> |
| #include <sys/unistd.h> |
| #include <sys/zfs_dir.h> |
| #include <sys/zfs_acl.h> |
| #include <sys/zfs_ioctl.h> |
| #include <sys/fs/zfs.h> |
| #include <sys/dmu.h> |
| #include <sys/dmu_objset.h> |
| #include <sys/spa.h> |
| #include <sys/txg.h> |
| #include <sys/dbuf.h> |
| #include <sys/zap.h> |
| #include <sys/sa.h> |
| #include <sys/dirent.h> |
| #include <sys/policy.h> |
| #include <sys/sunddi.h> |
| #include <sys/filio.h> |
| #include <sys/sid.h> |
| #include "fs/fs_subr.h" |
| #include <sys/zfs_ctldir.h> |
| #include <sys/zfs_fuid.h> |
| #include <sys/zfs_sa.h> |
| #include <sys/dnlc.h> |
| #include <sys/zfs_rlock.h> |
| #include <sys/extdirent.h> |
| #include <sys/kidmap.h> |
| #include <sys/cred.h> |
| #include <sys/attr.h> |
| #include <sys/zil.h> |
| |
| /* |
| * Programming rules. |
| * |
| * Each vnode op performs some logical unit of work. To do this, the ZPL must |
| * properly lock its in-core state, create a DMU transaction, do the work, |
| * record this work in the intent log (ZIL), commit the DMU transaction, |
| * and wait for the intent log to commit if it is a synchronous operation. |
| * Moreover, the vnode ops must work in both normal and log replay context. |
| * The ordering of events is important to avoid deadlocks and references |
| * to freed memory. The example below illustrates the following Big Rules: |
| * |
| * (1) A check must be made in each zfs thread for a mounted file system. |
| * This is done without races by using ZFS_ENTER(zfsvfs). |
| * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes |
| * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros |
| * can return EIO from the calling function. |
| * |
| * (2) VN_RELE() should always be the last thing except for zil_commit() |
| * (if necessary) and ZFS_EXIT(). This is for 3 reasons: |
| * First, if it's the last reference, the vnode/znode |
| * can be freed, so the zp may point to freed memory. Second, the last |
| * reference will call zfs_zinactive(), which may induce a lot of work -- |
| * pushing cached pages (which acquires range locks) and syncing out |
| * cached atime changes. Third, zfs_zinactive() may require a new tx, |
| * which could deadlock the system if you were already holding one. |
| * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC(). |
| * |
| * (3) All range locks must be grabbed before calling dmu_tx_assign(), |
| * as they can span dmu_tx_assign() calls. |
| * |
| * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to |
| * dmu_tx_assign(). This is critical because we don't want to block |
| * while holding locks. |
| * |
| * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This |
| * reduces lock contention and CPU usage when we must wait (note that if |
| * throughput is constrained by the storage, nearly every transaction |
| * must wait). |
| * |
| * Note, in particular, that if a lock is sometimes acquired before |
| * the tx assigns, and sometimes after (e.g. z_lock), then failing |
| * to use a non-blocking assign can deadlock the system. The scenario: |
| * |
| * Thread A has grabbed a lock before calling dmu_tx_assign(). |
| * Thread B is in an already-assigned tx, and blocks for this lock. |
| * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() |
| * forever, because the previous txg can't quiesce until B's tx commits. |
| * |
| * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, |
| * then drop all locks, call dmu_tx_wait(), and try again. On subsequent |
| * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT, |
| * to indicate that this operation has already called dmu_tx_wait(). |
| * This will ensure that we don't retry forever, waiting a short bit |
| * each time. |
| * |
| * (5) If the operation succeeded, generate the intent log entry for it |
| * before dropping locks. This ensures that the ordering of events |
| * in the intent log matches the order in which they actually occurred. |
| * During ZIL replay the zfs_log_* functions will update the sequence |
| * number to indicate the zil transaction has replayed. |
| * |
| * (6) At the end of each vnode op, the DMU tx must always commit, |
| * regardless of whether there were any errors. |
| * |
| * (7) After dropping all locks, invoke zil_commit(zilog, foid) |
| * to ensure that synchronous semantics are provided when necessary. |
| * |
| * In general, this is how things should be ordered in each vnode op: |
| * |
| * ZFS_ENTER(zfsvfs); // exit if unmounted |
| * top: |
| * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD()) |
| * rw_enter(...); // grab any other locks you need |
| * tx = dmu_tx_create(...); // get DMU tx |
| * dmu_tx_hold_*(); // hold each object you might modify |
| * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); |
| * if (error) { |
| * rw_exit(...); // drop locks |
| * zfs_dirent_unlock(dl); // unlock directory entry |
| * VN_RELE(...); // release held vnodes |
| * if (error == ERESTART) { |
| * waited = B_TRUE; |
| * dmu_tx_wait(tx); |
| * dmu_tx_abort(tx); |
| * goto top; |
| * } |
| * dmu_tx_abort(tx); // abort DMU tx |
| * ZFS_EXIT(zfsvfs); // finished in zfs |
| * return (error); // really out of space |
| * } |
| * error = do_real_work(); // do whatever this VOP does |
| * if (error == 0) |
| * zfs_log_*(...); // on success, make ZIL entry |
| * dmu_tx_commit(tx); // commit DMU tx -- error or not |
| * rw_exit(...); // drop locks |
| * zfs_dirent_unlock(dl); // unlock directory entry |
| * VN_RELE(...); // release held vnodes |
| * zil_commit(zilog, foid); // synchronous when necessary |
| * ZFS_EXIT(zfsvfs); // finished in zfs |
| * return (error); // done, report error |
| */ |
| |
| /* ARGSUSED */ |
| static int |
| zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(*vpp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) && |
| ((flag & FAPPEND) == 0)) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EPERM)); |
| } |
| |
| if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && |
| ZTOV(zp)->v_type == VREG && |
| !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) { |
| if (fs_vscan(*vpp, cr, 0) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EACCES)); |
| } |
| } |
| |
| /* Keep a count of the synchronous opens in the znode */ |
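| /* |
| * (In illumos, zfs_log_write() consults z_sync_cnt when building log |
| * itxs, so an open with FSYNC/FDSYNC makes subsequent writes to this |
| * znode commit synchronously.) |
| */ |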
| if (flag & (FSYNC | FDSYNC)) |
| atomic_inc_32(&zp->z_sync_cnt); |
| |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* ARGSUSED */ |
| static int |
| zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, |
| caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| |
| /* |
| * Clean up any locks held by this process on the vp. |
| */ |
| cleanlocks(vp, ddi_get_pid(), 0); |
| cleanshares(vp, ddi_get_pid()); |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| /* Decrement the synchronous opens in the znode */ |
| if ((flag & (FSYNC | FDSYNC)) && (count == 1)) |
| atomic_dec_32(&zp->z_sync_cnt); |
| |
| if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && |
| ZTOV(zp)->v_type == VREG && |
| !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) |
| VERIFY(fs_vscan(vp, cr, 1) == 0); |
| |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* |
| * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and |
| * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. |
| */ |
| static int |
| zfs_holey(vnode_t *vp, int cmd, offset_t *off) |
| { |
| znode_t *zp = VTOZ(vp); |
| uint64_t noff = (uint64_t)*off; /* new offset */ |
| uint64_t file_sz; |
| int error; |
| boolean_t hole; |
| |
| file_sz = zp->z_size; |
| if (noff >= file_sz) { |
| return (SET_ERROR(ENXIO)); |
| } |
| |
| if (cmd == _FIO_SEEK_HOLE) |
| hole = B_TRUE; |
| else |
| hole = B_FALSE; |
| |
| error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff); |
| |
| if (error == ESRCH) |
| return (SET_ERROR(ENXIO)); |
| |
| /* |
| * We could find a hole that begins after the logical end-of-file, |
| * because dmu_offset_next() only works on whole blocks. If the |
| * EOF falls mid-block, then indicate that the "virtual hole" |
| * at the end of the file begins at the logical EOF, rather than |
| * at the end of the last block. |
| */ |
| if (noff > file_sz) { |
| ASSERT(hole); |
| noff = file_sz; |
| } |
| |
| if (noff < *off) |
| return (error); |
| *off = noff; |
| return (error); |
| } |
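| |
| /* |
| * Illustration only -- a userland sketch, not part of this file: callers |
| * normally reach the hole-finding code above via lseek(2), which illumos |
| * maps onto the _FIO_SEEK_HOLE/_FIO_SEEK_DATA ioctls handled in |
| * zfs_ioctl() below. Assuming an open file descriptor fd: |
| * |
| * off_t off = 0, hole; |
| * while ((off = lseek(fd, off, SEEK_DATA)) != -1) { |
| * hole = lseek(fd, off, SEEK_HOLE); |
| * (void) printf("data: [%lld, %lld)\n", |
| * (long long)off, (long long)hole); |
| * off = hole; |
| * } |
| * |
| * Seeking for data at or past EOF fails with ENXIO (see above), which |
| * terminates the loop. |
| */ |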
| |
| /* ARGSUSED */ |
| static int |
| zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, |
| int *rvalp, caller_context_t *ct) |
| { |
| offset_t off; |
| offset_t ndata; |
| dmu_object_info_t doi; |
| int error; |
| zfsvfs_t *zfsvfs; |
| znode_t *zp; |
| |
| switch (com) { |
| case _FIOFFS: |
| { |
| return (zfs_sync(vp->v_vfsp, 0, cred)); |
| |
| /* |
| * The following two ioctls are used by bfu. Faking them out |
| * is necessary to avoid bfu errors. |
| */ |
| } |
| case _FIOGDIO: |
| case _FIOSDIO: |
| { |
| return (0); |
| } |
| |
| case _FIO_SEEK_DATA: |
| case _FIO_SEEK_HOLE: |
| { |
| if (ddi_copyin((void *)data, &off, sizeof (off), flag)) |
| return (SET_ERROR(EFAULT)); |
| |
| zp = VTOZ(vp); |
| zfsvfs = zp->z_zfsvfs; |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| /* offset parameter is in/out */ |
| error = zfs_holey(vp, com, &off); |
| ZFS_EXIT(zfsvfs); |
| if (error) |
| return (error); |
| if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) |
| return (SET_ERROR(EFAULT)); |
| return (0); |
| } |
| case _FIO_COUNT_FILLED: |
| { |
| /* |
| * _FIO_COUNT_FILLED adds a new ioctl command which |
| * exposes the number of filled blocks in a |
| * ZFS object. |
| */ |
| zp = VTOZ(vp); |
| zfsvfs = zp->z_zfsvfs; |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| /* |
| * Wait for all dirty blocks for this object |
| * to get synced out to disk, and the DMU info |
| * updated. |
| */ |
| error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id); |
| if (error) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Retrieve fill count from DMU object. |
| */ |
| error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi); |
| if (error) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| ndata = doi.doi_fill_count; |
| |
| ZFS_EXIT(zfsvfs); |
| if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag)) |
| return (SET_ERROR(EFAULT)); |
| return (0); |
| } |
| } |
| return (SET_ERROR(ENOTTY)); |
| } |
| |
| /* |
| * Utility functions to map and unmap a single physical page. These |
| * are used to manage the mappable copies of ZFS file data, and therefore |
| * do not update ref/mod bits. |
| */ |
| caddr_t |
| zfs_map_page(page_t *pp, enum seg_rw rw) |
| { |
| if (kpm_enable) |
| return (hat_kpm_mapin(pp, 0)); |
| ASSERT(rw == S_READ || rw == S_WRITE); |
| return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), |
| (caddr_t)-1)); |
| } |
| |
| void |
| zfs_unmap_page(page_t *pp, caddr_t addr) |
| { |
| if (kpm_enable) { |
| hat_kpm_mapout(pp, 0, addr); |
| } else { |
| ppmapout(addr); |
| } |
| } |
| |
| /* |
| * When a file is memory mapped, we must keep the IO data synchronized |
| * between the DMU cache and the memory mapped pages. What this means: |
| * |
| * On Write: If we find a memory mapped page, we write to *both* |
| * the page and the dmu buffer. |
| */ |
| static void |
| update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid) |
| { |
| int64_t off; |
| |
| off = start & PAGEOFFSET; |
| for (start &= PAGEMASK; len > 0; start += PAGESIZE) { |
| page_t *pp; |
| uint64_t nbytes = MIN(PAGESIZE - off, len); |
| |
| if (pp = page_lookup(vp, start, SE_SHARED)) { |
| caddr_t va; |
| |
| va = zfs_map_page(pp, S_WRITE); |
| (void) dmu_read(os, oid, start+off, nbytes, va+off, |
| DMU_READ_PREFETCH); |
| zfs_unmap_page(pp, va); |
| page_unlock(pp); |
| } |
| len -= nbytes; |
| off = 0; |
| } |
| } |
| |
| /* |
| * When a file is memory mapped, we must keep the IO data synchronized |
| * between the DMU cache and the memory mapped pages. What this means: |
| * |
| * On Read: We "read" preferentially from memory mapped pages, |
| * otherwise we fall back to the dmu buffer. |
| * |
| * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when |
| * the file is memory mapped. |
| */ |
| static int |
| mappedread(vnode_t *vp, int nbytes, uio_t *uio) |
| { |
| znode_t *zp = VTOZ(vp); |
| int64_t start, off; |
| int len = nbytes; |
| int error = 0; |
| |
| start = uio->uio_loffset; |
| off = start & PAGEOFFSET; |
| for (start &= PAGEMASK; len > 0; start += PAGESIZE) { |
| page_t *pp; |
| uint64_t bytes = MIN(PAGESIZE - off, len); |
| |
| if (pp = page_lookup(vp, start, SE_SHARED)) { |
| caddr_t va; |
| |
| va = zfs_map_page(pp, S_READ); |
| error = uiomove(va + off, bytes, UIO_READ, uio); |
| zfs_unmap_page(pp, va); |
| page_unlock(pp); |
| } else { |
| error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), |
| uio, bytes); |
| } |
| len -= bytes; |
| off = 0; |
| if (error) |
| break; |
| } |
| return (error); |
| } |
| |
| offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ |
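| |
| /* |
| * Tuning sketch (not a committed interface): as a global, the chunk size |
| * can be set from /etc/system or patched live with mdb, e.g. |
| * |
| * echo "zfs_read_chunk_size/Z 0x200000" | mdb -kw |
| * |
| * It bounds how many bytes each dmu_read_uio_dbuf()/mappedread() call in |
| * the zfs_read() loop below transfers per iteration. |
| */ |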
| |
| /* |
| * Read bytes from specified file into supplied buffer. |
| * |
| * IN: vp - vnode of file to be read from. |
| * uio - structure supplying read location, range info, |
| * and return buffer. |
| * ioflag - SYNC flags; used to provide FRSYNC semantics. |
| * cr - credentials of caller. |
| * ct - caller context |
| * |
| * OUT: uio - updated offset and range, buffer filled. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Side Effects: |
| * vp - atime updated if byte count > 0 |
| */ |
| /* ARGSUSED */ |
| static int |
| zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| ssize_t n, nbytes; |
| int error = 0; |
| rl_t *rl; |
| xuio_t *xuio = NULL; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| if (zp->z_pflags & ZFS_AV_QUARANTINED) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EACCES)); |
| } |
| |
| /* |
| * Validate file offset |
| */ |
| if (uio->uio_loffset < (offset_t)0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| /* |
| * Fasttrack empty reads |
| */ |
| if (uio->uio_resid == 0) { |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* |
| * Check for mandatory locks |
| */ |
| if (MANDMODE(zp->z_mode)) { |
| if (error = chklock(vp, FREAD, |
| uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| } |
| |
| /* |
| * If we're in FRSYNC mode, sync out this znode before reading it. |
| */ |
| if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zfsvfs->z_log, zp->z_id); |
| |
| /* |
| * Lock the range against changes. |
| */ |
| rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); |
| |
| /* |
| * If we are reading past end-of-file we can skip |
| * to the end; but we might still need to set atime. |
| */ |
| if (uio->uio_loffset >= zp->z_size) { |
| error = 0; |
| goto out; |
| } |
| |
| ASSERT(uio->uio_loffset < zp->z_size); |
| n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); |
| |
| if ((uio->uio_extflg == UIO_XUIO) && |
| (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) { |
| int nblk; |
| int blksz = zp->z_blksz; |
| uint64_t offset = uio->uio_loffset; |
| |
| xuio = (xuio_t *)uio; |
| if (ISP2(blksz)) { |
| nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset, |
| blksz)) / blksz; |
| } else { |
| ASSERT(offset + n <= blksz); |
| nblk = 1; |
| } |
| (void) dmu_xuio_init(xuio, nblk); |
| |
| if (vn_has_cached_data(vp)) { |
| /* |
| * For simplicity, we always allocate a full buffer |
| * even if we only expect to read a portion of a block. |
| */ |
| while (--nblk >= 0) { |
| (void) dmu_xuio_add(xuio, |
| dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), |
| blksz), 0, blksz); |
| } |
| } |
| } |
| |
| while (n > 0) { |
| nbytes = MIN(n, zfs_read_chunk_size - |
| P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); |
| |
| if (vn_has_cached_data(vp)) { |
| error = mappedread(vp, nbytes, uio); |
| } else { |
| error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), |
| uio, nbytes); |
| } |
| if (error) { |
| /* convert checksum errors into IO errors */ |
| if (error == ECKSUM) |
| error = SET_ERROR(EIO); |
| break; |
| } |
| |
| n -= nbytes; |
| } |
| out: |
| zfs_range_unlock(rl); |
| |
| ZFS_ACCESSTIME_STAMP(zfsvfs, zp); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Write the bytes to a file. |
| * |
| * IN: vp - vnode of file to be written to. |
| * uio - structure supplying write location, range info, |
| * and data buffer. |
| * ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is |
| * set if in append mode. |
| * cr - credentials of caller. |
| * ct - caller context (NFS/CIFS fem monitor only) |
| * |
| * OUT: uio - updated offset and range. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * vp - ctime|mtime updated if byte count > 0 |
| */ |
| |
| /* ARGSUSED */ |
| static int |
| zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| rlim64_t limit = uio->uio_llimit; |
| ssize_t start_resid = uio->uio_resid; |
| ssize_t tx_bytes; |
| uint64_t end_size; |
| dmu_tx_t *tx; |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| zilog_t *zilog; |
| offset_t woff; |
| ssize_t n, nbytes; |
| rl_t *rl; |
| int max_blksz = zfsvfs->z_max_blksz; |
| int error = 0; |
| arc_buf_t *abuf; |
| iovec_t *aiov = NULL; |
| xuio_t *xuio = NULL; |
| int i_iov = 0; |
| int iovcnt = uio->uio_iovcnt; |
| iovec_t *iovp = uio->uio_iov; |
| int write_eof; |
| int count = 0; |
| sa_bulk_attr_t bulk[4]; |
| uint64_t mtime[2], ctime[2]; |
| |
| /* |
| * Fasttrack empty write |
| */ |
| n = start_resid; |
| if (n == 0) |
| return (0); |
| |
| if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) |
| limit = MAXOFFSET_T; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, |
| &zp->z_size, 8); |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, |
| &zp->z_pflags, 8); |
| |
| /* |
| * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots), our |
| * callers might not be able to detect properly that we are read-only, |
| * so check it explicitly here. |
| */ |
| if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EROFS)); |
| } |
| |
| /* |
| * If immutable or not appending then return EPERM. |
| * Intentionally allow ZFS_READONLY through here. |
| * See zfs_zaccess_common() |
| */ |
| if ((zp->z_pflags & ZFS_IMMUTABLE) || |
| ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && |
| (uio->uio_loffset < zp->z_size))) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EPERM)); |
| } |
| |
| zilog = zfsvfs->z_log; |
| |
| /* |
| * Validate file offset |
| */ |
| woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset; |
| if (woff < 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| /* |
| * Check for mandatory locks before calling zfs_range_lock() |
| * in order to prevent a deadlock with locks set via fcntl(). |
| */ |
| if (MANDMODE((mode_t)zp->z_mode) && |
| (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Pre-fault the pages to ensure slow (e.g. NFS) pages |
| * don't hold up the txg. |
| * Skip this if uio contains loaned arc_buf. |
| */ |
| if ((uio->uio_extflg == UIO_XUIO) && |
| (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) |
| xuio = (xuio_t *)uio; |
| else |
| uio_prefaultpages(MIN(n, max_blksz), uio); |
| |
| /* |
| * If in append mode, set the io offset pointer to eof. |
| */ |
| if (ioflag & FAPPEND) { |
| /* |
| * Obtain an appending range lock to guarantee file append |
| * semantics. We reset the write offset once we have the lock. |
| */ |
| rl = zfs_range_lock(zp, 0, n, RL_APPEND); |
| woff = rl->r_off; |
| if (rl->r_len == UINT64_MAX) { |
| /* |
| * We overlocked the file because this write will cause |
| * the file block size to increase. |
| * Note that z_size cannot change with this lock held. |
| */ |
| woff = zp->z_size; |
| } |
| uio->uio_loffset = woff; |
| } else { |
| /* |
| * Note that if the file block size will change as a result of |
| * this write, then this range lock will lock the entire file |
| * so that we can re-write the block safely. |
| */ |
| rl = zfs_range_lock(zp, woff, n, RL_WRITER); |
| } |
| |
| if (woff >= limit) { |
| zfs_range_unlock(rl); |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EFBIG)); |
| } |
| |
| if ((woff + n) > limit || woff > (limit - n)) |
| n = limit - woff; |
| |
| /* Will this write extend the file length? */ |
| write_eof = (woff + n > zp->z_size); |
| |
| end_size = MAX(zp->z_size, woff + n); |
| |
| /* |
| * Write the file in reasonable size chunks. Each chunk is written |
| * in a separate transaction; this keeps the intent log records small |
| * and allows us to do more fine-grained space accounting. |
| */ |
| while (n > 0) { |
| abuf = NULL; |
| woff = uio->uio_loffset; |
| if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || |
| zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { |
| if (abuf != NULL) |
| dmu_return_arcbuf(abuf); |
| error = SET_ERROR(EDQUOT); |
| break; |
| } |
| |
| if (xuio && abuf == NULL) { |
| ASSERT(i_iov < iovcnt); |
| aiov = &iovp[i_iov]; |
| abuf = dmu_xuio_arcbuf(xuio, i_iov); |
| dmu_xuio_clear(xuio, i_iov); |
| DTRACE_PROBE3(zfs_cp_write, int, i_iov, |
| iovec_t *, aiov, arc_buf_t *, abuf); |
| ASSERT((aiov->iov_base == abuf->b_data) || |
| ((char *)aiov->iov_base - (char *)abuf->b_data + |
| aiov->iov_len == arc_buf_size(abuf))); |
| i_iov++; |
| } else if (abuf == NULL && n >= max_blksz && |
| woff >= zp->z_size && |
| P2PHASE(woff, max_blksz) == 0 && |
| zp->z_blksz == max_blksz) { |
| /* |
| * This write covers a full block. "Borrow" a buffer |
| * from the dmu so that we can fill it before we enter |
| * a transaction. This avoids the possibility of |
| * holding up the transaction if the data copy hangs |
| * up on a pagefault (e.g., from an NFS server mapping). |
| */ |
| size_t cbytes; |
| |
| abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), |
| max_blksz); |
| ASSERT(abuf != NULL); |
| ASSERT(arc_buf_size(abuf) == max_blksz); |
| if (error = uiocopy(abuf->b_data, max_blksz, |
| UIO_WRITE, uio, &cbytes)) { |
| dmu_return_arcbuf(abuf); |
| break; |
| } |
| ASSERT(cbytes == max_blksz); |
| } |
| |
| /* |
| * Start a transaction. |
| */ |
| tx = dmu_tx_create(zfsvfs->z_os); |
| dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); |
| dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); |
| zfs_sa_upgrade_txholds(tx, zp); |
| error = dmu_tx_assign(tx, TXG_WAIT); |
| if (error) { |
| dmu_tx_abort(tx); |
| if (abuf != NULL) |
| dmu_return_arcbuf(abuf); |
| break; |
| } |
| |
| /* |
| * If zfs_range_lock() over-locked we grow the blocksize |
| * and then reduce the lock range. This will only happen |
| * on the first iteration since zfs_range_reduce() will |
| * shrink down r_len to the appropriate size. |
| */ |
| if (rl->r_len == UINT64_MAX) { |
| uint64_t new_blksz; |
| |
| if (zp->z_blksz > max_blksz) { |
| /* |
| * File's blocksize is already larger than the |
| * "recordsize" property. Only let it grow to |
| * the next power of 2. |
| */ |
| ASSERT(!ISP2(zp->z_blksz)); |
| new_blksz = MIN(end_size, |
| 1 << highbit64(zp->z_blksz)); |
| } else { |
| new_blksz = MIN(end_size, max_blksz); |
| } |
| zfs_grow_blocksize(zp, new_blksz, tx); |
| zfs_range_reduce(rl, woff, n); |
| } |
| |
| /* |
| * XXX - should we really limit each write to z_max_blksz? |
| * Perhaps we should use SPA_MAXBLOCKSIZE chunks? |
| */ |
| nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)); |
| |
| if (abuf == NULL) { |
| tx_bytes = uio->uio_resid; |
| error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl), |
| uio, nbytes, tx); |
| tx_bytes -= uio->uio_resid; |
| } else { |
| tx_bytes = nbytes; |
| ASSERT(xuio == NULL || tx_bytes == aiov->iov_len); |
| /* |
| * If this is not a full block write, but we are |
| * extending the file past EOF and this data starts |
| * block-aligned, use assign_arcbuf(). Otherwise, |
| * write via dmu_write(). |
| */ |
| if (tx_bytes < max_blksz && (!write_eof || |
| aiov->iov_base != abuf->b_data)) { |
| ASSERT(xuio); |
| dmu_write(zfsvfs->z_os, zp->z_id, woff, |
| aiov->iov_len, aiov->iov_base, tx); |
| dmu_return_arcbuf(abuf); |
| xuio_stat_wbuf_copied(); |
| } else { |
| ASSERT(xuio || tx_bytes == max_blksz); |
| dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl), |
| woff, abuf, tx); |
| } |
| ASSERT(tx_bytes <= uio->uio_resid); |
| uioskip(uio, tx_bytes); |
| } |
| if (tx_bytes && vn_has_cached_data(vp)) { |
| update_pages(vp, woff, |
| tx_bytes, zfsvfs->z_os, zp->z_id); |
| } |
| |
| /* |
| * If we made no progress, we're done. If we made even |
| * partial progress, update the znode and ZIL accordingly. |
| */ |
| if (tx_bytes == 0) { |
| (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), |
| (void *)&zp->z_size, sizeof (uint64_t), tx); |
| dmu_tx_commit(tx); |
| ASSERT(error != 0); |
| break; |
| } |
| |
| /* |
| * Clear Set-UID/Set-GID bits on successful write if not |
| * privileged and at least one of the execute bits is set. |
| * |
| * It would be nice to do this after all writes have |
| * been done, but that would still expose the ISUID/ISGID |
| * to another app after the partial write is committed. |
| * |
| * Note: we don't call zfs_fuid_map_id() here because |
| * user 0 is not an ephemeral uid. |
| */ |
| mutex_enter(&zp->z_acl_lock); |
| if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | |
| (S_IXUSR >> 6))) != 0 && |
| (zp->z_mode & (S_ISUID | S_ISGID)) != 0 && |
| secpolicy_vnode_setid_retain(cr, |
| (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) { |
| uint64_t newmode; |
| zp->z_mode &= ~(S_ISUID | S_ISGID); |
| newmode = zp->z_mode; |
| (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), |
| (void *)&newmode, sizeof (uint64_t), tx); |
| } |
| mutex_exit(&zp->z_acl_lock); |
| |
| zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, |
| B_TRUE); |
| |
| /* |
| * Update the file size (z_size) if it has changed; |
| * account for possible concurrent updates. |
| */ |
| while ((end_size = zp->z_size) < uio->uio_loffset) { |
| (void) atomic_cas_64(&zp->z_size, end_size, |
| uio->uio_loffset); |
| ASSERT(error == 0); |
| } |
| /* |
| * If we are replaying and eof is non-zero then force |
| * the file size to the specified eof. Note, there's no |
| * concurrency during replay. |
| */ |
| if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0) |
| zp->z_size = zfsvfs->z_replay_eof; |
| |
| error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); |
| |
| zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); |
| dmu_tx_commit(tx); |
| |
| if (error != 0) |
| break; |
| ASSERT(tx_bytes == nbytes); |
| n -= nbytes; |
| |
| if (!xuio && n > 0) |
| uio_prefaultpages(MIN(n, max_blksz), uio); |
| } |
| |
| zfs_range_unlock(rl); |
| |
| /* |
| * If we're in replay mode, or we made no progress, return error. |
| * Otherwise, it's at least a partial write, so it's successful. |
| */ |
| if (zfsvfs->z_replay || uio->uio_resid == start_resid) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| if (ioflag & (FSYNC | FDSYNC) || |
| zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zilog, zp->z_id); |
| |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* ARGSUSED */ |
| void |
| zfs_get_done(zgd_t *zgd, int error) |
| { |
| znode_t *zp = zgd->zgd_private; |
| objset_t *os = zp->z_zfsvfs->z_os; |
| |
| if (zgd->zgd_db) |
| dmu_buf_rele(zgd->zgd_db, zgd); |
| |
| zfs_range_unlock(zgd->zgd_rl); |
| |
| /* |
| * Release the vnode asynchronously as we currently have the |
| * txg stopped from syncing. |
| */ |
| VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os))); |
| |
| kmem_free(zgd, sizeof (zgd_t)); |
| } |
| |
| #ifdef DEBUG |
| static int zil_fault_io = 0; |
| #endif |
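| |
| /* |
| * zil_fault_io is a DEBUG-only fault-injection hook: when set nonzero, |
| * the next indirect write in zfs_get_data() below fails once with EIO, |
| * exercising the dmu_sync() error paths. A sketch of flipping it on a |
| * DEBUG kernel with mdb: |
| * |
| * echo "zil_fault_io/W 1" | mdb -kw |
| */ |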
| |
| /* |
| * Get data to generate a TX_WRITE intent log record. |
| */ |
| int |
| zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio) |
| { |
| zfsvfs_t *zfsvfs = arg; |
| objset_t *os = zfsvfs->z_os; |
| znode_t *zp; |
| uint64_t object = lr->lr_foid; |
| uint64_t offset = lr->lr_offset; |
| uint64_t size = lr->lr_length; |
| dmu_buf_t *db; |
| zgd_t *zgd; |
| int error = 0; |
| |
| ASSERT3P(lwb, !=, NULL); |
| ASSERT3P(zio, !=, NULL); |
| ASSERT3U(size, !=, 0); |
| |
| /* |
| * Nothing to do if the file has been removed |
| */ |
| if (zfs_zget(zfsvfs, object, &zp) != 0) |
| return (SET_ERROR(ENOENT)); |
| if (zp->z_unlinked) { |
| /* |
| * Release the vnode asynchronously as we currently have the |
| * txg stopped from syncing. |
| */ |
| VN_RELE_ASYNC(ZTOV(zp), |
| dsl_pool_vnrele_taskq(dmu_objset_pool(os))); |
| return (SET_ERROR(ENOENT)); |
| } |
| |
| zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP); |
| zgd->zgd_lwb = lwb; |
| zgd->zgd_private = zp; |
| |
| /* |
| * Write records come in two flavors: immediate and indirect. |
| * For small writes it's cheaper to store the data with the |
| * log record (immediate); for large writes it's cheaper to |
| * sync the data and get a pointer to it (indirect) so that |
| * we don't have to write the data twice. |
| */ |
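| /* |
| * (A sketch of the caller's side, per the illumos ZIL: zfs_log_write() |
| * chooses WR_INDIRECT for large writes or logbias=throughput, WR_COPIED |
| * for small synchronous writes, and WR_NEED_COPY otherwise. By the time |
| * we get here, buf != NULL corresponds to WR_NEED_COPY and buf == NULL |
| * to WR_INDIRECT; WR_COPIED records never call back into this function.) |
| */ |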
| if (buf != NULL) { /* immediate write */ |
| zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER); |
| /* test for truncation needs to be done while range locked */ |
| if (offset >= zp->z_size) { |
| error = SET_ERROR(ENOENT); |
| } else { |
| error = dmu_read(os, object, offset, size, buf, |
| DMU_READ_NO_PREFETCH); |
| } |
| ASSERT(error == 0 || error == ENOENT); |
| } else { /* indirect write */ |
| /* |
| * Have to lock the whole block to ensure that no one can |
| * change the data while it's being written out and its |
| * checksum is being calculated. We need to re-check the |
| * blocksize after we get the lock in case it's changed! |
| */ |
| for (;;) { |
| uint64_t blkoff; |
| size = zp->z_blksz; |
| blkoff = ISP2(size) ? P2PHASE(offset, size) : offset; |
| offset -= blkoff; |
| zgd->zgd_rl = zfs_range_lock(zp, offset, size, |
| RL_READER); |
| if (zp->z_blksz == size) |
| break; |
| offset += blkoff; |
| zfs_range_unlock(zgd->zgd_rl); |
| } |
| /* test for truncation needs to be done while range locked */ |
| if (lr->lr_offset >= zp->z_size) |
| error = SET_ERROR(ENOENT); |
| #ifdef DEBUG |
| if (zil_fault_io) { |
| error = SET_ERROR(EIO); |
| zil_fault_io = 0; |
| } |
| #endif |
| if (error == 0) |
| error = dmu_buf_hold(os, object, offset, zgd, &db, |
| DMU_READ_NO_PREFETCH); |
| |
| if (error == 0) { |
| blkptr_t *bp = &lr->lr_blkptr; |
| |
| zgd->zgd_db = db; |
| zgd->zgd_bp = bp; |
| |
| ASSERT(db->db_offset == offset); |
| ASSERT(db->db_size == size); |
| |
| error = dmu_sync(zio, lr->lr_common.lrc_txg, |
| zfs_get_done, zgd); |
| ASSERT(error || lr->lr_length <= size); |
| |
| /* |
| * On success, we need to wait for the write I/O |
| * initiated by dmu_sync() to complete before we can |
| * release this dbuf. We will finish everything up |
| * in the zfs_get_done() callback. |
| */ |
| if (error == 0) |
| return (0); |
| |
| if (error == EALREADY) { |
| lr->lr_common.lrc_txtype = TX_WRITE2; |
| /* |
| * TX_WRITE2 relies on the data previously |
| * written by the TX_WRITE that caused |
| * EALREADY. We zero out the BP because |
| * it is the old, currently-on-disk BP. |
| */ |
| zgd->zgd_bp = NULL; |
| BP_ZERO(bp); |
| error = 0; |
| } |
| } |
| } |
| |
| zfs_get_done(zgd, error); |
| |
| return (error); |
| } |
| |
| /*ARGSUSED*/ |
| static int |
| zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, |
| caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| int error; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| if (flag & V_ACE_MASK) |
| error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); |
| else |
| error = zfs_zaccess_rwx(zp, mode, flag, cr); |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * If vnode is for a device return a specfs vnode instead. |
| */ |
| static int |
| specvp_check(vnode_t **vpp, cred_t *cr) |
| { |
| int error = 0; |
| |
| if (IS_DEVVP(*vpp)) { |
| struct vnode *svp; |
| |
| svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); |
| VN_RELE(*vpp); |
| if (svp == NULL) |
| error = SET_ERROR(ENOSYS); |
| *vpp = svp; |
| } |
| return (error); |
| } |
| |
| |
| /* |
| * Lookup an entry in a directory, or an extended attribute directory. |
| * If it exists, return a held vnode reference for it. |
| * |
| * IN: dvp - vnode of directory to search. |
| * nm - name of entry to lookup. |
| * pnp - full pathname to lookup [UNUSED]. |
| * flags - LOOKUP_XATTR set if looking for an attribute. |
| * rdir - root directory vnode [UNUSED]. |
| * cr - credentials of caller. |
| * ct - caller context |
| * direntflags - directory lookup flags |
| * realpnp - returned pathname. |
| * |
| * OUT: vpp - vnode of located entry, NULL if not found. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * NA |
| */ |
| /* ARGSUSED */ |
| static int |
| zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, |
| int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, |
| int *direntflags, pathname_t *realpnp) |
| { |
| znode_t *zdp = VTOZ(dvp); |
| zfsvfs_t *zfsvfs = zdp->z_zfsvfs; |
| int error = 0; |
| |
| /* |
| * Fast path lookup, however we must skip DNLC lookup |
| * for case folding or normalizing lookups because the |
| * DNLC code only stores the passed in name. This means |
| * creating 'a' and removing 'A' on a case insensitive |
| * file system would work, but DNLC still thinks 'a' |
| * exists and won't let you create it again on the next |
| * pass through fast path. |
| */ |
| if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) { |
| |
| if (dvp->v_type != VDIR) { |
| return (SET_ERROR(ENOTDIR)); |
| } else if (zdp->z_sa_hdl == NULL) { |
| return (SET_ERROR(EIO)); |
| } |
| |
| if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) { |
| error = zfs_fastaccesschk_execute(zdp, cr); |
| if (!error) { |
| *vpp = dvp; |
| VN_HOLD(*vpp); |
| return (0); |
| } |
| return (error); |
| } else if (!zdp->z_zfsvfs->z_norm && |
| (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) { |
| |
| vnode_t *tvp = dnlc_lookup(dvp, nm); |
| |
| if (tvp) { |
| error = zfs_fastaccesschk_execute(zdp, cr); |
| if (error) { |
| VN_RELE(tvp); |
| return (error); |
| } |
| if (tvp == DNLC_NO_VNODE) { |
| VN_RELE(tvp); |
| return (SET_ERROR(ENOENT)); |
| } else { |
| *vpp = tvp; |
| return (specvp_check(vpp, cr)); |
| } |
| } |
| } |
| } |
| |
| DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm); |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zdp); |
| |
| *vpp = NULL; |
| |
| if (flags & LOOKUP_XATTR) { |
| /* |
| * If the xattr property is off, refuse the lookup request. |
| */ |
| if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| /* |
| * We don't allow recursive attributes. |
| * Maybe someday we will. |
| */ |
| if (zdp->z_pflags & ZFS_XATTR) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Do we have permission to get into attribute directory? |
| */ |
| |
| if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, |
| B_FALSE, cr)) { |
| VN_RELE(*vpp); |
| *vpp = NULL; |
| } |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| if (dvp->v_type != VDIR) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(ENOTDIR)); |
| } |
| |
| /* |
| * Check accessibility of directory. |
| */ |
| |
| if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), |
| NULL, U8_VALIDATE_ENTIRE, &error) < 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EILSEQ)); |
| } |
| |
| error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); |
| if (error == 0) |
| error = specvp_check(vpp, cr); |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Attempt to create a new entry in a directory. If the entry |
| * already exists, truncate the file if permissible, else return |
| * an error. Return the vp of the created or trunc'd file. |
| * |
| * IN: dvp - vnode of directory to put new file entry in. |
| * name - name of new file entry. |
| * vap - attributes of new file. |
| * excl - flag indicating exclusive or non-exclusive mode. |
| * mode - mode to open file with. |
| * cr - credentials of caller. |
| * flag - large file flag [UNUSED]. |
| * ct - caller context |
| * vsecp - ACL to be set |
| * |
| * OUT: vpp - vnode of created or trunc'd entry. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * dvp - ctime|mtime updated if new entry created |
| * vp - ctime|mtime always, atime if new |
| */ |
| |
| /* ARGSUSED */ |
| static int |
| zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, |
| int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, |
| vsecattr_t *vsecp) |
| { |
| znode_t *zp, *dzp = VTOZ(dvp); |
| zfsvfs_t *zfsvfs = dzp->z_zfsvfs; |
| zilog_t *zilog; |
| objset_t *os; |
| zfs_dirlock_t *dl; |
| dmu_tx_t *tx; |
| int error; |
| ksid_t *ksid; |
| uid_t uid; |
| gid_t gid = crgetgid(cr); |
| zfs_acl_ids_t acl_ids; |
| boolean_t fuid_dirtied; |
| boolean_t have_acl = B_FALSE; |
| boolean_t waited = B_FALSE; |
| |
| /* |
| * If we have an ephemeral id, ACL, or XVATTR then |
| * make sure file system is at proper version |
| */ |
| |
| ksid = crgetsid(cr, KSID_OWNER); |
| if (ksid) |
| uid = ksid_getid(ksid); |
| else |
| uid = crgetuid(cr); |
| |
| if (zfsvfs->z_use_fuids == B_FALSE && |
| (vsecp || (vap->va_mask & AT_XVATTR) || |
| IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) |
| return (SET_ERROR(EINVAL)); |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(dzp); |
| os = zfsvfs->z_os; |
| zilog = zfsvfs->z_log; |
| |
| if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), |
| NULL, U8_VALIDATE_ENTIRE, &error) < 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EILSEQ)); |
| } |
| |
| if (vap->va_mask & AT_XVATTR) { |
| if ((error = secpolicy_xvattr((xvattr_t *)vap, |
| crgetuid(cr), cr, vap->va_type)) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| } |
| top: |
| *vpp = NULL; |
| |
| if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) |
| vap->va_mode &= ~VSVTX; |
| |
| if (*name == '\0') { |
| /* |
| * Null component name refers to the directory itself. |
| */ |
| VN_HOLD(dvp); |
| zp = dzp; |
| dl = NULL; |
| error = 0; |
| } else { |
| /* possible VN_HOLD(zp) */ |
| int zflg = 0; |
| |
| if (flag & FIGNORECASE) |
| zflg |= ZCILOOK; |
| |
| error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, |
| NULL, NULL); |
| if (error) { |
| if (have_acl) |
| zfs_acl_ids_free(&acl_ids); |
| if (strcmp(name, "..") == 0) |
| error = SET_ERROR(EISDIR); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| } |
| |
| if (zp == NULL) { |
| uint64_t txtype; |
| |
| /* |
| * Create a new file object and update the directory |
| * to reference it. |
| */ |
| if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { |
| if (have_acl) |
| zfs_acl_ids_free(&acl_ids); |
| goto out; |
| } |
| |
| /* |
| * We only support the creation of regular files in |
| * extended attribute directories. |
| */ |
| |
| if ((dzp->z_pflags & ZFS_XATTR) && |
| (vap->va_type != VREG)) { |
| if (have_acl) |
| zfs_acl_ids_free(&acl_ids); |
| error = SET_ERROR(EINVAL); |
| goto out; |
| } |
| |
| if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap, |
| cr, vsecp, &acl_ids)) != 0) |
| goto out; |
| have_acl = B_TRUE; |
| |
| if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { |
| zfs_acl_ids_free(&acl_ids); |
| error = SET_ERROR(EDQUOT); |
| goto out; |
| } |
| |
| tx = dmu_tx_create(os); |
| |
| dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + |
| ZFS_SA_BASE_ATTR_SIZE); |
| |
| fuid_dirtied = zfsvfs->z_fuid_dirty; |
| if (fuid_dirtied) |
| zfs_fuid_txhold(zfsvfs, tx); |
| dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); |
| dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); |
| if (!zfsvfs->z_use_sa && |
| acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { |
| dmu_tx_hold_write(tx, DMU_NEW_OBJECT, |
| 0, acl_ids.z_aclp->z_acl_bytes); |
| } |
| error = dmu_tx_assign(tx, |
| (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); |
| if (error) { |
| zfs_dirent_unlock(dl); |
| if (error == ERESTART) { |
| waited = B_TRUE; |
| dmu_tx_wait(tx); |
| dmu_tx_abort(tx); |
| goto top; |
| } |
| zfs_acl_ids_free(&acl_ids); |
| dmu_tx_abort(tx); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); |
| |
| if (fuid_dirtied) |
| zfs_fuid_sync(zfsvfs, tx); |
| |
| (void) zfs_link_create(dl, zp, tx, ZNEW); |
| txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); |
| if (flag & FIGNORECASE) |
| txtype |= TX_CI; |
| zfs_log_create(zilog, tx, txtype, dzp, zp, name, |
| vsecp, acl_ids.z_fuidp, vap); |
| zfs_acl_ids_free(&acl_ids); |
| dmu_tx_commit(tx); |
| } else { |
| int aflags = (flag & FAPPEND) ? V_APPEND : 0; |
| |
| if (have_acl) |
| zfs_acl_ids_free(&acl_ids); |
| have_acl = B_FALSE; |
| |
| /* |
| * A directory entry already exists for this name. |
| */ |
| /* |
| * Can't truncate an existing file if in exclusive mode. |
| */ |
| if (excl == EXCL) { |
| error = SET_ERROR(EEXIST); |
| goto out; |
| } |
| /* |
| * Can't open a directory for writing. |
| */ |
| if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { |
| error = SET_ERROR(EISDIR); |
| goto out; |
| } |
| /* |
| * Verify requested access to file. |
| */ |
| if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { |
| goto out; |
| } |
| |
| mutex_enter(&dzp->z_lock); |
| dzp->z_seq++; |
| mutex_exit(&dzp->z_lock); |
| |
| /* |
| * Truncate regular files if requested. |
| */ |
| if ((ZTOV(zp)->v_type == VREG) && |
| (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { |
| /* we can't hold any locks when calling zfs_freesp() */ |
| zfs_dirent_unlock(dl); |
| dl = NULL; |
| error = zfs_freesp(zp, 0, 0, mode, TRUE); |
| if (error == 0) { |
| vnevent_create(ZTOV(zp), ct); |
| } |
| } |
| } |
| out: |
| |
| if (dl) |
| zfs_dirent_unlock(dl); |
| |
| if (error) { |
| if (zp) |
| VN_RELE(ZTOV(zp)); |
| } else { |
| *vpp = ZTOV(zp); |
| error = specvp_check(vpp, cr); |
| } |
| |
| if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zilog, 0); |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Remove an entry from a directory. |
| * |
| * IN: dvp - vnode of directory to remove entry from. |
| * name - name of entry to remove. |
| * cr - credentials of caller. |
| * ct - caller context |
| * flags - case flags |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * dvp - ctime|mtime |
| * vp - ctime (if nlink > 0) |
| */ |
| |
| uint64_t null_xattr = 0; |
| |
| /*ARGSUSED*/ |
| static int |
| zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, |
| int flags) |
| { |
| znode_t *zp, *dzp = VTOZ(dvp); |
| znode_t *xzp; |
| vnode_t *vp; |
| zfsvfs_t *zfsvfs = dzp->z_zfsvfs; |
| zilog_t *zilog; |
| uint64_t acl_obj, xattr_obj; |
| uint64_t xattr_obj_unlinked = 0; |
| uint64_t obj = 0; |
| zfs_dirlock_t *dl; |
| dmu_tx_t *tx; |
| boolean_t may_delete_now, delete_now = FALSE; |
| boolean_t unlinked, toobig = FALSE; |
| uint64_t txtype; |
| pathname_t *realnmp = NULL; |
| pathname_t realnm; |
| int error; |
| int zflg = ZEXISTS; |
| boolean_t waited = B_FALSE; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(dzp); |
| zilog = zfsvfs->z_log; |
| |
| if (flags & FIGNORECASE) { |
| zflg |= ZCILOOK; |
| pn_alloc(&realnm); |
| realnmp = &realnm; |
| } |
| |
| top: |
| xattr_obj = 0; |
| xzp = NULL; |
| /* |
| * Attempt to lock directory; fail if entry doesn't exist. |
| */ |
| if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, |
| NULL, realnmp)) { |
| if (realnmp) |
| pn_free(realnmp); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| vp = ZTOV(zp); |
| |
| if (error = zfs_zaccess_delete(dzp, zp, cr)) { |
| goto out; |
| } |
| |
| /* |
| * Need to use rmdir for removing directories. |
| */ |
| if (vp->v_type == VDIR) { |
| error = SET_ERROR(EPERM); |
| goto out; |
| } |
| |
| vnevent_remove(vp, dvp, name, ct); |
| |
| if (realnmp) |
| dnlc_remove(dvp, realnmp->pn_buf); |
| else |
| dnlc_remove(dvp, name); |
| |
| mutex_enter(&vp->v_lock); |
| may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); |
| mutex_exit(&vp->v_lock); |
| |
| /* |
| * We may delete the znode now, or we may put it in the unlinked set; |
| * it depends on whether we're the last link, and on whether there are |
| * other holds on the vnode. So we dmu_tx_hold() the right things to |
| * allow for either case. |
| */ |
| obj = zp->z_id; |
| tx = dmu_tx_create(zfsvfs->z_os); |
| dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); |
| dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); |
| zfs_sa_upgrade_txholds(tx, zp); |
| zfs_sa_upgrade_txholds(tx, dzp); |
| if (may_delete_now) { |
| toobig = |
| zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; |
| /* if the file is too big, only hold_free a token amount */ |
| dmu_tx_hold_free(tx, zp->z_id, 0, |
| (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); |
| } |
| |
| /* are there any extended attributes? */ |
| error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), |
| &xattr_obj, sizeof (xattr_obj)); |
| if (error == 0 && xattr_obj) { |
| error = zfs_zget(zfsvfs, xattr_obj, &xzp); |
| ASSERT0(error); |
| dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); |
| dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); |
| } |
| |
| mutex_enter(&zp->z_lock); |
| if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now) |
| dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); |
| mutex_exit(&zp->z_lock); |
| |
| /* charge as an update -- would be nice not to charge at all */ |
| dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); |
| |
| /* |
| * Mark this transaction as typically resulting in a net free of space |
| */ |
| dmu_tx_mark_netfree(tx); |
| |
| error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); |
| if (error) { |
| zfs_dirent_unlock(dl); |
| VN_RELE(vp); |
| if (xzp) |
| VN_RELE(ZTOV(xzp)); |
| if (error == ERESTART) { |
| waited = B_TRUE; |
| dmu_tx_wait(tx); |
| dmu_tx_abort(tx); |
| goto top; |
| } |
| if (realnmp) |
| pn_free(realnmp); |
| dmu_tx_abort(tx); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Remove the directory entry. |
| */ |
| error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); |
| |
| if (error) { |
| dmu_tx_commit(tx); |
| goto out; |
| } |
| |
| if (unlinked) { |
| /* |
| * Hold z_lock so that we can make sure that the ACL obj |
| * hasn't changed. Could have been deleted due to |
| * zfs_sa_upgrade(). |
| */ |
| mutex_enter(&zp->z_lock); |
| mutex_enter(&vp->v_lock); |
| (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), |
| &xattr_obj_unlinked, sizeof (xattr_obj_unlinked)); |
| delete_now = may_delete_now && !toobig && |
| vp->v_count == 1 && !vn_has_cached_data(vp) && |
| xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) == |
| acl_obj; |
| mutex_exit(&vp->v_lock); |
| } |
| |
| if (delete_now) { |
| if (xattr_obj_unlinked) { |
| ASSERT3U(xzp->z_links, ==, 2); |
| mutex_enter(&xzp->z_lock); |
| xzp->z_unlinked = 1; |
| xzp->z_links = 0; |
| error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs), |
| &xzp->z_links, sizeof (xzp->z_links), tx); |
| ASSERT3U(error, ==, 0); |
| mutex_exit(&xzp->z_lock); |
| zfs_unlinked_add(xzp, tx); |
| |
| if (zp->z_is_sa) |
| error = sa_remove(zp->z_sa_hdl, |
| SA_ZPL_XATTR(zfsvfs), tx); |
| else |
| error = sa_update(zp->z_sa_hdl, |
| SA_ZPL_XATTR(zfsvfs), &null_xattr, |
| sizeof (uint64_t), tx); |
| ASSERT0(error); |
| } |
| mutex_enter(&vp->v_lock); |
| VN_RELE_LOCKED(vp); |
| ASSERT0(vp->v_count); |
| mutex_exit(&vp->v_lock); |
| mutex_exit(&zp->z_lock); |
| zfs_znode_delete(zp, tx); |
| } else if (unlinked) { |
| mutex_exit(&zp->z_lock); |
| zfs_unlinked_add(zp, tx); |
| } |
| |
| txtype = TX_REMOVE; |
| if (flags & FIGNORECASE) |
| txtype |= TX_CI; |
| zfs_log_remove(zilog, tx, txtype, dzp, name, obj); |
| |
| dmu_tx_commit(tx); |
| out: |
| if (realnmp) |
| pn_free(realnmp); |
| |
| zfs_dirent_unlock(dl); |
| |
| if (!delete_now) |
| VN_RELE(vp); |
| if (xzp) |
| VN_RELE(ZTOV(xzp)); |
| |
| if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zilog, 0); |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Create a new directory and insert it into dvp using the name |
| * provided. Return a pointer to the inserted directory. |
| * |
| * IN: dvp - vnode of directory to add subdir to. |
| * dirname - name of new directory. |
| * vap - attributes of new directory. |
| * cr - credentials of caller. |
| * ct - caller context |
| * flags - case flags |
| * vsecp - ACL to be set |
| * |
| * OUT: vpp - vnode of created directory. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * dvp - ctime|mtime updated |
| * vp - ctime|mtime|atime updated |
| */ |
| /*ARGSUSED*/ |
| static int |
| zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, |
| caller_context_t *ct, int flags, vsecattr_t *vsecp) |
| { |
| znode_t *zp, *dzp = VTOZ(dvp); |
| zfsvfs_t *zfsvfs = dzp->z_zfsvfs; |
| zilog_t *zilog; |
| zfs_dirlock_t *dl; |
| uint64_t txtype; |
| dmu_tx_t *tx; |
| int error; |
| int zf = ZNEW; |
| ksid_t *ksid; |
| uid_t uid; |
| gid_t gid = crgetgid(cr); |
| zfs_acl_ids_t acl_ids; |
| boolean_t fuid_dirtied; |
| boolean_t waited = B_FALSE; |
| |
| ASSERT(vap->va_type == VDIR); |
| |
| /* |
| * If we have an ephemeral id, ACL, or XVATTR then |
| * make sure file system is at proper version |
| */ |
| |
| ksid = crgetsid(cr, KSID_OWNER); |
| if (ksid) |
| uid = ksid_getid(ksid); |
| else |
| uid = crgetuid(cr); |
| if (zfsvfs->z_use_fuids == B_FALSE && |
| (vsecp || (vap->va_mask & AT_XVATTR) || |
| IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) |
| return (SET_ERROR(EINVAL)); |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(dzp); |
| zilog = zfsvfs->z_log; |
| |
| if (dzp->z_pflags & ZFS_XATTR) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| if (zfsvfs->z_utf8 && u8_validate(dirname, |
| strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EILSEQ)); |
| } |
| if (flags & FIGNORECASE) |
| zf |= ZCILOOK; |
| |
| if (vap->va_mask & AT_XVATTR) { |
| if ((error = secpolicy_xvattr((xvattr_t *)vap, |
| crgetuid(cr), cr, vap->va_type)) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| } |
| |
| if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, |
| vsecp, &acl_ids)) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| /* |
| * First make sure the new directory doesn't exist. |
| * |
| * Existence is checked first to make sure we don't return |
| * EACCES instead of EEXIST, which can cause some applications |
| * to fail. |
| */ |
| top: |
| *vpp = NULL; |
| |
| if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, |
| NULL, NULL)) { |
| zfs_acl_ids_free(&acl_ids); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { |
| zfs_acl_ids_free(&acl_ids); |
| zfs_dirent_unlock(dl); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { |
| zfs_acl_ids_free(&acl_ids); |
| zfs_dirent_unlock(dl); |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EDQUOT)); |
| } |
| |
| /* |
| * Add a new entry to the directory. |
| */ |
| tx = dmu_tx_create(zfsvfs->z_os); |
| dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); |
| dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); |
| fuid_dirtied = zfsvfs->z_fuid_dirty; |
| if (fuid_dirtied) |
| zfs_fuid_txhold(zfsvfs, tx); |
| if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { |
| dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, |
| acl_ids.z_aclp->z_acl_bytes); |
| } |
| |
| dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + |
| ZFS_SA_BASE_ATTR_SIZE); |
| |
| error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); |
| if (error) { |
| zfs_dirent_unlock(dl); |
| if (error == ERESTART) { |
| waited = B_TRUE; |
| dmu_tx_wait(tx); |
| dmu_tx_abort(tx); |
| goto top; |
| } |
| zfs_acl_ids_free(&acl_ids); |
| dmu_tx_abort(tx); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Create new node. |
| */ |
| zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); |
| |
| if (fuid_dirtied) |
| zfs_fuid_sync(zfsvfs, tx); |
| |
| /* |
| * Now put new name in parent dir. |
| */ |
| (void) zfs_link_create(dl, zp, tx, ZNEW); |
| |
| *vpp = ZTOV(zp); |
| |
| txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); |
| if (flags & FIGNORECASE) |
| txtype |= TX_CI; |
| zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, |
| acl_ids.z_fuidp, vap); |
| |
| zfs_acl_ids_free(&acl_ids); |
| |
| dmu_tx_commit(tx); |
| |
| zfs_dirent_unlock(dl); |
| |
| if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zilog, 0); |
| |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* |
| * Remove a directory subdir entry. If the current working |
| * directory is the same as the subdir to be removed, the |
| * remove will fail. |
| * |
| * IN: dvp - vnode of directory to remove from. |
| * name - name of directory to be removed. |
| * cwd - vnode of current working directory. |
| * cr - credentials of caller. |
| * ct - caller context |
| * flags - case flags |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * dvp - ctime|mtime updated |
| */ |
| /*ARGSUSED*/ |
| static int |
| zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, |
| caller_context_t *ct, int flags) |
| { |
| znode_t *dzp = VTOZ(dvp); |
| znode_t *zp; |
| vnode_t *vp; |
| zfsvfs_t *zfsvfs = dzp->z_zfsvfs; |
| zilog_t *zilog; |
| zfs_dirlock_t *dl; |
| dmu_tx_t *tx; |
| int error; |
| int zflg = ZEXISTS; |
| boolean_t waited = B_FALSE; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(dzp); |
| zilog = zfsvfs->z_log; |
| |
| if (flags & FIGNORECASE) |
| zflg |= ZCILOOK; |
| top: |
| zp = NULL; |
| |
| /* |
| * Attempt to lock directory; fail if entry doesn't exist. |
| */ |
| if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, |
| NULL, NULL)) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| vp = ZTOV(zp); |
| |
| if (error = zfs_zaccess_delete(dzp, zp, cr)) { |
| goto out; |
| } |
| |
| if (vp->v_type != VDIR) { |
| error = SET_ERROR(ENOTDIR); |
| goto out; |
| } |
| |
| if (vp == cwd) { |
| error = SET_ERROR(EINVAL); |
| goto out; |
| } |
| |
| vnevent_rmdir(vp, dvp, name, ct); |
| |
| /* |
| * Grab a lock on the directory to make sure that no one is |
| * trying to add (or look up) entries while we are removing it. |
| */ |
| rw_enter(&zp->z_name_lock, RW_WRITER); |
| |
| /* |
| * Grab a lock on the parent pointer to make sure we play well |
| * with the treewalk and directory rename code. |
| */ |
| rw_enter(&zp->z_parent_lock, RW_WRITER); |
| |
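| /* |
| * Hold the parent's ZAP for the entry removal, the child's SA for |
| * the link-count update, and the unlinked-set ZAP in case the |
| * directory must linger there until its last reference goes away. |
| */ |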
| tx = dmu_tx_create(zfsvfs->z_os); |
| dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); |
| dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); |
| dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); |
| zfs_sa_upgrade_txholds(tx, zp); |
| zfs_sa_upgrade_txholds(tx, dzp); |
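| /* Removal frees space on net; let the tx through near pool-full. */ |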
| dmu_tx_mark_netfree(tx); |
| error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); |
| if (error) { |
| rw_exit(&zp->z_parent_lock); |
| rw_exit(&zp->z_name_lock); |
| zfs_dirent_unlock(dl); |
| VN_RELE(vp); |
| if (error == ERESTART) { |
| waited = B_TRUE; |
| dmu_tx_wait(tx); |
| dmu_tx_abort(tx); |
| goto top; |
| } |
| dmu_tx_abort(tx); |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| error = zfs_link_destroy(dl, zp, tx, zflg, NULL); |
| |
| if (error == 0) { |
| uint64_t txtype = TX_RMDIR; |
| if (flags & FIGNORECASE) |
| txtype |= TX_CI; |
| zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT); |
| } |
| |
| dmu_tx_commit(tx); |
| |
| rw_exit(&zp->z_parent_lock); |
| rw_exit(&zp->z_name_lock); |
| out: |
| zfs_dirent_unlock(dl); |
| |
| VN_RELE(vp); |
| |
| if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) |
| zil_commit(zilog, 0); |
| |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * Read as many directory entries as will fit into the provided |
| * buffer from the given directory cursor position (specified in |
| * the uio structure). |
| * |
| * IN: vp - vnode of directory to read. |
| * uio - structure supplying read location, range info, |
| * and return buffer. |
| * cr - credentials of caller. |
| * ct - caller context |
| * flags - case flags |
| * |
| * OUT: uio - updated offset and range, buffer filled. |
| * eofp - set to true if end-of-file detected. |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * vp - atime updated |
| * |
| * Note that the low 4 bits of the cookie returned by zap are always zero. |
| * This allows us to use the low range for "special" directory entries: |
| * we use 0 for '.', and 1 for '..'. If this is the root of the filesystem, |
| * we use the offset 2 for the '.zfs' directory. |
| */ |
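| /* |
| * Entry-encoding sketch (assumed from the ZFS_DIRENT_* usage below; |
| * see sys/zfs_znode.h): each directory ZAP value is one 64-bit |
| * integer carrying the object number in its low bits and the entry |
| * type in its high bits, e.g.: |
| * |
| * objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); |
| * type = ZFS_DIRENT_TYPE(zap.za_first_integer); |
| */ |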
| /* ARGSUSED */ |
| static int |
| zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, |
| caller_context_t *ct, int flags) |
| { |
| znode_t *zp = VTOZ(vp); |
| iovec_t *iovp; |
| edirent_t *eodp; |
| dirent64_t *odp; |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| objset_t *os; |
| caddr_t outbuf; |
| size_t bufsize; |
| zap_cursor_t zc; |
| zap_attribute_t zap; |
| uint_t bytes_wanted; |
| uint64_t offset; /* must be unsigned; checks for < 1 */ |
| uint64_t parent; |
| int local_eof; |
| int outcount; |
| int error; |
| uint8_t prefetch; |
| boolean_t check_sysattrs; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), |
| &parent, sizeof (parent))) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * If we are not given an eof variable, |
| * use a local one. |
| */ |
| if (eofp == NULL) |
| eofp = &local_eof; |
| |
| /* |
| * Check for valid iov_len. |
| */ |
| if (uio->uio_iov->iov_len <= 0) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| /* |
| * Quit if the directory has been removed (POSIX). |
| */ |
| if ((*eofp = zp->z_unlinked) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| error = 0; |
| os = zfsvfs->z_os; |
| offset = uio->uio_loffset; |
| prefetch = zp->z_zn_prefetch; |
| |
| /* |
| * Initialize the iterator cursor. |
| */ |
| if (offset <= 3) { |
| /* |
| * Start iteration from the beginning of the directory. |
| */ |
| zap_cursor_init(&zc, os, zp->z_id); |
| } else { |
| /* |
| * The offset is a serialized cursor. |
| */ |
| zap_cursor_init_serialized(&zc, os, zp->z_id, offset); |
| } |
| |
| /* |
| * Get space to change directory entries into fs independent format. |
| */ |
| iovp = uio->uio_iov; |
| bytes_wanted = iovp->iov_len; |
| if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { |
| bufsize = bytes_wanted; |
| outbuf = kmem_alloc(bufsize, KM_SLEEP); |
| odp = (struct dirent64 *)outbuf; |
| } else { |
| bufsize = bytes_wanted; |
| outbuf = NULL; |
| odp = (struct dirent64 *)iovp->iov_base; |
| } |
| eodp = (struct edirent *)odp; |
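| /* |
| * eodp and odp are two views of the same output cursor; which one |
| * advances depends on V_RDDIR_ENTFLAGS below. |
| */ |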
| |
| /* |
| * If this VFS supports the system attribute view interface, and |
| * we're looking at an extended attribute directory, and we care |
| * about normalization conflicts on this vfs, then we must check |
| * for normalization conflicts with the sysattr name space. |
| */ |
| check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && |
| (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && |
| (flags & V_RDDIR_ENTFLAGS); |
| |
| /* |
| * Transform to file-system independent format |
| */ |
| outcount = 0; |
| while (outcount < bytes_wanted) { |
| ino64_t objnum; |
| ushort_t reclen; |
| off64_t *next = NULL; |
| |
| /* |
| * Special case `.', `..', and `.zfs'. |
| */ |
| if (offset == 0) { |
| (void) strcpy(zap.za_name, "."); |
| zap.za_normalization_conflict = 0; |
| objnum = zp->z_id; |
| } else if (offset == 1) { |
| (void) strcpy(zap.za_name, ".."); |
| zap.za_normalization_conflict = 0; |
| objnum = parent; |
| } else if (offset == 2 && zfs_show_ctldir(zp)) { |
| (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); |
| zap.za_normalization_conflict = 0; |
| objnum = ZFSCTL_INO_ROOT; |
| } else { |
| /* |
| * Grab next entry. |
| */ |
| if (error = zap_cursor_retrieve(&zc, &zap)) { |
| if ((*eofp = (error == ENOENT)) != 0) |
| break; |
| else |
| goto update; |
| } |
| |
| if (zap.za_integer_length != 8 || |
| zap.za_num_integers != 1) { |
| cmn_err(CE_WARN, "zap_readdir: bad directory " |
| "entry, obj = %lld, offset = %lld\n", |
| (u_longlong_t)zp->z_id, |
| (u_longlong_t)offset); |
| error = SET_ERROR(ENXIO); |
| goto update; |
| } |
| |
| objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); |
| /* |
| * Mac OS X can extract the object type here, e.g.: |
| * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); |
| */ |
| |
| if (check_sysattrs && !zap.za_normalization_conflict) { |
| zap.za_normalization_conflict = |
| xattr_sysattr_casechk(zap.za_name); |
| } |
| } |
| |
| if (flags & V_RDDIR_ACCFILTER) { |
| /* |
| * If we have no access at all, don't include |
| * this entry in the returned information |
| */ |
| znode_t *ezp; |
| if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0) |
| goto skip_entry; |
| if (!zfs_has_access(ezp, cr)) { |
| VN_RELE(ZTOV(ezp)); |
| goto skip_entry; |
| } |
| VN_RELE(ZTOV(ezp)); |
| } |
| |
| if (flags & V_RDDIR_ENTFLAGS) |
| reclen = EDIRENT_RECLEN(strlen(zap.za_name)); |
| else |
| reclen = DIRENT64_RECLEN(strlen(zap.za_name)); |
| |
| /* |
| * Will this entry fit in the buffer? |
| */ |
| if (outcount + reclen > bufsize) { |
| /* |
| * Did we manage to fit anything in the buffer? |
| */ |
| if (!outcount) { |
| error = SET_ERROR(EINVAL); |
| goto update; |
| } |
| break; |
| } |
| if (flags & V_RDDIR_ENTFLAGS) { |
| /* |
| * Add extended flag entry: |
| */ |
| eodp->ed_ino = objnum; |
| eodp->ed_reclen = reclen; |
| /* NOTE: ed_off is the offset for the *next* entry */ |
| next = &(eodp->ed_off); |
| eodp->ed_eflags = zap.za_normalization_conflict ? |
| ED_CASE_CONFLICT : 0; |
| (void) strncpy(eodp->ed_name, zap.za_name, |
| EDIRENT_NAMELEN(reclen)); |
| eodp = (edirent_t *)((intptr_t)eodp + reclen); |
| } else { |
| /* |
| * Add normal entry: |
| */ |
| odp->d_ino = objnum; |
| odp->d_reclen = reclen; |
| /* NOTE: d_off is the offset for the *next* entry */ |
| next = &(odp->d_off); |
| (void) strncpy(odp->d_name, zap.za_name, |
| DIRENT64_NAMELEN(reclen)); |
| odp = (dirent64_t *)((intptr_t)odp + reclen); |
| } |
| outcount += reclen; |
| |
| ASSERT(outcount <= bufsize); |
| |
| /* Prefetch the entry's metadata for a likely upcoming lookup. */ |
| if (prefetch) |
| dmu_prefetch(os, objnum, 0, 0, 0, |
| ZIO_PRIORITY_SYNC_READ); |
| |
| skip_entry: |
| /* |
| * Move to the next entry, fill in the previous offset. |
| */ |
| if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { |
| zap_cursor_advance(&zc); |
| offset = zap_cursor_serialize(&zc); |
| } else { |
| offset += 1; |
| } |
| if (next) |
| *next = offset; |
| } |
| zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ |
| |
| if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { |
| iovp->iov_base += outcount; |
| iovp->iov_len -= outcount; |
| uio->uio_resid -= outcount; |
| } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { |
| /* |
| * Reset the pointer. |
| */ |
| offset = uio->uio_loffset; |
| } |
| |
| update: |
| zap_cursor_fini(&zc); |
| if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) |
| kmem_free(outbuf, bufsize); |
| |
| if (error == ENOENT) |
| error = 0; |
| |
| ZFS_ACCESSTIME_STAMP(zfsvfs, zp); |
| |
| uio->uio_loffset = offset; |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| ulong_t zfs_fsync_sync_cnt = 4; |
| |
| static int |
| zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| |
| /* |
| * Regardless of whether this is required for standards conformance, |
| * this is the logical behavior when fsync() is called on a file with |
| * dirty pages. We use B_ASYNC since the ZIL transactions are already |
| * going to be pushed out as part of the zil_commit(). |
| */ |
| if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && |
| (vp->v_type == VREG) && !(IS_SWAPVP(vp))) |
| (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); |
| |
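| /* |
| * Leave a hint that this thread fsync()s frequently; the ZIL |
| * write-logging path reads zfs_fsyncer_key when deciding how to |
| * log this thread's next few writes (see zfs_log_write()). |
| */ |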
| (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); |
| |
| if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) { |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| zil_commit(zfsvfs->z_log, zp->z_id); |
| ZFS_EXIT(zfsvfs); |
| } |
| return (0); |
| } |
| |
| |
| /* |
| * Get the requested file attributes and place them in the provided |
| * vattr structure. |
| * |
| * IN: vp - vnode of file. |
| * vap - va_mask identifies requested attributes. |
| * If AT_XVATTR set, then optional attrs are requested |
| * flags - ATTR_NOACLCHECK (CIFS server context) |
| * cr - credentials of caller. |
| * ct - caller context |
| * |
| * OUT: vap - attribute values. |
| * |
| * RETURN: 0 (always succeeds). |
| */ |
| /* ARGSUSED */ |
| static int |
| zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, |
| caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| int error = 0; |
| uint64_t links; |
| uint64_t mtime[2], ctime[2]; |
| xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ |
| xoptattr_t *xoap = NULL; |
| boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; |
| sa_bulk_attr_t bulk[2]; |
| int count = 0; |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); |
| |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); |
| SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); |
| |
| if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| |
| /* |
| * If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES. |
| * Likewise if we are the owner, since the owner should always be |
| * allowed to read the basic attributes of a file. |
| */ |
| if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) && |
| (vap->va_uid != crgetuid(cr))) { |
| if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, |
| skipaclchk, cr)) { |
| ZFS_EXIT(zfsvfs); |
| return (error); |
| } |
| } |
| |
| /* |
| * Return all attributes. It's cheaper to provide the answer |
| * than to determine whether we were asked the question. |
| */ |
| |
| mutex_enter(&zp->z_lock); |
| vap->va_type = vp->v_type; |
| vap->va_mode = zp->z_mode & MODEMASK; |
| vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; |
| vap->va_nodeid = zp->z_id; |
| if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) |
| links = zp->z_links + 1; |
| else |
| links = zp->z_links; |
| vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ |
| vap->va_size = zp->z_size; |
| vap->va_rdev = vp->v_rdev; |
| vap->va_seq = zp->z_seq; |
| |
| /* |
| * Add in any requested optional attributes and the create time. |
| * Also set the corresponding bits in the returned attribute bitmap. |
| */ |
| if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { |
| if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { |
| xoap->xoa_archive = |
| ((zp->z_pflags & ZFS_ARCHIVE) != 0); |
| XVA_SET_RTN(xvap, XAT_ARCHIVE); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { |
| xoap->xoa_readonly = |
| ((zp->z_pflags & ZFS_READONLY) != 0); |
| XVA_SET_RTN(xvap, XAT_READONLY); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { |
| xoap->xoa_system = |
| ((zp->z_pflags & ZFS_SYSTEM) != 0); |
| XVA_SET_RTN(xvap, XAT_SYSTEM); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { |
| xoap->xoa_hidden = |
| ((zp->z_pflags & ZFS_HIDDEN) != 0); |
| XVA_SET_RTN(xvap, XAT_HIDDEN); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { |
| xoap->xoa_nounlink = |
| ((zp->z_pflags & ZFS_NOUNLINK) != 0); |
| XVA_SET_RTN(xvap, XAT_NOUNLINK); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { |
| xoap->xoa_immutable = |
| ((zp->z_pflags & ZFS_IMMUTABLE) != 0); |
| XVA_SET_RTN(xvap, XAT_IMMUTABLE); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { |
| xoap->xoa_appendonly = |
| ((zp->z_pflags & ZFS_APPENDONLY) != 0); |
| XVA_SET_RTN(xvap, XAT_APPENDONLY); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { |
| xoap->xoa_nodump = |
| ((zp->z_pflags & ZFS_NODUMP) != 0); |
| XVA_SET_RTN(xvap, XAT_NODUMP); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { |
| xoap->xoa_opaque = |
| ((zp->z_pflags & ZFS_OPAQUE) != 0); |
| XVA_SET_RTN(xvap, XAT_OPAQUE); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { |
| xoap->xoa_av_quarantined = |
| ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0); |
| XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { |
| xoap->xoa_av_modified = |
| ((zp->z_pflags & ZFS_AV_MODIFIED) != 0); |
| XVA_SET_RTN(xvap, XAT_AV_MODIFIED); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && |
| vp->v_type == VREG) { |
| zfs_sa_get_scanstamp(zp, xvap); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { |
| uint64_t times[2]; |
| |
| (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs), |
| times, sizeof (times)); |
| ZFS_TIME_DECODE(&xoap->xoa_createtime, times); |
| XVA_SET_RTN(xvap, XAT_CREATETIME); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { |
| xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0); |
| XVA_SET_RTN(xvap, XAT_REPARSE); |
| } |
| if (XVA_ISSET_REQ(xvap, XAT_GEN)) { |
| xoap->xoa_generation = zp->z_gen; |
| XVA_SET_RTN(xvap, XAT_GEN); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) { |
| xoap->xoa_offline = |
| ((zp->z_pflags & ZFS_OFFLINE) != 0); |
| XVA_SET_RTN(xvap, XAT_OFFLINE); |
| } |
| |
| if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) { |
| xoap->xoa_sparse = |
| ((zp->z_pflags & ZFS_SPARSE) != 0); |
| XVA_SET_RTN(xvap, XAT_SPARSE); |
| } |
| } |
| |
| ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime); |
| ZFS_TIME_DECODE(&vap->va_mtime, mtime); |
| ZFS_TIME_DECODE(&vap->va_ctime, ctime); |
| |
| mutex_exit(&zp->z_lock); |
| |
| sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks); |
| |
| if (zp->z_blksz == 0) { |
| /* |
| * Block size hasn't been set; suggest maximal I/O transfers. |
| */ |
| vap->va_blksize = zfsvfs->z_max_blksz; |
| } |
| |
| ZFS_EXIT(zfsvfs); |
| return (0); |
| } |
| |
| /* |
| * Set the file attributes to the values contained in the |
| * vattr structure. |
| * |
| * IN: vp - vnode of file to be modified. |
| * vap - new attribute values. |
| * If AT_XVATTR set, then optional attrs are being set |
| * flags - ATTR_UTIME set if non-default time values provided. |
| * - ATTR_NOACLCHECK (CIFS context only). |
| * cr - credentials of caller. |
| * ct - caller context |
| * |
| * RETURN: 0 on success, error code on failure. |
| * |
| * Timestamps: |
| * vp - ctime updated, mtime updated if size changed. |
| */ |
| /* ARGSUSED */ |
| static int |
| zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, |
| caller_context_t *ct) |
| { |
| znode_t *zp = VTOZ(vp); |
| zfsvfs_t *zfsvfs = zp->z_zfsvfs; |
| zilog_t *zilog; |
| dmu_tx_t *tx; |
| vattr_t oldva; |
| xvattr_t tmpxvattr; |
| uint_t mask = vap->va_mask; |
| uint_t saved_mask = 0; |
| int trim_mask = 0; |
| uint64_t new_mode; |
| uint64_t new_uid, new_gid; |
| uint64_t xattr_obj; |
| uint64_t mtime[2], ctime[2]; |
| znode_t *attrzp; |
| int need_policy = FALSE; |
| int err, err2; |
| zfs_fuid_info_t *fuidp = NULL; |
| xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ |
| xoptattr_t *xoap; |
| zfs_acl_t *aclp; |
| boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; |
| boolean_t fuid_dirtied = B_FALSE; |
| sa_bulk_attr_t bulk[7], xattr_bulk[7]; |
| int count = 0, xattr_count = 0; |
| |
| if (mask == 0) |
| return (0); |
| |
| if (mask & AT_NOSET) |
| return (SET_ERROR(EINVAL)); |
| |
| ZFS_ENTER(zfsvfs); |
| ZFS_VERIFY_ZP(zp); |
| |
| zilog = zfsvfs->z_log; |
| |
| /* |
| * If an ephemeral uid/gid or an xvattr is specified, make sure the |
| * file system is at the proper version level to support it. |
| */ |
| |
| if (zfsvfs->z_use_fuids == B_FALSE && |
| (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || |
| ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || |
| (mask & AT_XVATTR))) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| if (mask & AT_SIZE && vp->v_type == VDIR) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EISDIR)); |
| } |
| |
| if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EINVAL)); |
| } |
| |
| /* |
| * If this is an xvattr_t, then get a pointer to the structure of |
| * optional attributes. If that pointer is NULL, we were passed a |
| * plain vattr_t. |
| */ |
| xoap = xva_getxoptattr(xvap); |
| |
| xva_init(&tmpxvattr); |
| |
| /* |
| * On immutable files, only the immutable bit and atime may be altered. |
| */ |
| if ((zp->z_pflags & ZFS_IMMUTABLE) && |
| ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || |
| ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EPERM)); |
| } |
| |
| /* |
| * Note: ZFS_READONLY is handled in zfs_zaccess_common. |
| */ |
| |
| /* |
| * Verify that the timestamps don't overflow 32 bits. |
| * ZFS can handle large timestamps, but 32-bit syscalls can't |
| * handle times past the 32-bit time_t limit in January 2038. |
| * This check should be removed once large timestamps are fully |
| * supported. |
| */ |
| if (mask & (AT_ATIME | AT_MTIME)) { |
| if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || |
| ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EOVERFLOW)); |
| } |
| } |
| |
| top: |
| attrzp = NULL; |
| aclp = NULL; |
| |
| /* Can this be moved to before the top label? */ |
| if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { |
| ZFS_EXIT(zfsvfs); |
| return (SET_ERROR(EROFS)); |
| } |
| |