/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#ifndef	_SYS_ZIL_IMPL_H
#define	_SYS_ZIL_IMPL_H

#include <sys/zil.h>
#include <sys/dmu_objset.h>

#ifdef	__cplusplus
extern "C" {
#endif

38
/*
 * Possible states for a given lwb structure.
 *
 * An lwb will start out in the "closed" state, and then transition to
 * the "opened" state via a call to zil_lwb_write_open(). When
 * transitioning from "closed" to "opened" the zilog's "zl_issuer_lock"
 * must be held.
 *
 * After the lwb is "opened", it can transition into the "issued" state
 * via zil_lwb_write_issue(). Again, the zilog's "zl_issuer_lock" must
 * be held when making this transition.
 *
 * After the lwb's write zio completes, it transitions into the "write
 * done" state via zil_lwb_write_done(); and then into the "flush done"
 * state via zil_lwb_flush_vdevs_done(). When transitioning from
 * "issued" to "write done", and then from "write done" to "flush done",
 * the zilog's "zl_lock" must be held, *not* the "zl_issuer_lock".
 *
 * The zilog's "zl_issuer_lock" can become heavily contended in certain
 * workloads, so we specifically avoid acquiring that lock when
 * transitioning an lwb from "issued" to "write done". This allows us to
 * avoid having to acquire the "zl_issuer_lock" for each lwb ZIO
 * completion, which would have added more lock contention on an already
 * heavily contended lock.
 *
 * Additionally, correctness when reading an lwb's state is often
 * achieved by exploiting the fact that these state transitions occur in
 * this specific order; i.e. "closed" to "opened" to "issued" to "write
 * done" to "flush done".
 *
 * Thus, if an lwb is in the "closed" or "opened" state, holding the
 * "zl_issuer_lock" will prevent a concurrent thread from transitioning
 * that lwb to the "issued" state. Likewise, if an lwb is already in the
 * "issued" state, holding the "zl_lock" will prevent a concurrent
 * thread from transitioning that lwb to the "write done" state.
 */
typedef enum {
	LWB_STATE_CLOSED,	/* not yet opened for itx assignment */
	LWB_STATE_OPENED,	/* open; itxs may be assigned to it */
	LWB_STATE_ISSUED,	/* write zio issued to disk */
	LWB_STATE_WRITE_DONE,	/* write zio completed */
	LWB_STATE_FLUSH_DONE,	/* vdev cache flushes completed */
	LWB_NUM_STATES
} lwb_state_t;
82
83/*
84 * Log write block (lwb)
85 *
86 * Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
Prakash Suryacf07d3d2017-09-01 11:04:26 -070087 * will be protected by the zilog's "zl_issuer_lock". Basically, prior
Prakash Surya1271e4b2017-07-06 23:19:18 -070088 * to it being issued, it will only be accessed by the thread that's
Prakash Suryacf07d3d2017-09-01 11:04:26 -070089 * holding the "zl_issuer_lock". After the lwb is issued, the zilog's
Prakash Surya1271e4b2017-07-06 23:19:18 -070090 * "zl_lock" is used to protect the lwb against concurrent access.
ahrensfa9e4062005-10-31 11:33:35 -080091 */
92typedef struct lwb {
93 zilog_t *lwb_zilog; /* back pointer to log struct */
94 blkptr_t lwb_blk; /* on disk address of this log blk */
Alexander Motinc5ee4682016-11-03 15:52:28 +020095 boolean_t lwb_slog; /* lwb_blk is on SLOG device */
ahrensfa9e4062005-10-31 11:33:35 -080096 int lwb_nused; /* # used bytes in buffer */
97 int lwb_sz; /* size of block and buffer */
Prakash Surya1271e4b2017-07-06 23:19:18 -070098 lwb_state_t lwb_state; /* the state of this lwb */
ahrensfa9e4062005-10-31 11:33:35 -080099 char *lwb_buf; /* log write buffer */
Prakash Surya1271e4b2017-07-06 23:19:18 -0700100 zio_t *lwb_write_zio; /* zio for the lwb buffer */
101 zio_t *lwb_root_zio; /* root zio for lwb write and flushes */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800102 dmu_tx_t *lwb_tx; /* tx for log block allocation */
ahrensfa9e4062005-10-31 11:33:35 -0800103 uint64_t lwb_max_txg; /* highest txg in this lwb */
ahrensfa9e4062005-10-31 11:33:35 -0800104 list_node_t lwb_node; /* zilog->zl_lwb_list linkage */
Prakash Surya1271e4b2017-07-06 23:19:18 -0700105 list_t lwb_waiters; /* list of zil_commit_waiter's */
106 avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
107 kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
108 hrtime_t lwb_issued_timestamp; /* when was the lwb issued? */
ahrensfa9e4062005-10-31 11:33:35 -0800109} lwb_t;
110
111/*
Prakash Surya1271e4b2017-07-06 23:19:18 -0700112 * ZIL commit waiter.
113 *
114 * This structure is allocated each time zil_commit() is called, and is
115 * used by zil_commit() to communicate with other parts of the ZIL, such
116 * that zil_commit() can know when it safe for it return. For more
117 * details, see the comment above zil_commit().
118 *
119 * The "zcw_lock" field is used to protect the commit waiter against
120 * concurrent access. This lock is often acquired while already holding
Prakash Suryacf07d3d2017-09-01 11:04:26 -0700121 * the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
Prakash Surya1271e4b2017-07-06 23:19:18 -0700122 * zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
123 * of this. Thus, one must be careful not to acquire the
Prakash Suryacf07d3d2017-09-01 11:04:26 -0700124 * "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
Prakash Surya1271e4b2017-07-06 23:19:18 -0700125 * e.g. see the zil_commit_waiter_timeout() function.
126 */
127typedef struct zil_commit_waiter {
128 kcondvar_t zcw_cv; /* signalled when "done" */
129 kmutex_t zcw_lock; /* protects fields of this struct */
130 list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
131 lwb_t *zcw_lwb; /* back pointer to lwb when linked */
132 boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
133 int zcw_zio_error; /* contains the zio io_error value */
134} zil_commit_waiter_t;
135
136/*
Neil Perrin50025582010-06-25 15:50:31 -0600137 * Intent log transaction lists
138 */
139typedef struct itxs {
140 list_t i_sync_list; /* list of synchronous itxs */
141 avl_tree_t i_async_tree; /* tree of foids for async itxs */
142} itxs_t;
143
144typedef struct itxg {
145 kmutex_t itxg_lock; /* lock for this structure */
146 uint64_t itxg_txg; /* txg for this chain */
Neil Perrin50025582010-06-25 15:50:31 -0600147 itxs_t *itxg_itxs; /* sync and async itxs */
148} itxg_t;
149
150/* for async nodes we build up an AVL tree of lists of async itxs per file */
151typedef struct itx_async_node {
152 uint64_t ia_foid; /* file object id */
153 list_t ia_list; /* list of async itxs for this foid */
154 avl_node_t ia_node; /* AVL tree linkage */
155} itx_async_node_t;
156
157/*
bonwick17f17c22007-12-13 16:31:22 -0800158 * Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs
159 * we've touched so we know which ones need a write cache flush at the end.
ahrensfa9e4062005-10-31 11:33:35 -0800160 */
bonwick17f17c22007-12-13 16:31:22 -0800161typedef struct zil_vdev_node {
162 uint64_t zv_vdev; /* vdev to be flushed */
163 avl_node_t zv_node; /* AVL tree linkage */
164} zil_vdev_node_t;
ahrensfa9e4062005-10-31 11:33:35 -0800165
Neil Perrin6e1f5ca2010-02-17 15:13:37 -0700166#define ZIL_PREV_BLKS 16
167
ahrensfa9e4062005-10-31 11:33:35 -0800168/*
169 * Stable storage intent log management structure. One per dataset.
170 */
171struct zilog {
172 kmutex_t zl_lock; /* protects most zilog_t fields */
173 struct dsl_pool *zl_dmu_pool; /* DSL pool */
174 spa_t *zl_spa; /* handle for read/write log */
bonwickd80c45e2006-04-13 16:15:06 -0700175 const zil_header_t *zl_header; /* log header buffer */
ahrensfa9e4062005-10-31 11:33:35 -0800176 objset_t *zl_os; /* object set we're logging */
177 zil_get_data_t *zl_get_data; /* callback to get object content */
Prakash Surya1271e4b2017-07-06 23:19:18 -0700178 lwb_t *zl_last_lwb_opened; /* most recent lwb opened */
179 hrtime_t zl_last_lwb_latency; /* zio latency of last lwb done */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800180 uint64_t zl_lr_seq; /* on-disk log record sequence number */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800181 uint64_t zl_commit_lr_seq; /* last committed on-disk lr seq */
ahrensfa9e4062005-10-31 11:33:35 -0800182 uint64_t zl_destroy_txg; /* txg of last zil_destroy() */
Neil Perrin1209a472008-11-25 13:18:25 -0700183 uint64_t zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
184 uint64_t zl_replaying_seq; /* current replay seq number */
ahrensfa9e4062005-10-31 11:33:35 -0800185 uint32_t zl_suspend; /* log suspend count */
bonwickd80c45e2006-04-13 16:15:06 -0700186 kcondvar_t zl_cv_suspend; /* log suspend completion */
187 uint8_t zl_suspending; /* log is currently suspending */
188 uint8_t zl_keep_first; /* keep first log block in destroy */
Neil Perrin1209a472008-11-25 13:18:25 -0700189 uint8_t zl_replay; /* replaying records while set */
ahrensfa9e4062005-10-31 11:33:35 -0800190 uint8_t zl_stop_sync; /* for debugging */
Prakash Suryacf07d3d2017-09-01 11:04:26 -0700191 kmutex_t zl_issuer_lock; /* single writer, per ZIL, at a time */
Neil Perrine09fa4d2009-08-14 11:18:12 -0600192 uint8_t zl_logbias; /* latency or throughput */
Mark J Musante55da60b2010-05-03 09:07:08 -0600193 uint8_t zl_sync; /* synchronous or asynchronous */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800194 int zl_parse_error; /* last zil_parse() error */
195 uint64_t zl_parse_blk_seq; /* highest blk seq on last parse */
196 uint64_t zl_parse_lr_seq; /* highest lr seq on last parse */
197 uint64_t zl_parse_blk_count; /* number of blocks parsed */
198 uint64_t zl_parse_lr_count; /* number of log records parsed */
Neil Perrin50025582010-06-25 15:50:31 -0600199 itxg_t zl_itxg[TXG_SIZE]; /* intent log txg chains */
200 list_t zl_itx_commit_list; /* itx list to be committed */
perrin22ac5be2005-12-16 16:22:45 -0800201 uint64_t zl_cur_used; /* current commit log size used */
ahrensfa9e4062005-10-31 11:33:35 -0800202 list_t zl_lwb_list; /* in-flight log write list */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800203 avl_tree_t zl_bp_tree; /* track bps during log parse */
perrin67bd71c2006-11-04 07:59:19 -0800204 clock_t zl_replay_time; /* lbolt of when replay started */
205 uint64_t zl_replay_blks; /* number of log blocks replayed */
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800206 zil_header_t zl_old_header; /* debugging aid */
Neil Perrin6e1f5ca2010-02-17 15:13:37 -0700207 uint_t zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */
208 uint_t zl_prev_rotor; /* rotor for zl_prev[] */
Matthew Ahrensce636f82012-08-21 04:36:41 -0700209 txg_node_t zl_dirty_link; /* protected by dp_dirty_zilogs list */
Prakash Surya1271e4b2017-07-06 23:19:18 -0700210 uint64_t zl_dirty_max_txg; /* highest txg used to dirty zilog */
ahrensfa9e4062005-10-31 11:33:35 -0800211};
212
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800213typedef struct zil_bp_node {
ahrensfa9e4062005-10-31 11:33:35 -0800214 dva_t zn_dva;
215 avl_node_t zn_node;
Jeff Bonwickb24ab672009-11-01 14:14:46 -0800216} zil_bp_node_t;
ahrensfa9e4062005-10-31 11:33:35 -0800217
/*
 * Maximum amount of write data that can be put into single log block.
 */
#define	ZIL_MAX_LOG_DATA (SPA_OLD_MAXBLOCKSIZE - sizeof (zil_chain_t) - \
    sizeof (lr_write_t))

/*
 * Maximum amount of log space we agree to waste to reduce number of
 * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
 */
#define	ZIL_MAX_WASTE_SPACE (ZIL_MAX_LOG_DATA / 8)

/*
 * Maximum amount of write data for WR_COPIED. Fall back to WR_NEED_COPY
 * as more space efficient if we can't fit at least two log records into
 * maximum sized log block.
 */
#define	ZIL_MAX_COPIED_DATA ((SPA_OLD_MAXBLOCKSIZE - \
    sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t))
237
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_ZIL_IMPL_H */