介绍
本文讲述一个传输组的同步过程。从txg_sync_thread
函数直到dbuf_sync_indirect
和dbuf_sync_leaf
函数层层调用( dbuf_sync_indirect
和dbuf_sync_leaf
作为数据集的缓存存在)。dbuf_sync_indirect
中,间接块I/O依赖于其子间接块I/O的调度更新,但是在同一等级(level)的间接块之间又是相互独立的。叶子节点数据块的写相对其他叶子节点数据块也是相互独立的。分析dbuf_sync_{leaf, indirect}
,我们可以知道,最后缓存刷盘的本质是处理一个脏数据记录的链表。那么脏数据记录又是怎么跟ZFS对象,也就是dnode,对应起来呢?:blush:在下一篇文章里我们会介绍VFS的写操作是怎么最后在ZFS中生成脏数据记录的。
正常的写ZIO在ZIO流水线中被异步分发,流水线等待其所有的独立子IO结束。0级的数据块会被并发处理。
正文
接下来介绍对于任意存储池IO,从txg_sync_start
到 zio_wait
(或zio_nowait
) 的函数调用的流程。对于代码,我们只摘取其中核心的部分,使用(...)来省略其他代码,增加本文中代码的可读性。
一个存储池在创建和导入的时候,txg_sync_start
函数会被调用,创建txg_sync_thread
线程。
/*
 * Called when a pool is created or imported: spawns the per-pool
 * txg_sync_thread that drives transaction-group syncing.
 * (Abridged excerpt from the ZFS source; "..." marks elided code.)
 */
void
txg_sync_start(dsl_pool_t *dp)
{
...
/* 32 KB stack; thread body is txg_sync_thread(dp) */
tx->tx_sync_thread = thread_create(NULL, 32 << 10, txg_sync_thread, dp, 0, &p0, TS_RUN, minclsyspri);
...
}
存储池运行期间会不停地在txg
状态之间切换。在进入syncing
状态的时候,就会调用spa_sync
。而spa_sync
调用完毕后,就会唤醒所有等待在tx_sync_done_cv
上的线程。
/*
 * Per-pool sync thread: loops forever, picking up each quiesced
 * txg, syncing it to disk via spa_sync(), then waking every thread
 * waiting on tx_sync_done_cv. (Abridged excerpt; "..." marks
 * elided code.)
 */
static void
txg_sync_thread(void *arg)
{
dsl_pool_t *dp = arg;
spa_t *spa = dp->dp_spa;
...
for (;;) {
...
/* take the txg that has finished quiescing */
txg = tx->tx_quiesced_txg;
...
spa_sync(spa, txg);
...
/* notify all waiters that this txg has been synced */
cv_broadcast(&tx->tx_sync_done_cv);
...
}
}
spa_sync
会调用dsl_pool_sync
,直到没有新的脏数据需要被更新。
/*
 * Sync a transaction group: repeatedly call dsl_pool_sync() until
 * the meta-objset (MOS) has no remaining dirty data for this txg,
 * since each pass may itself dirty new metadata. (Abridged
 * excerpt; "..." marks elided code.)
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
objset_t *mos = spa->spa_meta_objset;
...
do {
...
dsl_pool_sync(dp, txg);
} while (dmu_objset_is_dirty(mos, txg));
}
dsl_pool_sync
遍历存储池内所有脏数据集,调用dsl_dataset_sync
两次。第一次将所有脏数据块下盘。第二次则将所有用户空间(user/group)配额统计的改变下盘。这两个遍历操作都会以一个同步ZIO
的形式创建到本存储池的根ZIO下。(同步的体现形式为调用了zio_wait
)。
/*
 * Sync every dirty dataset in the pool for the given txg in two
 * passes, each pass gathered under a root zio and waited on with
 * zio_wait(). Pass 1 writes out dirty data blocks; pass 2 writes
 * out the changes produced by the user/group space accounting
 * update in between. (Abridged excerpt; "..." marks elided code.)
 */
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
...
dsl_dataset_t *ds;
objset_t *mos = dp->dp_meta_objset;
...
tx = dmu_tx_create_assigned(dp, txg);
/*
 * Write out all dirty blocks of dirty datasets.
 */
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
/*
 * We must not sync any non-MOS datasets twice,
 * because we may have taken a snapshot of them.
 * However, we may sync newly-created datasets on
 * pass 2.
 */
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, zio, tx);
}
/* block until every child zio issued in pass 1 completes */
VERIFY0(zio_wait(zio));
...
/*
 * After the data blocks have been written (ensured by the zio_wait()
 * above), update the user/group space accounting.
 */
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_do_userquota_updates(ds->ds_objset, tx);
}
/*
 * Sync the datasets again to push out the changes due to
 * userspace updates. This must be done before we process the
 * sync tasks, so that any snapshots will have the correct
 * user accounting information (and we won't get confused
 * about which blocks are part of the snapshot).
 */
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, zio, tx);
}
/* block until every child zio issued in pass 2 completes */
VERIFY0(zio_wait(zio));
...
}
dsl_dataset_sync
传递数据集(dataset)的对象集合(objset)给dmu_objset_sync
函数进行数据集同步。
/*
 * Thin wrapper: hand the dataset's objset to dmu_objset_sync(),
 * parented under the caller's zio. (Abridged excerpt; "..." marks
 * elided code.)
 */
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
...
dmu_objset_sync(ds->ds_objset, zio, tx);
}
dmu_objset_sync
调用dmu_objset_sync_dnodes
将对象集合(objset)下的脏dnode链表和被释放dnode链表中的dnode下盘。需要注意的是,对于特殊的元数据对象(special metadata dnodes),需要先行同步,调用dnode_sync
即可。
/* called from dsl */
/*
 * Sync one objset for this txg: create the arc_write zio for the
 * objset's root block, sync the special dnodes (meta-dnode,
 * user/group-used dnodes) first, then sync the freed and dirty
 * dnode lists and issue the meta-dnode's level-0 dirty-record
 * zios. (Abridged excerpt; "..." marks elided code.)
 */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
...
list_t *newlist = NULL;
dbuf_dirty_record_t *dr;
...
/*
 * Create the root block IO
 */
...
zio = arc_write(pio, os->os_spa, tx->tx_txg,
os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED, &zb);
/*
 * Sync special dnodes - the parent IO for the sync is the root block
 */
dnode_sync(DMU_META_DNODE(os), tx);
...
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
...
/* per-txg slot index into the dirty lists */
txgoff = tx->tx_txg & TXG_MASK;
...
if (dmu_objset_userused_enabled(os)) {
newlist = &os->os_synced_dnodes;
/*
 * We must create the list here because it uses the
 * dn_dirty_link[] of this txg.
 */
list_create(newlist, sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
/* issue the zios created for the meta-dnode's level-0 dirty records */
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
while (dr = list_head(list)) { /* assignment in condition is intentional */
ASSERT0(dr->dr_dbuf->db_level);
list_remove(list, dr);
if (dr->dr_zio)
zio_nowait(dr->dr_zio);
}
/*
 * Free intent log blocks up to this tx.
 */
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
/* dispatch the root block write last, after all children exist */
zio_nowait(zio);
}
dmu_objset_sync_dnodes
对于链表内的置脏对象,会调用dnode_sync
,将dnode下盘,把他们加入到newlist (如果,非空)中。(根据入参可以判断,已经加入到os->os_synced_dnodes)。
/*
 * Drain a list of dirty (or freed) dnodes: wire up each dnode's
 * dn_zio from its dbuf's pending dirty record, optionally track it
 * on newlist (used later for user-quota accounting), and sync it.
 * (Abridged excerpt; "..." marks elided code.)
 */
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
dnode_t *dn;
while (dn = list_head(list)) { /* assignment in condition is intentional */
...
/*
 * Initialize dn_zio outside dnode_sync() because the
 * meta-dnode needs to set it outside dnode_sync().
 */
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
list_remove(list, dn);
if (newlist) {
/* hold a reference for the tracking list */
(void) dnode_add_ref(dn, newlist);
list_insert_tail(newlist, dn);
}
dnode_sync(dn, tx);
}
}
dnode_sync
将置脏的缓冲记录传递给dbuf_sync_list
。
/*
 * Hand this dnode's dirty-record list for the txg to
 * dbuf_sync_list(). (Abridged excerpt; "..." marks elided code.)
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
...
list_t *list = &dn->dn_dirty_records[txgoff];
...
dbuf_sync_list(list, tx);
}
dbuf_sync_list
函数遍历访问脏缓冲记录链表中的每个元素,根据缓冲数据的类型,调用 dbuf_sync_leaf
和dbuf_sync_indirect
。
/*
 * Walk a list of dirty buffer records, dispatching each by level:
 * indirect blocks (level > 0) go to dbuf_sync_indirect(), leaf
 * data blocks (level 0) to dbuf_sync_leaf(). (Abridged excerpt;
 * "..." / "<...>" mark elided code.)
 */
void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while (dr = list_head(list)) { /* assignment in condition is intentional */
<...>
list_remove(list, dr);
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
ZFS是COW的文件系统,对于每个块都不例外。因此每次数据块更新后,指向该数据块的间接块也会被更新。因此在修改一个文件内的数据块的时候,必须从盘中读取这些间接数据。修改间接块意味着它指向的数据块有脏数据。在给间接块的所有孩子节点下发ZIO之后,本间接块的ZIO被下发。
/*
 * Sync a dirty indirect block: make sure its buffer is in memory,
 * create (but do not issue) its write zio, recursively sync all
 * child dirty records under it, and only then dispatch this
 * block's zio — so the indirect write depends on its children.
 * (Abridged excerpt; "..." / ".." mark elided code.)
 */
static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
...
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
/* drop db_mtx around the potentially blocking read */
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
..
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
/* doesn't actually execute a write - it just creates
 * dr->dr_zio which is executed by zio_nowait before
 * returning
 */
dbuf_write(dr, db->db_buf, tx);
zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
/* sync all children first; their zios become children of ours */
dbuf_sync_list(&dr->dt.di.dr_children, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
/* now dispatch this indirect block's write */
zio_nowait(zio);
}
dbuf_sync_leaf
为脏缓冲数据记录创建ZIO,异步分发之。
/*
 * Sync a dirty level-0 (leaf) buffer: create its write zio and
 * dispatch it asynchronously. Meta-dnode records are instead
 * queued on the dnode's dirty-record list, to be issued later by
 * dmu_objset_sync(). (Abridged excerpt; "..." marks elided code.)
 */
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
...
/* doesn't actually execute a write - it just creates
 * dr->dr_zio which is executed by zio_nowait before
 * returning
 */
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
/* defer issue: dmu_objset_sync() drains this list itself */
list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
DB_DNODE_EXIT(db);
} else {
/*
 * Although zio_nowait() does not "wait for an IO", it does
 * initiate the IO. If this is an empty write it seems plausible
 * that the IO could actually be completed before the nowait
 * returns. We need to DB_DNODE_EXIT() first in case
 * zio_nowait() invalidates the dbuf.
 */
DB_DNODE_EXIT(db);
zio_nowait(dr->dr_zio);
}
}