
Searched refs: i_gl (Results 1 – 21 of 21), sorted by relevance

/linux/fs/gfs2/
super.c
132 struct gfs2_glock *j_gl = ip->i_gl; in gfs2_make_fs_rw()
501 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_dirty_inode()
1076 struct gfs2_glock *gl = ip->i_gl; in gfs2_final_release_pages()
1302 glock_clear_object(ip->i_gl, ip); in evict_unlinked_inode()
1329 gfs2_ail_flush(ip->i_gl, 0); in evict_linked_inode()
1392 glock_clear_object(ip->i_gl, ip); in gfs2_evict_inode()
1417 if (ip->i_gl) { in gfs2_evict_inode()
1418 glock_clear_object(ip->i_gl, ip); in gfs2_evict_inode()
1420 gfs2_glock_add_to_lru(ip->i_gl); in gfs2_evict_inode()
1422 ip->i_gl = NULL; in gfs2_evict_inode()
[all …]
inode.c
178 glock_set_object(ip->i_gl, ip); in gfs2_inode_lookup()
184 glock_clear_object(ip->i_gl, ip); in gfs2_inode_lookup()
442 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_init_xattr()
469 gfs2_trans_add_meta(ip->i_gl, dibh); in init_dinode()
738 glock_set_object(ip->i_gl, ip); in gfs2_create_inode()
786 glock_clear_object(ip->i_gl, ip); in gfs2_create_inode()
792 if (ip->i_gl) { in gfs2_create_inode()
794 gfs2_glock_put(ip->i_gl); in gfs2_create_inode()
866 gl = GFS2_I(inode)->i_gl; in __gfs2_lookup()
1003 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_link()
[all …]
xattr.c
278 gfs2_trans_add_meta(ip->i_gl, bh); in ea_dealloc_unstuffed()
504 gfs2_trans_add_meta(ip->i_gl, bh[x]); in gfs2_iter_unstuffed()
646 *bhp = gfs2_meta_new(ip->i_gl, block); in ea_alloc_blk()
647 gfs2_trans_add_meta(ip->i_gl, *bhp); in ea_alloc_blk()
708 bh = gfs2_meta_new(ip->i_gl, block); in ea_write()
709 gfs2_trans_add_meta(ip->i_gl, bh); in ea_write()
881 gfs2_trans_add_meta(ip->i_gl, bh); in ea_set_simple_noalloc()
1001 gfs2_trans_add_meta(ip->i_gl, indbh); in ea_set_block()
1332 gfs2_trans_add_meta(ip->i_gl, indbh); in ea_dealloc_indirect()
1366 gfs2_trans_add_meta(ip->i_gl, dibh); in ea_dealloc_indirect()
[all …]
util.c
60 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP | in check_journal_clean()
125 struct gfs2_glock *i_gl; in signal_our_withdraw() local
137 i_gl = ip->i_gl; in signal_our_withdraw()
191 wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, in signal_our_withdraw()
212 if (i_gl->gl_ops->go_free) { in signal_our_withdraw()
213 set_bit(GLF_FREEING, &i_gl->gl_flags); in signal_our_withdraw()
214 wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); in signal_our_withdraw()
457 gfs2_dump_glock(NULL, ip->i_gl, 1); in gfs2_consist_inode_i()
acl.c
70 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_get_acl()
71 int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in gfs2_get_acl()
128 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { in gfs2_set_acl()
129 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_set_acl()
file.c
168 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_fileattr_get()
244 gfs2_log_flush(sdp, ip->i_gl, in do_gfs2_set_flags()
263 gfs2_trans_add_meta(ip->i_gl, bh); in do_gfs2_set_flags()
431 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_page_mkwrite()
456 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); in gfs2_page_mkwrite()
557 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_fault()
763 gfs2_ail_flush(ip->i_gl, 1); in gfs2_fsync()
831 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); in gfs2_file_direct_read()
895 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); in gfs2_file_direct_write()
972 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_file_read_iter()
[all …]
dir.c
94 bh = gfs2_meta_new(ip->i_gl, block); in gfs2_dir_get_new_buffer()
95 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_dir_get_new_buffer()
129 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_dir_write_stuffed()
210 gfs2_trans_add_meta(ip->i_gl, bh); in gfs2_dir_write_data()
232 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_dir_write_data()
678 gfs2_trans_add_meta(dip->i_gl, bh); in dirent_del()
717 gfs2_trans_add_meta(ip->i_gl, bh); in do_init_dirent()
874 bh = gfs2_meta_new(ip->i_gl, bn); in new_leaf()
879 gfs2_trans_add_meta(ip->i_gl, bh); in new_leaf()
1483 struct gfs2_glock *gl = ip->i_gl; in gfs2_dir_readahead()
[all …]
dentry.c
63 had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); in gfs2_drevalidate()
65 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); in gfs2_drevalidate()
bmap.c
86 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_unstuffer_page()
132 gfs2_trans_add_meta(ip->i_gl, dibh); in __gfs2_unstuff_inode()
682 gfs2_trans_add_meta(ip->i_gl, dibh); in __gfs2_iomap_alloc()
754 gfs2_indirect_init(mp, ip->i_gl, i, in __gfs2_iomap_alloc()
1373 gfs2_trans_add_meta(ip->i_gl, dibh); in trunc_start()
1545 gfs2_trans_add_meta(ip->i_gl, bh); in sweep_bh_for_rgrps()
1580 gfs2_trans_add_meta(ip->i_gl, dibh); in sweep_bh_for_rgrps()
1946 gfs2_trans_add_meta(ip->i_gl, dibh); in punch_hole()
1991 gfs2_trans_add_meta(ip->i_gl, dibh); in trunc_end()
2098 gfs2_trans_add_meta(ip->i_gl, dibh); in do_grow()
[all …]
aops.c
57 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_page_add_databufs()
96 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) in gfs2_writepage()
183 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) in gfs2_jdata_writepage()
418 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | in gfs2_jdata_writepages()
638 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); in gfs2_bmap()
lops.c
796 struct gfs2_glock *gl = ip->i_gl; in buf_lo_scan_elements()
851 gfs2_inode_metasync(ip->i_gl); in buf_lo_after_scan()
857 gfs2_inode_metasync(ip->i_gl); in buf_lo_after_scan()
1020 struct gfs2_glock *gl = ip->i_gl; in databuf_lo_scan_elements()
1071 gfs2_inode_metasync(ip->i_gl); in databuf_lo_after_scan()
1078 gfs2_inode_metasync(ip->i_gl); in databuf_lo_after_scan()
quota.c
388 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh); in bh_get()
660 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); in do_qc()
743 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_write_buf_to_page()
908 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in do_sync()
967 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, in do_sync()
1024 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); in do_glock()
1384 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_quota_init()
1697 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in gfs2_set_dqblk()
recovery.c
36 struct gfs2_glock *gl = ip->i_gl; in gfs2_replay_read_block()
353 gfs2_inode_metasync(ip->i_gl); in update_statfs_inode()
447 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in gfs2_recover_func()
trace_gfs2.h
455 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
491 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
523 __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
meta_io.c
449 bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE); in gfs2_journal_wipe()
484 struct gfs2_glock *gl = ip->i_gl; in gfs2_meta_buffer()
export.c
112 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_get_name()
ops_fstype.c
580 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); in gfs2_jindex_hold()
694 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, in init_statfs()
791 sdp->sd_jinode_gl = ip->i_gl; in init_journal()
792 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, in init_journal()
964 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, in init_per_node()
incore.h
390 struct gfs2_glock *i_gl; member
glock.c
740 if (gl == m_ip->i_gl) in is_system_glock()
998 inode_gl = ip->i_gl; in gfs2_try_evict()
2240 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate()
glops.c
604 struct gfs2_glock *j_gl = ip->i_gl; in freeze_go_xmote_bh()
rgrp.c
1035 struct gfs2_glock *gl = ip->i_gl; in gfs2_rindex_update()
2458 gfs2_trans_add_meta(ip->i_gl, dibh); in gfs2_alloc_blocks()

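Every hit above dereferences the same field: i_gl, the per-inode glock pointer declared as a member of struct gfs2_inode in incore.h (line 390). Two patterns dominate the results: taking a hold on the glock with gfs2_glock_nq_init()/gfs2_holder_init() before reading the inode (acl.c, file.c, quota.c, export.c), and journaling buffers against it with gfs2_trans_add_meta()/gfs2_trans_add_data() while it is held exclusively inside a transaction (xattr.c, dir.c, bmap.c, aops.c). Below is a minimal sketch of the read-side pattern, assuming the usual fs/gfs2 headers; the helper name example_read_under_glock() is hypothetical, an illustration of the calls seen in these results rather than code from the tree.

/* Hypothetical helper, modelled on the shared-lock pattern in
 * gfs2_get_acl() (acl.c) and do_glock() (quota.c) above. */
static int example_read_under_glock(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;	/* this task's hold on the glock */
	int error;

	/* Queue a shared hold on the inode's glock and wait for it
	 * to be granted cluster-wide. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	/* ... the inode's metadata may be read safely here ... */

	/* Drop the hold and release the holder. */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

The write-side hits follow the same shape but take the glock in LM_ST_EXCLUSIVE, open a transaction, and then call gfs2_trans_add_meta(ip->i_gl, bh) (or gfs2_trans_add_data() for journaled data) so the dirtied buffer is tied to that glock in the log.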