Lines matching refs: mp — every hit below threads struct xfs_mount *mp through the XFS inode cache code (the iget, reclaim, blockgc, and inodegc paths).
46 static int xfs_icwalk(struct xfs_mount *mp,
71 struct xfs_mount *mp, in xfs_inode_alloc() argument
82 if (inode_init_always(mp->m_super, VFS_I(ip))) { in xfs_inode_alloc()
91 XFS_STATS_INC(mp, vn_active); in xfs_inode_alloc()
97 ip->i_mount = mp; in xfs_inode_alloc()
104 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; in xfs_inode_alloc()
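The xfs_inode_alloc() hits above show mp driving the VFS-level inode init, the per-mount stats, and the default on-disk flags for new inodes. A minimal sketch of that flow; the slab cache name, GFP flags, and the rest of the field setup are assumptions, not taken from the hits:

struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/* Assumed cache name; the hits above do not show the allocation. */
	ip = kmem_cache_alloc(xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	XFS_STATS_INC(mp, vn_active);
	ip->i_ino = ino;
	ip->i_mount = mp;
	/* New inodes inherit the mount's default v3 inode flags. */
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	return ip;
}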
187 struct xfs_mount *mp) in xfs_reclaim_work_queue() argument
191 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_work_queue()
192 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, in xfs_reclaim_work_queue()
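The pair of hits in xfs_reclaim_work_queue() is the whole pattern: re-arm the delayed reclaim work only while at least one AG is still tagged for reclaim in the per-mount radix tree. A sketch, with the RCU bracketing and the delay value assumed:

static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		/* Assumed interval; the hit does not show the delay argument. */
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
				msecs_to_jiffies(100));
	}
	rcu_read_unlock();
}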
206 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_queue() local
208 if (!xfs_is_blockgc_enabled(mp)) in xfs_blockgc_queue()
226 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_set_inode_tag() local
241 spin_lock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
242 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_set_inode_tag()
243 spin_unlock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
248 xfs_reclaim_work_queue(mp); in xfs_perag_set_inode_tag()
255 trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); in xfs_perag_set_inode_tag()
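xfs_perag_set_inode_tag() shows two-level tag propagation: the inode is tagged in the per-AG tree, and the first tag in an AG is mirrored into mp->m_perag_tree under m_perag_lock so mount-wide scans know which AGs to visit. A condensed sketch; the per-tag bookkeeping (reclaimable counts, the blockgc case) is elided:

static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (was_tagged)
		return;

	/* First tagged inode in this AG: propagate to the mount tree. */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* Reclaim tags also kick the background reclaim worker. */
	if (tag == XFS_ICI_RECLAIM_TAG)
		xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}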
265 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_clear_inode_tag() local
285 spin_lock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
286 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_clear_inode_tag()
287 spin_unlock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
289 trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); in xfs_perag_clear_inode_tag()
302 struct xfs_mount *mp, in xfs_reinit_inode() argument
314 error = inode_init_always(mp->m_super, inode); in xfs_reinit_inode()
335 struct xfs_mount *mp = ip->i_mount; in xfs_iget_recycle() local
353 error = xfs_reinit_inode(mp, inode); in xfs_iget_recycle()
380 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_iget_recycle()
432 struct xfs_mount *mp) in xfs_inodegc_queue_all() argument
438 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_queue_all()
440 queue_work_on(cpu, mp->m_inodegc_wq, &gc->work); in xfs_inodegc_queue_all()
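xfs_inodegc_queue_all() fans work out to every CPU's background inode-gc queue: each CPU has its own struct in mp->m_inodegc, and non-empty queues get their worker kicked on that same CPU. A sketch, assuming a per-CPU llist as the queue (the hits do not show the emptiness test):

static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		/* Only kick workers that actually have queued inodes. */
		if (!llist_empty(&gc->list))
			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
	}
}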
456 struct xfs_mount *mp = ip->i_mount; in xfs_iget_cache_hit() local
535 XFS_STATS_INC(mp, xs_ig_found); in xfs_iget_cache_hit()
541 XFS_STATS_INC(mp, xs_ig_frecycle); in xfs_iget_cache_hit()
555 if (xfs_is_inodegc_enabled(mp)) in xfs_iget_cache_hit()
556 xfs_inodegc_queue_all(mp); in xfs_iget_cache_hit()
562 struct xfs_mount *mp, in xfs_iget_cache_miss() argument
572 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget_cache_miss()
575 ip = xfs_inode_alloc(mp, ino); in xfs_iget_cache_miss()
579 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags); in xfs_iget_cache_miss()
593 if (xfs_has_v3inodes(mp) && in xfs_iget_cache_miss()
594 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { in xfs_iget_cache_miss()
599 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); in xfs_iget_cache_miss()
665 XFS_STATS_INC(mp, xs_ig_dup); in xfs_iget_cache_miss()
700 struct xfs_mount *mp, in xfs_iget() argument
715 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) in xfs_iget()
718 XFS_STATS_INC(mp, xs_ig_attempts); in xfs_iget()
721 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); in xfs_iget()
722 agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget()
739 XFS_STATS_INC(mp, xs_ig_missed); in xfs_iget()
741 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, in xfs_iget()
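Read in order, the xfs_iget() hits give the lookup skeleton: validate the inode number against the superblock AG count, bump the attempt counter, resolve the perag and the AG-relative inode number, then take the cache hit or miss path. A condensed sketch; the EAGAIN retry loop, the XFS_IGET_INCORE case, and the post-lookup transaction/lock handling are elided:

int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_perag	*pag;
	struct xfs_inode	*ip;
	xfs_agino_t		agino;
	int			error;

	/* Reject the null inode and inode numbers beyond the last AG. */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (ip) {
		/* Drops the RCU read lock internally. */
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(mp, xs_ig_missed);
		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
				flags, lock_flags);
	}
	xfs_perag_put(pag);

	if (error)
		return error;
	*ipp = ip;
	return 0;
}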
788 struct xfs_mount *mp, in xfs_icache_inode_is_allocated() argument
796 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); in xfs_icache_inode_is_allocated()
948 struct xfs_mount *mp) in xfs_want_reclaim_sick() argument
950 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) || in xfs_want_reclaim_sick()
951 xfs_is_shutdown(mp); in xfs_want_reclaim_sick()
956 struct xfs_mount *mp) in xfs_reclaim_inodes() argument
962 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes()
965 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_inodes()
966 xfs_ail_push_all_sync(mp->m_ail); in xfs_reclaim_inodes()
967 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes()
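xfs_reclaim_inodes() loops until no AG carries the reclaim tag, pushing the AIL first so dirty inodes get written back and become reclaimable. A sketch; the sick-inode flag name is an assumption based on the xfs_want_reclaim_sick() hits above:

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	/* Unmount, norecovery, or shutdown: reclaim even sick inodes. */
	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}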
980 struct xfs_mount *mp, in xfs_reclaim_inodes_nr() argument
988 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes_nr()
992 xfs_reclaim_work_queue(mp); in xfs_reclaim_inodes_nr()
993 xfs_ail_push_all(mp->m_ail); in xfs_reclaim_inodes_nr()
995 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes_nr()
1005 struct xfs_mount *mp) in xfs_reclaim_inodes_count() argument
1011 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { in xfs_reclaim_inodes_count()
1103 struct xfs_mount *mp = container_of(to_delayed_work(work), in xfs_reclaim_worker() local
1106 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); in xfs_reclaim_worker()
1107 xfs_reclaim_work_queue(mp); in xfs_reclaim_worker()
1158 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_set_iflag() local
1173 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_set_iflag()
1176 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_set_iflag()
1196 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_clear_iflag() local
1210 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_clear_iflag()
1213 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_clear_iflag()
1339 struct xfs_mount *mp) in xfs_blockgc_stop() argument
1344 if (!xfs_clear_blockgc_enabled(mp)) in xfs_blockgc_stop()
1347 for_each_perag(mp, agno, pag) in xfs_blockgc_stop()
1349 trace_xfs_blockgc_stop(mp, __return_address); in xfs_blockgc_stop()
1355 struct xfs_mount *mp) in xfs_blockgc_start() argument
1360 if (xfs_set_blockgc_enabled(mp)) in xfs_blockgc_start()
1363 trace_xfs_blockgc_start(mp, __return_address); in xfs_blockgc_start()
1364 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_start()
1440 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_worker() local
1443 trace_xfs_blockgc_worker(mp, __return_address); in xfs_blockgc_worker()
1447 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", in xfs_blockgc_worker()
1458 struct xfs_mount *mp, in xfs_blockgc_free_space() argument
1463 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_); in xfs_blockgc_free_space()
1465 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw); in xfs_blockgc_free_space()
1469 xfs_inodegc_flush(mp); in xfs_blockgc_free_space()
1479 struct xfs_mount *mp) in xfs_blockgc_flush_all() argument
1484 trace_xfs_blockgc_flush_all(mp, __return_address); in xfs_blockgc_flush_all()
1491 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1495 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1498 xfs_inodegc_flush(mp); in xfs_blockgc_flush_all()
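The two perag loops in xfs_blockgc_flush_all() do different jobs: the first kicks every tagged AG's blockgc worker so they all run in parallel, the second waits on each one, and only then is inodegc flushed to reap what blockgc freed. A sketch, assuming the per-AG work item is the delayed work named pag_blockgc_work:

void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/* Start every tagged AG's worker immediately... */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0);

	/* ...then wait for all of them to finish. */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}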
1513 struct xfs_mount *mp, in xfs_blockgc_free_dquots() argument
1531 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { in xfs_blockgc_free_dquots()
1532 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); in xfs_blockgc_free_dquots()
1537 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) { in xfs_blockgc_free_dquots()
1538 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); in xfs_blockgc_free_dquots()
1543 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { in xfs_blockgc_free_dquots()
1552 return xfs_blockgc_free_space(mp, &icw); in xfs_blockgc_free_dquots()
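xfs_blockgc_free_dquots() turns "this dquot is nearly full" into a targeted scan: for each enforced quota type whose dquot is low on space, it stuffs the matching id into the icwalk filter, sets the corresponding match flag, and hands the filter to xfs_blockgc_free_space(). A condensed sketch mirroring the hits; note the gdqp branch in the hits tests XFS_IS_UQUOTA_ENFORCED rather than the group variant, and the sketch reproduces that as-is:

int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = { .icw_flags = iwalk_flags };
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	/* As in the hit above: the UQUOTA predicate guards the group dquot. */
	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}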
1632 struct xfs_mount *mp = pag->pag_mount; in xfs_icwalk_ag() local
1685 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) in xfs_icwalk_ag()
1687 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); in xfs_icwalk_ag()
1688 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) in xfs_icwalk_ag()
1737 struct xfs_mount *mp, in xfs_icwalk() argument
1746 for_each_perag_tag(mp, agno, pag, goal) { in xfs_icwalk()
1791 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_set_reclaimable() local
1794 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { in xfs_inodegc_set_reclaimable()
1800 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_inodegc_set_reclaimable()
1807 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_inodegc_set_reclaimable()
1860 struct xfs_mount *mp) in xfs_inodegc_flush() argument
1865 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_flush()
1868 trace_xfs_inodegc_flush(mp, __return_address); in xfs_inodegc_flush()
1870 xfs_inodegc_queue_all(mp); in xfs_inodegc_flush()
1873 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_flush()
1884 struct xfs_mount *mp) in xfs_inodegc_stop() argument
1889 if (!xfs_clear_inodegc_enabled(mp)) in xfs_inodegc_stop()
1892 xfs_inodegc_queue_all(mp); in xfs_inodegc_stop()
1895 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_stop()
1898 trace_xfs_inodegc_stop(mp, __return_address); in xfs_inodegc_stop()
1907 struct xfs_mount *mp) in xfs_inodegc_start() argument
1909 if (xfs_set_inodegc_enabled(mp)) in xfs_inodegc_start()
1912 trace_xfs_inodegc_start(mp, __return_address); in xfs_inodegc_start()
1913 xfs_inodegc_queue_all(mp); in xfs_inodegc_start()
1921 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_rt_file() local
1927 freertx = READ_ONCE(mp->m_sb.sb_frextents); in xfs_inodegc_want_queue_rt_file()
1928 return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT]; in xfs_inodegc_want_queue_rt_file()
1946 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_work() local
1948 if (items > mp->m_ino_geo.inodes_per_cluster) in xfs_inodegc_want_queue_work()
1951 if (__percpu_counter_compare(&mp->m_fdblocks, in xfs_inodegc_want_queue_work()
1952 mp->m_low_space[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_work()
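Together, xfs_inodegc_want_queue_rt_file() and xfs_inodegc_want_queue_work() encode the "when to flush the per-CPU backlog" heuristics: enough queued inodes to fill an inode cluster, free data blocks under the 5% low-space mark, or (for realtime files) free rt extents under their 5% mark. A sketch of the combined predicate, assuming the standard percpu-counter comparison batch:

static bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* A cluster buffer's worth of inodes is waiting: go now. */
	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	/* Free data blocks below the 5% threshold: go now. */
	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	/* Realtime file whose free rt extents ran low (see the hits above). */
	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	return false;
}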
2013 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_queue() local
2023 gc = get_cpu_ptr(mp->m_inodegc); in xfs_inodegc_queue()
2030 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_queue()
2034 trace_xfs_inodegc_queue(mp, __return_address); in xfs_inodegc_queue()
2035 queue_work(mp->m_inodegc_wq, &gc->work); in xfs_inodegc_queue()
2039 trace_xfs_inodegc_throttle(mp, __return_address); in xfs_inodegc_queue()
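xfs_inodegc_queue() is the per-CPU producer side: add the inode to this CPU's list and, if background gc is enabled and the heuristics above say so, queue the worker; a separate throttle path makes the caller wait on the worker instead. A condensed sketch; the item accounting and the flush predicate are assumptions filled in around the hits:

static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);
	put_cpu_ptr(gc);

	/* Frozen or stopped gc: leave the inode queued for later. */
	if (!xfs_is_inodegc_enabled(mp))
		return;

	if (xfs_inodegc_want_queue_work(ip, items)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		queue_work(mp->m_inodegc_wq, &gc->work);
	}

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_work(&gc->work);
	}
}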
2049 struct xfs_mount *mp, in xfs_inodegc_cpu_dead() argument
2056 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu); in xfs_inodegc_cpu_dead()
2072 gc = get_cpu_ptr(mp->m_inodegc); in xfs_inodegc_cpu_dead()
2078 if (xfs_is_inodegc_enabled(mp)) { in xfs_inodegc_cpu_dead()
2079 trace_xfs_inodegc_queue(mp, __return_address); in xfs_inodegc_cpu_dead()
2080 queue_work(mp->m_inodegc_wq, &gc->work); in xfs_inodegc_cpu_dead()
2098 struct xfs_mount *mp = ip->i_mount; in xfs_inode_mark_reclaimable() local
2101 XFS_STATS_INC(mp, vn_reclaim); in xfs_inode_mark_reclaimable()
2137 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_count() local
2142 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_count()
2146 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_count()
2159 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_scan() local
2165 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_scan()
2168 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address); in xfs_inodegc_shrinker_scan()
2171 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_scan()
2176 queue_work_on(cpu, mp->m_inodegc_wq, &gc->work); in xfs_inodegc_shrinker_scan()
2194 struct xfs_mount *mp) in xfs_inodegc_register_shrinker() argument
2196 struct shrinker *shrink = &mp->m_inodegc_shrinker; in xfs_inodegc_register_shrinker()
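The shrinker trio closes the loop: count_objects reports the per-CPU backlog so memory pressure can see it, scan_objects kicks every non-empty queue, and registration wires both into mp->m_inodegc_shrinker. A sketch of the registration, assuming the one-argument register_shrinker() of the embedded-shrinker era this listing appears to come from; the tuning values are assumptions:

int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	/* Assumed tuning: no seek cost, reclaim in small batches. */
	shrink->seeks = 0;
	shrink->batch = 32;

	return register_shrinker(shrink);
}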