Lines Matching full:pool
106 static void free_cg_pool(struct dmem_cgroup_pool_state *pool) in free_cg_pool() argument
108 list_del(&pool->region_node); in free_cg_pool()
109 kfree(pool); in free_cg_pool()
113 set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val) in set_resource_min() argument
115 page_counter_set_min(&pool->cnt, val); in set_resource_min()
119 set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val) in set_resource_low() argument
121 page_counter_set_low(&pool->cnt, val); in set_resource_low()
125 set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val) in set_resource_max() argument
127 page_counter_set_max(&pool->cnt, val); in set_resource_max()
130 static u64 get_resource_low(struct dmem_cgroup_pool_state *pool) in get_resource_low() argument
132 return pool ? READ_ONCE(pool->cnt.low) : 0; in get_resource_low()
135 static u64 get_resource_min(struct dmem_cgroup_pool_state *pool) in get_resource_min() argument
137 return pool ? READ_ONCE(pool->cnt.min) : 0; in get_resource_min()
140 static u64 get_resource_max(struct dmem_cgroup_pool_state *pool) in get_resource_max() argument
142 return pool ? READ_ONCE(pool->cnt.max) : PAGE_COUNTER_MAX; in get_resource_max()
145 static u64 get_resource_current(struct dmem_cgroup_pool_state *pool) in get_resource_current() argument
147 return pool ? page_counter_read(&pool->cnt) : 0; in get_resource_current()
160 struct dmem_cgroup_pool_state *pool; in dmemcs_offline() local
163 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node) in dmemcs_offline()
164 reset_all_resource_limits(pool); in dmemcs_offline()
171 struct dmem_cgroup_pool_state *pool, *next; in dmemcs_free() local
174 list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) { in dmemcs_free()
176 * The pool is dead and all references are 0, in dmemcs_free()
179 list_del(&pool->css_node); in dmemcs_free()
180 free_cg_pool(pool); in dmemcs_free()
201 struct dmem_cgroup_pool_state *pool; in find_cg_pool_locked() local
203 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock)) in find_cg_pool_locked()
204 if (pool->region == region) in find_cg_pool_locked()
205 return pool; in find_cg_pool_locked()
210 static struct dmem_cgroup_pool_state *pool_parent(struct dmem_cgroup_pool_state *pool) in pool_parent() argument
212 if (!pool->cnt.parent) in pool_parent()
215 return container_of(pool->cnt.parent, typeof(*pool), cnt); in pool_parent()
225 struct dmem_cgroup_pool_state *pool, *found_pool; in dmem_cgroup_calculate_protection() local
235 list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) { in dmem_cgroup_calculate_protection()
236 if (pool->region == limit_pool->region) { in dmem_cgroup_calculate_protection()
237 found_pool = pool; in dmem_cgroup_calculate_protection()
255 * @limit_pool: The pool for which we hit limits
256 * @test_pool: The pool for which to test
271 struct dmem_cgroup_pool_state *pool = test_pool; in dmem_cgroup_state_evict_valuable() local
275 /* Can always evict from current pool, despite limits */ in dmem_cgroup_state_evict_valuable()
283 for (pool = test_pool; pool && limit_pool != pool; pool = pool_parent(pool)) in dmem_cgroup_state_evict_valuable()
286 if (!pool) in dmem_cgroup_state_evict_valuable()
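Taken together, the dmem_cgroup_state_evict_valuable() hits show the eviction policy helper: it walks from @test_pool up through pool_parent() and only treats the candidate as worth evicting if the walk reaches @limit_pool. A minimal, hypothetical caller is sketched below; the my_* names and the LRU layout are illustrative assumptions, and only the dmem_cgroup_state_evict_valuable() call itself comes from the controller's exported API.

/*
 * Hypothetical eviction pass (sketch): scan a driver LRU and evict the
 * first buffer whose pool the dmem controller considers evictable on
 * behalf of the limiting pool.
 */
static int my_evict_one(struct my_device *dev,
			struct dmem_cgroup_pool_state *limit_pool)
{
	struct my_buffer *buf;
	bool hit_low = false;

	list_for_each_entry(buf, &dev->lru, lru_node) {
		if (!dmem_cgroup_state_evict_valuable(limit_pool, buf->pool,
						      false, &hit_low))
			continue;	/* still protected by min/low */

		return my_buffer_evict(buf);
	}

	/* Nothing evictable; a caller might retry with ignore_low set. */
	return hit_low ? -EAGAIN : -ENOSPC;
}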
324 struct dmem_cgroup_pool_state *pool, *ppool = NULL; in alloc_pool_single() local
327 pool = kzalloc(sizeof(*pool), GFP_NOWAIT); in alloc_pool_single()
328 if (!pool) in alloc_pool_single()
331 pool = *allocpool; in alloc_pool_single()
335 pool->region = region; in alloc_pool_single()
336 pool->cs = dmemcs; in alloc_pool_single()
341 page_counter_init(&pool->cnt, in alloc_pool_single()
343 reset_all_resource_limits(pool); in alloc_pool_single()
345 list_add_tail_rcu(&pool->css_node, &dmemcs->pools); in alloc_pool_single()
346 list_add_tail(&pool->region_node, &region->pools); in alloc_pool_single()
349 pool->inited = true; in alloc_pool_single()
351 pool->inited = ppool ? ppool->inited : false; in alloc_pool_single()
352 return pool; in alloc_pool_single()
359 struct dmem_cgroup_pool_state *pool, *ppool, *retpool; in get_cg_pool_locked() local
363 * Recursively create pool, we may not initialize yet on in get_cg_pool_locked()
367 pool = find_cg_pool_locked(p, region); in get_cg_pool_locked()
368 if (!pool) in get_cg_pool_locked()
369 pool = alloc_pool_single(p, region, allocpool); in get_cg_pool_locked()
371 if (IS_ERR(pool)) in get_cg_pool_locked()
372 return pool; in get_cg_pool_locked()
374 if (p == dmemcs && pool->inited) in get_cg_pool_locked()
375 return pool; in get_cg_pool_locked()
377 if (pool->inited) in get_cg_pool_locked()
381 retpool = pool = find_cg_pool_locked(dmemcs, region); in get_cg_pool_locked()
383 if (pool->inited) in get_cg_pool_locked()
390 pool->cnt.parent = &ppool->cnt; in get_cg_pool_locked()
391 pool->inited = true; in get_cg_pool_locked()
393 pool = ppool; in get_cg_pool_locked()
402 struct dmem_cgroup_pool_state *pool, *next; in dmemcg_free_rcu() local
404 list_for_each_entry_safe(pool, next, &region->pools, region_node) in dmemcg_free_rcu()
405 free_cg_pool(pool); in dmemcg_free_rcu()
436 struct dmem_cgroup_pool_state *pool = in dmem_cgroup_unregister_region() local
437 container_of(entry, typeof(*pool), region_node); in dmem_cgroup_unregister_region()
439 list_del_rcu(&pool->css_node); in dmem_cgroup_unregister_region()
513 * @pool: &dmem_cgroup_pool_state
515 * Called to drop a reference to the limiting pool returned by
518 void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool) in dmem_cgroup_pool_state_put() argument
520 if (pool) in dmem_cgroup_pool_state_put()
521 css_put(&pool->cs->css); in dmem_cgroup_pool_state_put()
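The kernel-doc fragment above notes that the limiting pool returned to a caller pins its cgroup; dmem_cgroup_pool_state_put() is the matching release, and the body shows a NULL pool is tolerated. A short sketch of the expected pattern (the limit_pool variable is assumed to come from a failed charge, not from the source):

	/* Drop the reference once the limiting pool is no longer needed; NULL is a no-op. */
	dmem_cgroup_pool_state_put(limit_pool);
	limit_pool = NULL;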
528 struct dmem_cgroup_pool_state *pool, *allocpool = NULL; in get_cg_pool_unlocked() local
532 pool = find_cg_pool_locked(cg, region); in get_cg_pool_unlocked()
533 if (pool && !READ_ONCE(pool->inited)) in get_cg_pool_unlocked()
534 pool = NULL; in get_cg_pool_unlocked()
537 while (!pool) { in get_cg_pool_unlocked()
540 pool = get_cg_pool_locked(cg, region, &allocpool); in get_cg_pool_unlocked()
542 pool = ERR_PTR(-ENODEV); in get_cg_pool_unlocked()
545 if (pool == ERR_PTR(-ENOMEM)) { in get_cg_pool_unlocked()
546 pool = NULL; in get_cg_pool_unlocked()
552 pool = NULL; in get_cg_pool_unlocked()
559 return pool; in get_cg_pool_unlocked()
563 * dmem_cgroup_uncharge() - Uncharge a pool.
564 * @pool: Pool to uncharge.
568 * Must be called with the returned pool as argument,
571 void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size) in dmem_cgroup_uncharge() argument
573 if (!pool) in dmem_cgroup_uncharge()
576 page_counter_uncharge(&pool->cnt, size); in dmem_cgroup_uncharge()
577 css_put(&pool->cs->css); in dmem_cgroup_uncharge()
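On the free side, the kernel-doc and body lines above show that dmem_cgroup_uncharge() both uncharges the page counter and drops the css reference taken at charge time, and that a NULL pool is a no-op. A hypothetical driver free path might therefore look like this (my_buffer and my_release_backing_store are illustrative, not from the source):

/* Sketch of a driver free path: return the charge taken at allocation time. */
static void my_buffer_free(struct my_device *dev, struct my_buffer *buf)
{
	my_release_backing_store(dev, buf);

	/* Size must match the amount charged; a NULL pool is ignored. */
	dmem_cgroup_uncharge(buf->pool, buf->size);
	buf->pool = NULL;
}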
585 * @ret_pool: On successful allocation, the pool that is charged.
586 * @ret_limit_pool: On a failed allocation, the limiting pool.
594 * will be set to the pool for which the limit is hit. This can be used for
605 struct dmem_cgroup_pool_state *pool; in dmem_cgroup_try_charge() local
619 pool = get_cg_pool_unlocked(cg, region); in dmem_cgroup_try_charge()
620 if (IS_ERR(pool)) { in dmem_cgroup_try_charge()
621 ret = PTR_ERR(pool); in dmem_cgroup_try_charge()
625 if (!page_counter_try_charge(&pool->cnt, size, &fail)) { in dmem_cgroup_try_charge()
635 *ret_pool = pool; in dmem_cgroup_try_charge()
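The allocation side mirrors this: the region is charged before memory is handed out, the returned pool is kept for the later uncharge, and on failure the limiting pool (when provided) can be used to drive eviction. A hedged sketch, assuming the exported int dmem_cgroup_try_charge(region, size, &pool, &limit_pool) signature; the my_* helpers and fields are illustrative:

/* Sketch of a driver allocation path: charge the dmem region first. */
static int my_buffer_alloc(struct my_device *dev, struct my_buffer *buf, u64 size)
{
	struct dmem_cgroup_pool_state *limit_pool = NULL;
	int ret;

	ret = dmem_cgroup_try_charge(dev->dmem_region, size,
				     &buf->pool, &limit_pool);
	if (ret) {
		/* On a limit hit, limit_pool identifies the pool to evict against. */
		if (limit_pool) {
			my_evict_one(dev, limit_pool);
			dmem_cgroup_pool_state_put(limit_pool);
		}
		return ret;
	}

	ret = my_allocate_backing_store(dev, buf);
	if (ret) {
		dmem_cgroup_uncharge(buf->pool, size);
		buf->pool = NULL;
		return ret;
	}

	buf->size = size;
	return 0;
}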
682 struct dmem_cgroup_pool_state *pool = NULL; in dmemcg_limit_write() local
713 pool = get_cg_pool_unlocked(dmemcs, region); in dmemcg_limit_write()
714 if (IS_ERR(pool)) { in dmemcg_limit_write()
715 err = PTR_ERR(pool); in dmemcg_limit_write()
720 apply(pool, new_limit); in dmemcg_limit_write()
738 struct dmem_cgroup_pool_state *pool = find_cg_pool_locked(dmemcs, region); in dmemcg_limit_show() local
743 val = fn(pool); in dmemcg_limit_show()