mm/slab.h

#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
    DOWN,                   /* No slab functionality yet */
    PARTIAL,                /* SLUB: kmem_cache_node available */
    PARTIAL_ARRAYCACHE,     /* SLAB: kmalloc size for arraycache available */
    PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
    UP,                     /* Slab caches usable but not all extras yet */
    FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
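
/*
 * Usage sketch: early-boot code checks slab_state before trusting the
 * allocator. The helper in mm/slab_common.c is essentially
 *
 *      bool slab_is_available(void)
 *      {
 *              return slab_state >= UP;
 *      }
 *
 * and the active allocator advances slab_state (DOWN -> PARTIAL* -> UP ->
 * FULL) as each stage of its bootstrap completes.
 */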

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
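
/*
 * Usage sketch (not an API guarantee of this header): walkers of the global
 * cache list hold slab_mutex around the traversal, e.g.
 *
 *      struct kmem_cache *s;
 *
 *      mutex_lock(&slab_mutex);
 *      list_for_each_entry(s, &slab_caches, list)
 *              pr_info("cache %s, object size %d\n", s->name, s->object_size);
 *      mutex_unlock(&slab_mutex);
 *
 * kmem_cache itself is the boot cache from which every other struct
 * kmem_cache is allocated.
 */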

unsigned long calculate_alignment(unsigned long flags,
        unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
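
/*
 * Sketch of the intended use (assumed from the common kmalloc paths): the
 * caller maps a request size to one of the fixed-size kmalloc caches and
 * allocates from it, roughly
 *
 *      struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *      if (!s)
 *              return NULL;    // size too large for the kmalloc array
 *      return kmem_cache_alloc(s, flags);
 */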

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
            unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
            size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
           size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
           size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
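
/*
 * Sketch of how the alias hook is used (assumption based on the cache-merge
 * logic in SLUB, not something this header guarantees): kmem_cache_create()
 * first asks the allocator whether an existing, compatible cache can be
 * reused before it builds a new one:
 *
 *      s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 *      if (s)
 *              return s;       // merged with an existing cache
 *      // otherwise fall through and create a fresh kmem_cache
 *
 * The non-SLUB stub returns NULL, so SLAB and SLOB never merge caches.
 */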

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
             SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
              SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
              SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
              SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
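
/*
 * Sketch of the intent (assumption; the actual policing lives in the common
 * and allocator-specific creation paths): flags outside CACHE_CREATE_MASK
 * are not meaningful for the compiled-in allocator, so a creation path can
 * sanitize a caller's request with something like
 *
 *      if (flags & ~CACHE_CREATE_MASK)
 *              flags &= CACHE_CREATE_MASK;
 *
 * before handing it to __kmem_cache_create().
 */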

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
    unsigned long active_objs;
    unsigned long num_objs;
    unsigned long active_slabs;
    unsigned long num_slabs;
    unsigned long shared_avail;
    unsigned int limit;
    unsigned int batchcount;
    unsigned int shared;
    unsigned int objects_per_slab;
    unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
               size_t count, loff_t *ppos);
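
/*
 * Usage sketch, modelled on the /proc/slabinfo show path in
 * mm/slab_common.c (formatting details are illustrative): the allocator
 * fills in a struct slabinfo and the common code prints one line per cache:
 *
 *      struct slabinfo sinfo;
 *
 *      memset(&sinfo, 0, sizeof(sinfo));
 *      get_slabinfo(s, &sinfo);
 *      seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
 *                 cache_name(s), sinfo.active_objs, sinfo.num_objs,
 *                 s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order));
 *      slabinfo_show_stats(m, s);
 */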

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
    return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
                     struct mem_cgroup *memcg)
{
    return (is_root_cache(cachep) && !memcg) ||
                (cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
    if (!is_root_cache(s))
        atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
    if (is_root_cache(s))
        return;

    if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
        mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                    struct kmem_cache *p)
{
    return (p == s) ||
        (s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We add a suffix to the cache name in memcg because we cannot have two
 * caches in the system with the same name. But when we print them
 * locally it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
    if (!is_root_cache(s))
        return s->memcg_params->root_cache->name;
    return s->name;
}
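
/*
 * For example (the exact suffix format is generated elsewhere and only
 * illustrative here): a per-memcg child of "kmalloc-128" may be registered
 * under a name like "kmalloc-128(2:foo)", yet cache_name() still reports
 * "kmalloc-128", keeping slabinfo-style output stable.
 */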

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
    if (!s->memcg_params)
        return NULL;
    return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
    if (is_root_cache(s))
        return s;
    return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
    return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
                     struct mem_cgroup *memcg)
{
    return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                      struct kmem_cache *p)
{
    return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
    return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
    return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
    return s;
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
    struct kmem_cache *cachep;
    struct page *page;

    /*
     * When kmemcg is not being used, both assignments should return the
     * same value, but we don't want to pay the assignment price in that
     * case. If it is not compiled in, the compiler should be smart enough
     * not to do even the assignment. In that case, slab_equal_or_root
     * will also be a constant.
     */
    if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
        return s;

    page = virt_to_head_page(x);
    cachep = page->slab_cache;
    if (slab_equal_or_root(cachep, s))
        return cachep;

    pr_err("%s: Wrong slab cache. %s but object is from %s\n",
        __func__, cachep->name, s->name);
    WARN_ON_ONCE(1);
    return s;
}
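
/*
 * Typical caller (sketch, based on the kmem_cache_free() paths of the
 * allocators): the free path re-resolves the cache from the object so that
 * an object charged to a per-memcg child cache is returned to that child
 * even if the caller passed the root cache:
 *
 *      void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 *      {
 *              cachep = cache_from_obj(cachep, objp);
 *              ...     // free objp back to cachep as usual
 *      }
 */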

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
    spinlock_t list_lock;

#ifdef CONFIG_SLAB
    struct list_head slabs_partial; /* partial list first, better asm code */
    struct list_head slabs_full;
    struct list_head slabs_free;
    unsigned long free_objects;
    unsigned int free_limit;
    unsigned int colour_next;       /* Per-node cache coloring */
    struct array_cache *shared;     /* shared per node */
    struct array_cache **alien;     /* on other nodes */
    unsigned long next_reap;        /* updated without locking */
    int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
    unsigned long nr_partial;
    struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
    atomic_long_t nr_slabs;
    atomic_long_t total_objects;
    struct list_head full;
#endif
#endif
};
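
/*
 * Access sketch (assuming SLUB's per-node array in struct kmem_cache; the
 * SLAB case is analogous): the per-node lists are only walked under
 * list_lock, e.g.
 *
 *      struct kmem_cache_node *n = s->node[node];
 *      struct page *page;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&n->list_lock, flags);
 *      list_for_each_entry(page, &n->partial, lru) {
 *              // inspect or unfreeze partial slabs here
 *      }
 *      spin_unlock_irqrestore(&n->list_lock, flags);
 */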

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
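
/*
 * These two helpers are shared between the /proc/slabinfo core and other
 * slab seq_file users. A typical wiring (sketch; the start/show callbacks
 * named here are assumptions, they live in mm/slab_common.c and mm/slab.c):
 *
 *      static const struct seq_operations slabinfo_op = {
 *              .start = s_start,
 *              .next  = slab_next,
 *              .show  = s_show,
 *              .stop  = slab_stop,
 *      };
 */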

#endif /* MM_SLAB_H */
