#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB),
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * duplicated field definitions in the kmem_cache structures of SLAB
 * and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
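
/*
 * Illustrative sketch (not part of this header): the allocators advance
 * slab_state as bootstrap milestones are reached, e.g. in a SLUB-style
 * init sequence:
 *
 *	void __init kmem_cache_init(void)
 *	{
 *		...
 *		slab_state = PARTIAL;		// kmem_cache_node usable
 *		...
 *		create_kmalloc_caches(0);	// brings slab_state to UP
 *	}
 *
 * Other code can then gate on progress; slab_is_available(), for
 * instance, reports slab_state >= UP. The exact sequence of
 * assignments above is a sketch, not the verbatim init path.
 */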

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;
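
/*
 * Sketch of the expected usage (mirrors mm/slab_common.c): any walk or
 * modification of slab_caches must be done under slab_mutex:
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list) {
 *		...	// inspect or update s
 *	}
 *	mutex_unlock(&slab_mutex);
 */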

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
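
/*
 * How these pair up (a sketch, following the kmalloc paths in the
 * allocators): create_kmalloc_caches() populates the kmalloc array at
 * boot, after which kmalloc_slab() maps a request size to its cache:
 *
 *	cachep = kmalloc_slab(size, flags);
 *	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 *		return cachep;
 *	ret = slab_alloc(cachep, flags, caller);
 *
 * slab_alloc() here stands in for the allocator's internal fast path.
 */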

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
	unsigned long flags, const char *name, void (*ctor)(void *));
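
/*
 * Sketch of the merge path (as assumed from mm/slab_common.c and the
 * allocators): kmem_cache_create() may call __kmem_cache_alias(), which
 * uses find_mergeable() to return an existing compatible cache instead
 * of creating a new one; slab_unmergeable() vetoes reuse for caches
 * with constructors or debug flags set, among other things.
 */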
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
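
/*
 * Illustrative use (a sketch of what mm/slab_common.c does): caller-
 * supplied creation flags are sanitized against the legal mask, e.g.
 *
 *	flags &= CACHE_CREATE_MASK;
 */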

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};
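
/*
 * For orientation (not normative): these fields back one line of
 * /proc/slabinfo, whose header reads roughly
 *
 *   name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>
 *    : tunables <limit> <batchcount> <sharedfactor>
 *    : slabdata <active_slabs> <num_slabs> <sharedavail>
 */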

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when printing them
 * locally it is better to refer to them by their base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg
 * caches. Hence the caller must ensure that the memcg's cache won't go
 * away. Since a memcg's cache, once created, is destroyed only along
 * with its root cache, this holds if we are going to allocate from the
 * cache or hold a reference to the root cache by other means.
 * Otherwise, we should hold either the slab_mutex or the memcg's
 * slab_caches_mutex while calling this function and accessing the
 * returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
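
/*
 * Illustrative caller (a sketch): per-memcg children of a root cache
 * are typically walked under slab_mutex, as required above, e.g.
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(s, i);
 *		if (c)
 *			...	// e.g. fold c's statistics into s's
 *	}
 */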

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_charge_slab(s, gfp, order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	__memcg_uncharge_slab(s, order);
}
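
/*
 * Illustrative pairing (a sketch of an allocator's page-allocation
 * path): a slab's pages are charged when allocated for a new slab and
 * uncharged when the slab is torn down:
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 */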
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that
 * has a kmem_cache_node structure allocated (which is true for all
 * online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
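
/*
 * Example (illustrative, patterned after SLUB's use of this iterator;
 * note nr_slabs exists only with CONFIG_SLUB_DEBUG):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *	unsigned long total = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		total += atomic_long_read(&n->nr_slabs);
 */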

#endif

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */