drivers/staging/zsmalloc/zsmalloc-main.c

/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zcache and zram. Thus, the
 * allocator is supposed to work well under low memory conditions. In
 * particular, it never attempts higher order page allocation, which is
 * very likely to fail under memory pressure. On the other hand, if we
 * just use single (0-order) pages, it would suffer from very high
 * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
 * an entire page. This was one of the major issues with its predecessor
 * (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page, i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * Following is how we use various fields and flags of the underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *    page->first_page: points to the first component (0-order) page
 *    page->index (union with page->freelist): offset of the first object
 *        starting in this page. For the first page, this is
 *        always 0, so we use this field (aka freelist) to point
 *        to the first free object in zspage.
 *    page->lru: links together all component pages (except the first page)
 *        of a zspage
 *
 * For _first_ page only:
 *
 *    page->private (union with page->first_page): refers to the
 *        component page after the first page
 *    page->freelist: points to the first free object in zspage.
 *        Free objects are linked together using in-place
 *        metadata.
 *    page->objects: maximum number of objects we can store in this
 *        zspage (class->zspage_order * PAGE_SIZE / class->size)
 *    page->lru: links together first pages of various zspages.
 *        Basically forming list of zspages in a fullness group.
 *    page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *    PG_private: identifies the first component page
 *    PG_private2: identifies the last component page
 *
 */
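
/*
 * Illustrative layout (editor's sketch, assuming PAGE_SIZE = 4096 and a
 * size class of 1536 bytes): a zspage built from three 0-order pages
 * holds exactly 8 objects, and objects 2 and 5 straddle page boundaries:
 *
 *    page 0 (first, PG_private):  obj0 | obj1 | obj2...
 *    page 1:                      ...obj2 | obj3 | obj4 | obj5...
 *    page 2 (last, PG_private2):  ...obj5 | obj6 | obj7
 */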

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS 28
#define FULLNESS_BITS 4
#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
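
/*
 * Worked example (editor's illustration): for class_idx = 5 and a
 * fullness group whose enum value is 3, set_zspage_mapping() stores
 * (5 << 4) | 3 = 0x53 in page->mapping, and get_zspage_mapping()
 * recovers both by masking with FULLNESS_MASK and shifting by
 * FULLNESS_BITS. The pointer is never dereferenced while it holds
 * this encoded value.
 */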

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
    return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
    return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
                enum fullness_group *fullness)
{
    unsigned long m;
    BUG_ON(!is_first_page(page));

    m = (unsigned long)page->mapping;
    *fullness = m & FULLNESS_MASK;
    *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
                enum fullness_group fullness)
{
    unsigned long m;
    BUG_ON(!is_first_page(page));

    m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
            (fullness & FULLNESS_MASK);
    page->mapping = (struct address_space *)m;
}

static int get_size_class_index(int size)
{
    int idx = 0;

    if (likely(size > ZS_MIN_ALLOC_SIZE))
        idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
                ZS_SIZE_CLASS_DELTA);

    return idx;
}
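
/*
 * Example (editor's illustration, assuming the defaults from
 * zsmalloc_int.h on 4K pages: ZS_MIN_ALLOC_SIZE = 32 and
 * ZS_SIZE_CLASS_DELTA = 16): a request for 100 bytes gives
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class serving
 * objects up to 32 + 5 * 16 = 112 bytes.
 */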

static enum fullness_group get_fullness_group(struct page *page)
{
    int inuse, max_objects;
    enum fullness_group fg;
    BUG_ON(!is_first_page(page));

    inuse = page->inuse;
    max_objects = page->objects;

    if (inuse == 0)
        fg = ZS_EMPTY;
    else if (inuse == max_objects)
        fg = ZS_FULL;
    else if (inuse <= max_objects / fullness_threshold_frac)
        fg = ZS_ALMOST_EMPTY;
    else
        fg = ZS_ALMOST_FULL;

    return fg;
}

static void insert_zspage(struct page *page, struct size_class *class,
                enum fullness_group fullness)
{
    struct page **head;

    BUG_ON(!is_first_page(page));

    if (fullness >= _ZS_NR_FULLNESS_GROUPS)
        return;

    head = &class->fullness_list[fullness];
    if (*head)
        list_add_tail(&page->lru, &(*head)->lru);

    *head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
                enum fullness_group fullness)
{
    struct page **head;

    BUG_ON(!is_first_page(page));

    if (fullness >= _ZS_NR_FULLNESS_GROUPS)
        return;

    head = &class->fullness_list[fullness];
    BUG_ON(!*head);
    if (list_empty(&(*head)->lru))
        *head = NULL;
    else if (*head == page)
        *head = (struct page *)list_entry((*head)->lru.next,
                    struct page, lru);

    list_del_init(&page->lru);
}

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
                        struct page *page)
{
    unsigned int class_idx;
    struct size_class *class;
    enum fullness_group currfg, newfg;

    BUG_ON(!is_first_page(page));

    get_zspage_mapping(page, &class_idx, &currfg);
    newfg = get_fullness_group(page);
    if (newfg == currfg)
        goto out;

    class = &pool->size_class[class_idx];
    remove_zspage(page, class, currfg);
    insert_zspage(page, class, newfg);
    set_zspage_mapping(page, class_idx, newfg);

out:
    return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage, which is given as:
 *     wastage = Zp % class_size
 *     usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE, where k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
    int i, max_usedpc = 0;
    /* zspage order which gives maximum used size per KB */
    int max_usedpc_order = 1;

    for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
        int zspage_size;
        int waste, usedpc;

        zspage_size = i * PAGE_SIZE;
        waste = zspage_size % class_size;
        usedpc = (zspage_size - waste) * 100 / zspage_size;

        if (usedpc > max_usedpc) {
            max_usedpc = usedpc;
            max_usedpc_order = i;
        }
    }

    return max_usedpc_order;
}
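
/*
 * Worked example (editor's illustration, 4K pages, class_size = 1536,
 * i.e. 3/8 * PAGE_SIZE):
 *
 *    i = 1: waste = 4096 % 1536  = 1024 -> usedpc = 75
 *    i = 2: waste = 8192 % 1536  = 512  -> usedpc = 93
 *    i = 3: waste = 12288 % 1536 = 0    -> usedpc = 100
 *
 * so get_pages_per_zspage(1536) returns 3, provided
 * ZS_MAX_PAGES_PER_ZSPAGE is at least 3.
 */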

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
    if (is_first_page(page))
        return page;
    else
        return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
    struct page *next;

    if (is_last_page(page))
        next = NULL;
    else if (is_first_page(page))
        next = (struct page *)page->private;
    else
        next = list_entry(page->lru.next, struct page, lru);

    return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
    unsigned long handle;

    if (!page) {
        BUG_ON(obj_idx);
        return NULL;
    }

    handle = page_to_pfn(page) << OBJ_INDEX_BITS;
    handle |= (obj_idx & OBJ_INDEX_MASK);

    return (void *)handle;
}

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(unsigned long handle, struct page **page,
                unsigned long *obj_idx)
{
    *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
    *obj_idx = handle & OBJ_INDEX_MASK;
}
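
/*
 * Handle layout (editor's sketch): the page frame number sits above the
 * object index, so with OBJ_INDEX_BITS = n (defined in zsmalloc_int.h):
 *
 *    | pfn (bits n and up) | obj_idx (bits 0..n-1) |
 *
 * For example, if OBJ_INDEX_BITS were 12, pfn = 0x1234 and obj_idx = 7
 * would encode to (0x1234 << 12) | 7 = 0x1234007.
 */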

static unsigned long obj_idx_to_offset(struct page *page,
                unsigned long obj_idx, int class_size)
{
    unsigned long off = 0;

    if (!is_first_page(page))
        off = page->index;

    return off + obj_idx * class_size;
}

static void reset_page(struct page *page)
{
    clear_bit(PG_private, &page->flags);
    clear_bit(PG_private_2, &page->flags);
    set_page_private(page, 0);
    page->mapping = NULL;
    page->freelist = NULL;
    reset_page_mapcount(page);
}

static void free_zspage(struct page *first_page)
{
    struct page *nextp, *tmp, *head_extra;

    BUG_ON(!is_first_page(first_page));
    BUG_ON(first_page->inuse);

    head_extra = (struct page *)page_private(first_page);

    reset_page(first_page);
    __free_page(first_page);

    /* zspage with only 1 system page */
    if (!head_extra)
        return;

    list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
        list_del(&nextp->lru);
        reset_page(nextp);
        __free_page(nextp);
    }
    reset_page(head_extra);
    __free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
    unsigned long off = 0;
    struct page *page = first_page;

    BUG_ON(!is_first_page(first_page));
    while (page) {
        struct page *next_page;
        struct link_free *link;
        unsigned int i, objs_on_page;

        /*
         * page->index stores offset of first object starting
         * in the page. For the first page, this is always 0,
         * so we use first_page->index (aka ->freelist) to store
         * head of corresponding zspage's freelist.
         */
        if (page != first_page)
            page->index = off;

        link = (struct link_free *)kmap_atomic(page) +
                        off / sizeof(*link);
        objs_on_page = (PAGE_SIZE - off) / class->size;

        for (i = 1; i <= objs_on_page; i++) {
            off += class->size;
            if (off < PAGE_SIZE) {
                link->next = obj_location_to_handle(page, i);
                link += class->size / sizeof(*link);
            }
        }

        /*
         * We now come to the last (full or partial) object on this
         * page, which must point to the first object on the next
         * page (if present)
         */
        next_page = get_next_page(page);
        link->next = obj_location_to_handle(next_page, 0);
        kunmap_atomic(link);
        page = next_page;
        off = (off + class->size) % PAGE_SIZE;
    }
}
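
/*
 * After init_zspage() (editor's note): each free slot begins with a
 * struct link_free whose 'next' member holds the handle of the next
 * free slot, so the freelist is threaded through the zspage itself:
 *
 *    first_page->freelist -> obj0 -> obj1 -> ... -> last obj -> NULL
 *
 * No auxiliary memory is needed; the free slots store the list in place.
 */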

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
    int i, error;
    struct page *first_page = NULL, *uninitialized_var(prev_page);

    /*
     * Allocate individual pages and link them together as:
     * 1. first page->private = first sub-page
     * 2. all sub-pages are linked together using page->lru
     * 3. each sub-page is linked to the first page using page->first_page
     *
     * For each size class, First/Head pages are linked together using
     * page->lru. Also, we set PG_private to identify the first page
     * (i.e. no other sub-page has this flag set) and PG_private_2 to
     * identify the last page.
     */
    error = -ENOMEM;
    for (i = 0; i < class->pages_per_zspage; i++) {
        struct page *page;

        page = alloc_page(flags);
        if (!page)
            goto cleanup;

        INIT_LIST_HEAD(&page->lru);
        if (i == 0) { /* first page */
            SetPagePrivate(page);
            set_page_private(page, 0);
            first_page = page;
            first_page->inuse = 0;
        }
        if (i == 1)
            first_page->private = (unsigned long)page;
        if (i >= 1)
            page->first_page = first_page;
        if (i >= 2)
            list_add(&page->lru, &prev_page->lru);
        if (i == class->pages_per_zspage - 1) /* last page */
            SetPagePrivate2(page);
        prev_page = page;
    }

    init_zspage(first_page, class);

    first_page->freelist = obj_location_to_handle(first_page, 0);
    /* Maximum number of objects we can store in this zspage */
    first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

    error = 0; /* Success */

cleanup:
    if (unlikely(error) && first_page) {
        free_zspage(first_page);
        first_page = NULL;
    }

    return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
    int i;
    struct page *page;

    for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
        page = class->fullness_list[i];
        if (page)
            break;
    }

    return page;
}

static void zs_copy_map_object(char *buf, struct page *firstpage,
                int off, int size)
{
    struct page *pages[2];
    int sizes[2];
    void *addr;

    pages[0] = firstpage;
    pages[1] = get_next_page(firstpage);
    BUG_ON(!pages[1]);

    sizes[0] = PAGE_SIZE - off;
    sizes[1] = size - sizes[0];

    /* copy object to per-cpu buffer */
    addr = kmap_atomic(pages[0]);
    memcpy(buf, addr + off, sizes[0]);
    kunmap_atomic(addr);
    addr = kmap_atomic(pages[1]);
    memcpy(buf + sizes[0], addr, sizes[1]);
    kunmap_atomic(addr);
}

static void zs_copy_unmap_object(char *buf, struct page *firstpage,
                int off, int size)
{
    struct page *pages[2];
    int sizes[2];
    void *addr;

    pages[0] = firstpage;
    pages[1] = get_next_page(firstpage);
    BUG_ON(!pages[1]);

    sizes[0] = PAGE_SIZE - off;
    sizes[1] = size - sizes[0];

    /* copy per-cpu buffer to object */
    addr = kmap_atomic(pages[0]);
    memcpy(addr + off, buf, sizes[0]);
    kunmap_atomic(addr);
    addr = kmap_atomic(pages[1]);
    memcpy(addr, buf + sizes[0], sizes[1]);
    kunmap_atomic(addr);
}
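
/*
 * Worked example for the two helpers above (editor's illustration): an
 * object of size 400 starting at offset 3900 in a 4K page crosses into
 * the following page, so sizes[0] = 4096 - 3900 = 196 bytes are copied
 * from/to the first page and sizes[1] = 400 - 196 = 204 bytes from/to
 * the second.
 */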

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
                void *pcpu)
{
    int cpu = (long)pcpu;
    struct mapping_area *area;

    switch (action) {
    case CPU_UP_PREPARE:
        area = &per_cpu(zs_map_area, cpu);
        /*
         * Make sure we don't leak memory if a cpu UP notification
         * and zs_init() race and both call zs_cpu_up() on the same cpu
         */
        if (area->vm_buf)
            return 0;
        area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!area->vm_buf)
            return notifier_from_errno(-ENOMEM);
        return 0;
    case CPU_DEAD:
    case CPU_UP_CANCELED:
        area = &per_cpu(zs_map_area, cpu);
        if (area->vm_buf)
            free_page((unsigned long)area->vm_buf);
        area->vm_buf = NULL;
        break;
    }

    return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
    .notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
    int cpu;

    for_each_online_cpu(cpu)
        zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
    unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
    int cpu, ret;

    register_cpu_notifier(&zs_cpu_nb);
    for_each_online_cpu(cpu) {
        ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
        if (notifier_to_errno(ret))
            goto fail;
    }
    return 0;
fail:
    zs_exit();
    return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
    int i, ovhd_size;
    struct zs_pool *pool;

    if (!name)
        return NULL;

    ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
    pool = kzalloc(ovhd_size, GFP_KERNEL);
    if (!pool)
        return NULL;

    for (i = 0; i < ZS_SIZE_CLASSES; i++) {
        int size;
        struct size_class *class;

        size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
        if (size > ZS_MAX_ALLOC_SIZE)
            size = ZS_MAX_ALLOC_SIZE;

        class = &pool->size_class[i];
        class->size = size;
        class->index = i;
        spin_lock_init(&class->lock);
        class->pages_per_zspage = get_pages_per_zspage(size);
    }

    pool->flags = flags;
    pool->name = name;

    return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
    int i;

    for (i = 0; i < ZS_SIZE_CLASSES; i++) {
        int fg;
        struct size_class *class = &pool->size_class[i];

        for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
            if (class->fullness_list[fg]) {
                pr_info("Freeing non-empty class with size %db, fullness group %d\n",
                    class->size, fg);
            }
        }
    }
    kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, a handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
    unsigned long obj;
    struct link_free *link;
    int class_idx;
    struct size_class *class;

    struct page *first_page, *m_page;
    unsigned long m_objidx, m_offset;

    if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
        return 0;

    class_idx = get_size_class_index(size);
    class = &pool->size_class[class_idx];
    BUG_ON(class_idx != class->index);

    spin_lock(&class->lock);
    first_page = find_get_zspage(class);

    if (!first_page) {
        spin_unlock(&class->lock);
        first_page = alloc_zspage(class, pool->flags);
        if (unlikely(!first_page))
            return 0;

        set_zspage_mapping(first_page, class->index, ZS_EMPTY);
        spin_lock(&class->lock);
        class->pages_allocated += class->pages_per_zspage;
    }

    obj = (unsigned long)first_page->freelist;
    obj_handle_to_location(obj, &m_page, &m_objidx);
    m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

    link = (struct link_free *)kmap_atomic(m_page) +
                    m_offset / sizeof(*link);
    first_page->freelist = link->next;
    memset(link, POISON_INUSE, sizeof(*link));
    kunmap_atomic(link);

    first_page->inuse++;
    /* Now move the zspage to another fullness group, if required */
    fix_fullness_group(pool, first_page);
    spin_unlock(&class->lock);

    return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, unsigned long obj)
{
    struct link_free *link;
    struct page *first_page, *f_page;
    unsigned long f_objidx, f_offset;

    unsigned int class_idx;
    struct size_class *class;
    enum fullness_group fullness;

    if (unlikely(!obj))
        return;

    obj_handle_to_location(obj, &f_page, &f_objidx);
    first_page = get_first_page(f_page);

    get_zspage_mapping(first_page, &class_idx, &fullness);
    class = &pool->size_class[class_idx];
    f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

    spin_lock(&class->lock);

    /* Insert this object in containing zspage's freelist */
    link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
                            + f_offset);
    link->next = first_page->freelist;
    kunmap_atomic(link);
    first_page->freelist = (void *)obj;

    first_page->inuse--;
    fullness = fix_fullness_group(pool, first_page);

    if (fullness == ZS_EMPTY)
        class->pages_allocated -= class->pages_per_zspage;

    spin_unlock(&class->lock);

    if (fullness == ZS_EMPTY)
        free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
            enum zs_mapmode mm)
{
    struct page *page;
    unsigned long obj_idx, off;

    unsigned int class_idx;
    enum fullness_group fg;
    struct size_class *class;
    struct mapping_area *area;

    BUG_ON(!handle);

    obj_handle_to_location(handle, &page, &obj_idx);
    get_zspage_mapping(get_first_page(page), &class_idx, &fg);
    class = &pool->size_class[class_idx];
    off = obj_idx_to_offset(page, obj_idx, class->size);

    area = &get_cpu_var(zs_map_area);
    /*
     * Record the mapping mode; zs_unmap_object() reads it to decide
     * whether a copy-back is needed.
     */
    area->vm_mm = mm;
    if (off + class->size <= PAGE_SIZE) {
        /* this object is contained entirely within a page */
        area->vm_addr = kmap_atomic(page);
        return area->vm_addr + off;
    }

    /* disable page faults to match kmap_atomic() return conditions */
    pagefault_disable();

    if (mm != ZS_MM_WO)
        zs_copy_map_object(area->vm_buf, page, off, class->size);
    area->vm_addr = NULL;
    return area->vm_buf;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
    struct page *page;
    unsigned long obj_idx, off;

    unsigned int class_idx;
    enum fullness_group fg;
    struct size_class *class;
    struct mapping_area *area;

    area = &__get_cpu_var(zs_map_area);
    /* single-page object fastpath */
    if (area->vm_addr) {
        kunmap_atomic(area->vm_addr);
        goto out;
    }

    /* no write fastpath */
    if (area->vm_mm == ZS_MM_RO)
        goto pfenable;

    BUG_ON(!handle);

    obj_handle_to_location(handle, &page, &obj_idx);
    get_zspage_mapping(get_first_page(page), &class_idx, &fg);
    class = &pool->size_class[class_idx];
    off = obj_idx_to_offset(page, obj_idx, class->size);

    zs_copy_unmap_object(area->vm_buf, page, off, class->size);

pfenable:
    /* enable page faults to match kunmap_atomic() return conditions */
    pagefault_enable();
out:
    put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
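
/*
 * Typical caller pattern (editor's sketch, not part of this file; it
 * mirrors how zram/zcache drive this API -- the pool name, gfp flags
 * and sizes are arbitrary):
 *
 *    struct zs_pool *pool = zs_create_pool("example", GFP_NOIO);
 *    unsigned long handle = zs_malloc(pool, 128);
 *    if (handle) {
 *        void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *        memcpy(dst, src, 128);    // src: caller-provided 128-byte buffer
 *        zs_unmap_object(pool, handle);
 *        ...
 *        zs_free(pool, handle);
 *    }
 *    zs_destroy_pool(pool);
 *
 * The pointer returned by zs_map_object() is only valid until the
 * matching zs_unmap_object(); callers store handles, never pointers.
 */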

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
    int i;
    u64 npages = 0;

    for (i = 0; i < ZS_SIZE_CLASSES; i++)
        npages += pool->size_class[i].pages_allocated;

    return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
