Date: 2010-05-26 07:18:31 (12 years 4 months ago)
Author: Nitin Gupta
Commit: 43551b5504231e8aa7be6a508916e873f1a7bffc
Message: Rename ramzswap to zram in code

Automated renames in code:
- rzs* -> zram*
- RZS* -> ZRAM*
- ramzswap* -> zram*

Manual changes:
- Edited comments/messages mentioning "swap"

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Files: drivers/staging/zram/zram_drv.c (25 diffs)
drivers/staging/zram/zram_drv.h (7 diffs)
drivers/staging/zram/zram_ioctl.h (3 diffs)

Change Details

drivers/staging/zram/zram_drv.c
11/*
2 * Compressed RAM based swap device
2 * Compressed RAM block device
33 *
44 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
55 *
...... 
1212 * Project home: http://compcache.googlecode.com
1313 */
1414
15#define KMSG_COMPONENT "ramzswap"
15#define KMSG_COMPONENT "zram"
1616#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
1717
1818#include <linux/module.h>
...... 
2626#include <linux/slab.h>
2727#include <linux/lzo.h>
2828#include <linux/string.h>
29#include <linux/swap.h>
30#include <linux/swapops.h>
3129#include <linux/vmalloc.h>
3230
3331#include "zram_drv.h"
3432
3533/* Globals */
36static int ramzswap_major;
37static struct ramzswap *devices;
34static int zram_major;
35static struct zram *devices;
3836
3937/* Module params (documentation at end) */
4038static unsigned int num_devices;
4139
42static int rzs_test_flag(struct ramzswap *rzs, u32 index,
43            enum rzs_pageflags flag)
40static int zram_test_flag(struct zram *zram, u32 index,
41            enum zram_pageflags flag)
4442{
45    return rzs->table[index].flags & BIT(flag);
43    return zram->table[index].flags & BIT(flag);
4644}
4745
48static void rzs_set_flag(struct ramzswap *rzs, u32 index,
49            enum rzs_pageflags flag)
46static void zram_set_flag(struct zram *zram, u32 index,
47            enum zram_pageflags flag)
5048{
51    rzs->table[index].flags |= BIT(flag);
49    zram->table[index].flags |= BIT(flag);
5250}
5351
54static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
55            enum rzs_pageflags flag)
52static void zram_clear_flag(struct zram *zram, u32 index,
53            enum zram_pageflags flag)
5654{
57    rzs->table[index].flags &= ~BIT(flag);
55    zram->table[index].flags &= ~BIT(flag);
5856}
5957
6058static int page_zero_filled(void *ptr)
...... 
7270    return 1;
7371}
7472
75static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
73static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
7674{
77    if (!rzs->disksize) {
75    if (!zram->disksize) {
7876        pr_info(
7977        "disk size not provided. You can use disksize_kb module "
8078        "param to specify size.\nUsing default: (%u%% of RAM).\n",
8179        default_disksize_perc_ram
8280        );
83        rzs->disksize = default_disksize_perc_ram *
81        zram->disksize = default_disksize_perc_ram *
8482                    (totalram_bytes / 100);
8583    }
8684
87    if (rzs->disksize > 2 * (totalram_bytes)) {
85    if (zram->disksize > 2 * (totalram_bytes)) {
8886        pr_info(
89        "There is little point creating a ramzswap of greater than "
87        "There is little point creating a zram of greater than "
9088        "twice the size of memory since we expect a 2:1 compression "
91        "ratio. Note that ramzswap uses about 0.1%% of the size of "
92        "the swap device when not in use so a huge ramzswap is "
89        "ratio. Note that zram uses about 0.1%% of the size of "
90        "the disk when not in use so a huge zram is "
9391        "wasteful.\n"
9492        "\tMemory Size: %zu kB\n"
9593        "\tSize you selected: %zu kB\n"
9694        "Continuing anyway ...\n",
97        totalram_bytes >> 10, rzs->disksize
95        totalram_bytes >> 10, zram->disksize
9896        );
9997    }
10098
101    rzs->disksize &= PAGE_MASK;
99    zram->disksize &= PAGE_MASK;
102100}
103101
104static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
105            struct ramzswap_ioctl_stats *s)
102static void zram_ioctl_get_stats(struct zram *zram,
103            struct zram_ioctl_stats *s)
106104{
107    s->disksize = rzs->disksize;
105    s->disksize = zram->disksize;
108106
109#if defined(CONFIG_RAMZSWAP_STATS)
107#if defined(CONFIG_ZRAM_STATS)
110108    {
111    struct ramzswap_stats *rs = &rzs->stats;
109    struct zram_stats *rs = &zram->stats;
112110    size_t succ_writes, mem_used;
113111    unsigned int good_compress_perc = 0, no_compress_perc = 0;
114112
115    mem_used = xv_get_total_size_bytes(rzs->mem_pool)
113    mem_used = xv_get_total_size_bytes(zram->mem_pool)
116114            + (rs->pages_expand << PAGE_SHIFT);
117    succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
118            rzs_stat64_read(rzs, &rs->failed_writes);
115    succ_writes = zram_stat64_read(zram, &rs->num_writes) -
116            zram_stat64_read(zram, &rs->failed_writes);
119117
120118    if (succ_writes && rs->pages_stored) {
121119        good_compress_perc = rs->good_compress * 100
...... 
124122                    / rs->pages_stored;
125123    }
126124
127    s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
128    s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
129    s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
130    s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
131    s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
132    s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
125    s->num_reads = zram_stat64_read(zram, &rs->num_reads);
126    s->num_writes = zram_stat64_read(zram, &rs->num_writes);
127    s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
128    s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
129    s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
130    s->notify_free = zram_stat64_read(zram, &rs->notify_free);
133131    s->pages_zero = rs->pages_zero;
134132
135133    s->good_compress_pct = good_compress_perc;
...... 
141139    s->compr_data_size = rs->compr_size;
142140    s->mem_used_total = mem_used;
143141    }
144#endif /* CONFIG_RAMZSWAP_STATS */
142#endif /* CONFIG_ZRAM_STATS */
145143}
146144
147static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
145static void zram_free_page(struct zram *zram, size_t index)
148146{
149147    u32 clen;
150148    void *obj;
151149
152    struct page *page = rzs->table[index].page;
153    u32 offset = rzs->table[index].offset;
150    struct page *page = zram->table[index].page;
151    u32 offset = zram->table[index].offset;
154152
155153    if (unlikely(!page)) {
156154        /*
157155         * No memory is allocated for zero filled pages.
158156         * Simply clear zero page flag.
159157         */
160        if (rzs_test_flag(rzs, index, RZS_ZERO)) {
161            rzs_clear_flag(rzs, index, RZS_ZERO);
162            rzs_stat_dec(&rzs->stats.pages_zero);
158        if (zram_test_flag(zram, index, ZRAM_ZERO)) {
159            zram_clear_flag(zram, index, ZRAM_ZERO);
160            zram_stat_dec(&zram->stats.pages_zero);
163161        }
164162        return;
165163    }
166164
167    if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
165    if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
168166        clen = PAGE_SIZE;
169167        __free_page(page);
170        rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
171        rzs_stat_dec(&rzs->stats.pages_expand);
168        zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
169        zram_stat_dec(&zram->stats.pages_expand);
172170        goto out;
173171    }
174172
...... 
176174    clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
177175    kunmap_atomic(obj, KM_USER0);
178176
179    xv_free(rzs->mem_pool, page, offset);
177    xv_free(zram->mem_pool, page, offset);
180178    if (clen <= PAGE_SIZE / 2)
181        rzs_stat_dec(&rzs->stats.good_compress);
179        zram_stat_dec(&zram->stats.good_compress);
182180
183181out:
184    rzs->stats.compr_size -= clen;
185    rzs_stat_dec(&rzs->stats.pages_stored);
182    zram->stats.compr_size -= clen;
183    zram_stat_dec(&zram->stats.pages_stored);
186184
187    rzs->table[index].page = NULL;
188    rzs->table[index].offset = 0;
185    zram->table[index].page = NULL;
186    zram->table[index].offset = 0;
189187}
190188
191189static void handle_zero_page(struct page *page)
...... 
199197    flush_dcache_page(page);
200198}
201199
202static void handle_uncompressed_page(struct ramzswap *rzs,
200static void handle_uncompressed_page(struct zram *zram,
203201                struct page *page, u32 index)
204202{
205203    unsigned char *user_mem, *cmem;
206204
207205    user_mem = kmap_atomic(page, KM_USER0);
208    cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
209            rzs->table[index].offset;
206    cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
207            zram->table[index].offset;
210208
211209    memcpy(user_mem, cmem, PAGE_SIZE);
212210    kunmap_atomic(user_mem, KM_USER0);
...... 
215213    flush_dcache_page(page);
216214}
217215
218static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
216static int zram_read(struct zram *zram, struct bio *bio)
219217{
220218
221219    int i;
222220    u32 index;
223221    struct bio_vec *bvec;
224222
225    rzs_stat64_inc(rzs, &rzs->stats.num_reads);
223    zram_stat64_inc(zram, &zram->stats.num_reads);
226224
227225    index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
228226    bio_for_each_segment(bvec, bio, i) {
...... 
234232
235233        page = bvec->bv_page;
236234
237        if (rzs_test_flag(rzs, index, RZS_ZERO)) {
235        if (zram_test_flag(zram, index, ZRAM_ZERO)) {
238236            handle_zero_page(page);
239237            continue;
240238        }
241239
242240        /* Requested page is not present in compressed area */
243        if (unlikely(!rzs->table[index].page)) {
241        if (unlikely(!zram->table[index].page)) {
244242            pr_debug("Read before write: sector=%lu, size=%u",
245243                (ulong)(bio->bi_sector), bio->bi_size);
246244            /* Do nothing */
...... 
248246        }
249247
250248        /* Page is stored uncompressed since it's incompressible */
251        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
252            handle_uncompressed_page(rzs, page, index);
249        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
250            handle_uncompressed_page(zram, page, index);
253251            continue;
254252        }
255253
256254        user_mem = kmap_atomic(page, KM_USER0);
257255        clen = PAGE_SIZE;
258256
259        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
260                rzs->table[index].offset;
257        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
258                zram->table[index].offset;
261259
262260        ret = lzo1x_decompress_safe(
263261            cmem + sizeof(*zheader),
...... 
271269        if (unlikely(ret != LZO_E_OK)) {
272270            pr_err("Decompression failed! err=%d, page=%u\n",
273271                ret, index);
274            rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
272            zram_stat64_inc(zram, &zram->stats.failed_reads);
275273            goto out;
276274        }
277275
...... 
288286    return 0;
289287}
290288
291static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
289static int zram_write(struct zram *zram, struct bio *bio)
292290{
293291    int i;
294292    u32 index;
295293    struct bio_vec *bvec;
296294
297    rzs_stat64_inc(rzs, &rzs->stats.num_writes);
295    zram_stat64_inc(zram, &zram->stats.num_writes);
298296
299297    index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
300298
...... 
307305        unsigned char *user_mem, *cmem, *src;
308306
309307        page = bvec->bv_page;
310        src = rzs->compress_buffer;
308        src = zram->compress_buffer;
311309
312310        /*
313311         * System overwrites unused sectors. Free memory associated
314312         * with this sector now.
315313         */
316        if (rzs->table[index].page ||
317                rzs_test_flag(rzs, index, RZS_ZERO))
318            ramzswap_free_page(rzs, index);
314        if (zram->table[index].page ||
315                zram_test_flag(zram, index, ZRAM_ZERO))
316            zram_free_page(zram, index);
319317
320        mutex_lock(&rzs->lock);
318        mutex_lock(&zram->lock);
321319
322320        user_mem = kmap_atomic(page, KM_USER0);
323321        if (page_zero_filled(user_mem)) {
324322            kunmap_atomic(user_mem, KM_USER0);
325            mutex_unlock(&rzs->lock);
326            rzs_stat_inc(&rzs->stats.pages_zero);
327            rzs_set_flag(rzs, index, RZS_ZERO);
323            mutex_unlock(&zram->lock);
324            zram_stat_inc(&zram->stats.pages_zero);
325            zram_set_flag(zram, index, ZRAM_ZERO);
328326            continue;
329327        }
330328
331329        ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
332                    rzs->compress_workmem);
330                    zram->compress_workmem);
333331
334332        kunmap_atomic(user_mem, KM_USER0);
335333
336334        if (unlikely(ret != LZO_E_OK)) {
337            mutex_unlock(&rzs->lock);
335            mutex_unlock(&zram->lock);
338336            pr_err("Compression failed! err=%d\n", ret);
339            rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
337            zram_stat64_inc(zram, &zram->stats.failed_writes);
340338            goto out;
341339        }
342340
343341        /*
344342         * Page is incompressible. Store it as-is (uncompressed)
345         * since we do not want to return too many swap write
343         * since we do not want to return too many disk write
346344         * errors which has side effect of hanging the system.
347345         */
348346        if (unlikely(clen > max_zpage_size)) {
349347            clen = PAGE_SIZE;
350348            page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
351349            if (unlikely(!page_store)) {
352                mutex_unlock(&rzs->lock);
350                mutex_unlock(&zram->lock);
353351                pr_info("Error allocating memory for "
354352                    "incompressible page: %u\n", index);
355                rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
353                zram_stat64_inc(zram,
354                    &zram->stats.failed_writes);
356355                goto out;
357356            }
358357
359358            offset = 0;
360            rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
361            rzs_stat_inc(&rzs->stats.pages_expand);
362            rzs->table[index].page = page_store;
359            zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
360            zram_stat_inc(&zram->stats.pages_expand);
361            zram->table[index].page = page_store;
363362            src = kmap_atomic(page, KM_USER0);
364363            goto memstore;
365364        }
366365
367        if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
368                &rzs->table[index].page, &offset,
366        if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
367                &zram->table[index].page, &offset,
369368                GFP_NOIO | __GFP_HIGHMEM)) {
370            mutex_unlock(&rzs->lock);
369            mutex_unlock(&zram->lock);
371370            pr_info("Error allocating memory for compressed "
372371                "page: %u, size=%zu\n", index, clen);
373            rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
372            zram_stat64_inc(zram, &zram->stats.failed_writes);
374373            goto out;
375374        }
376375
377376memstore:
378        rzs->table[index].offset = offset;
377        zram->table[index].offset = offset;
379378
380        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
381                rzs->table[index].offset;
379        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
380                zram->table[index].offset;
382381
383382#if 0
384383        /* Back-reference needed for memory defragmentation */
385        if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
384        if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
386385            zheader = (struct zobj_header *)cmem;
387386            zheader->table_idx = index;
388387            cmem += sizeof(*zheader);
...... 
392391        memcpy(cmem, src, clen);
393392
394393        kunmap_atomic(cmem, KM_USER1);
395        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
394        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
396395            kunmap_atomic(src, KM_USER0);
397396
398397        /* Update stats */
399        rzs->stats.compr_size += clen;
400        rzs_stat_inc(&rzs->stats.pages_stored);
398        zram->stats.compr_size += clen;
399        zram_stat_inc(&zram->stats.pages_stored);
401400        if (clen <= PAGE_SIZE / 2)
402            rzs_stat_inc(&rzs->stats.good_compress);
401            zram_stat_inc(&zram->stats.good_compress);
403402
404        mutex_unlock(&rzs->lock);
403        mutex_unlock(&zram->lock);
405404        index++;
406405    }
407406
...... 
417416/*
418417 * Check if request is within bounds and page aligned.
419418 */
420static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
419static inline int valid_io_request(struct zram *zram, struct bio *bio)
421420{
422421    if (unlikely(
423        (bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
422        (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
424423        (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
425424        (bio->bi_size & (PAGE_SIZE - 1)))) {
426425
...... 
432431}
433432
434433/*
435 * Handler function for all ramzswap I/O requests.
434 * Handler function for all zram I/O requests.
436435 */
437static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
436static int zram_make_request(struct request_queue *queue, struct bio *bio)
438437{
439438    int ret = 0;
440    struct ramzswap *rzs = queue->queuedata;
439    struct zram *zram = queue->queuedata;
441440
442    if (unlikely(!rzs->init_done)) {
441    if (unlikely(!zram->init_done)) {
443442        bio_io_error(bio);
444443        return 0;
445444    }
446445
447    if (!valid_io_request(rzs, bio)) {
448        rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
446    if (!valid_io_request(zram, bio)) {
447        zram_stat64_inc(zram, &zram->stats.invalid_io);
449448        bio_io_error(bio);
450449        return 0;
451450    }
452451
453452    switch (bio_data_dir(bio)) {
454453    case READ:
455        ret = ramzswap_read(rzs, bio);
454        ret = zram_read(zram, bio);
456455        break;
457456
458457    case WRITE:
459        ret = ramzswap_write(rzs, bio);
458        ret = zram_write(zram, bio);
460459        break;
461460    }
462461
463462    return ret;
464463}
465464
466static void reset_device(struct ramzswap *rzs)
465static void reset_device(struct zram *zram)
467466{
468467    size_t index;
469468
470469    /* Do not accept any new I/O request */
471    rzs->init_done = 0;
470    zram->init_done = 0;
472471
473472    /* Free various per-device buffers */
474    kfree(rzs->compress_workmem);
475    free_pages((unsigned long)rzs->compress_buffer, 1);
473    kfree(zram->compress_workmem);
474    free_pages((unsigned long)zram->compress_buffer, 1);
476475
477    rzs->compress_workmem = NULL;
478    rzs->compress_buffer = NULL;
476    zram->compress_workmem = NULL;
477    zram->compress_buffer = NULL;
479478
480    /* Free all pages that are still in this ramzswap device */
481    for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
479    /* Free all pages that are still in this zram device */
480    for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
482481        struct page *page;
483482        u16 offset;
484483
485        page = rzs->table[index].page;
486        offset = rzs->table[index].offset;
484        page = zram->table[index].page;
485        offset = zram->table[index].offset;
487486
488487        if (!page)
489488            continue;
490489
491        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
490        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
492491            __free_page(page);
493492        else
494            xv_free(rzs->mem_pool, page, offset);
493            xv_free(zram->mem_pool, page, offset);
495494    }
496495
497    vfree(rzs->table);
498    rzs->table = NULL;
496    vfree(zram->table);
497    zram->table = NULL;
499498
500    xv_destroy_pool(rzs->mem_pool);
501    rzs->mem_pool = NULL;
499    xv_destroy_pool(zram->mem_pool);
500    zram->mem_pool = NULL;
502501
503502    /* Reset stats */
504    memset(&rzs->stats, 0, sizeof(rzs->stats));
503    memset(&zram->stats, 0, sizeof(zram->stats));
505504
506    rzs->disksize = 0;
505    zram->disksize = 0;
507506}
508507
509static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
508static int zram_ioctl_init_device(struct zram *zram)
510509{
511510    int ret;
512511    size_t num_pages;
513512
514    if (rzs->init_done) {
513    if (zram->init_done) {
515514        pr_info("Device already initialized!\n");
516515        return -EBUSY;
517516    }
518517
519    ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
518    zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
520519
521    rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
522    if (!rzs->compress_workmem) {
520    zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
521    if (!zram->compress_workmem) {
523522        pr_err("Error allocating compressor working memory!\n");
524523        ret = -ENOMEM;
525524        goto fail;
526525    }
527526
528    rzs->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
529    if (!rzs->compress_buffer) {
527    zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
528    if (!zram->compress_buffer) {
530529        pr_err("Error allocating compressor buffer space\n");
531530        ret = -ENOMEM;
532531        goto fail;
533532    }
534533
535    num_pages = rzs->disksize >> PAGE_SHIFT;
536    rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
537    if (!rzs->table) {
538        pr_err("Error allocating ramzswap address table\n");
534    num_pages = zram->disksize >> PAGE_SHIFT;
535    zram->table = vmalloc(num_pages * sizeof(*zram->table));
536    if (!zram->table) {
537        pr_err("Error allocating zram address table\n");
539538        /* To prevent accessing table entries during cleanup */
540        rzs->disksize = 0;
539        zram->disksize = 0;
541540        ret = -ENOMEM;
542541        goto fail;
543542    }
544    memset(rzs->table, 0, num_pages * sizeof(*rzs->table));
543    memset(zram->table, 0, num_pages * sizeof(*zram->table));
545544
546    set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);
545    set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
547546
548    /* ramzswap devices sort of resembles non-rotational disks */
549    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
547    /* zram devices sort of resembles non-rotational disks */
548    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
550549
551    rzs->mem_pool = xv_create_pool();
552    if (!rzs->mem_pool) {
550    zram->mem_pool = xv_create_pool();
551    if (!zram->mem_pool) {
553552        pr_err("Error creating memory pool\n");
554553        ret = -ENOMEM;
555554        goto fail;
556555    }
557556
558    rzs->init_done = 1;
557    zram->init_done = 1;
559558
560559    pr_debug("Initialization done!\n");
561560    return 0;
562561
563562fail:
564    reset_device(rzs);
563    reset_device(zram);
565564
566565    pr_err("Initialization failed: err=%d\n", ret);
567566    return ret;
568567}
569568
570static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
569static int zram_ioctl_reset_device(struct zram *zram)
571570{
572    if (rzs->init_done)
573        reset_device(rzs);
571    if (zram->init_done)
572        reset_device(zram);
574573
575574    return 0;
576575}
577576
578static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
577static int zram_ioctl(struct block_device *bdev, fmode_t mode,
579578            unsigned int cmd, unsigned long arg)
580579{
581580    int ret = 0;
582581    size_t disksize_kb;
583582
584    struct ramzswap *rzs = bdev->bd_disk->private_data;
583    struct zram *zram = bdev->bd_disk->private_data;
585584
586585    switch (cmd) {
587    case RZSIO_SET_DISKSIZE_KB:
588        if (rzs->init_done) {
586    case ZRAMIO_SET_DISKSIZE_KB:
587        if (zram->init_done) {
589588            ret = -EBUSY;
590589            goto out;
591590        }
...... 
594593            ret = -EFAULT;
595594            goto out;
596595        }
597        rzs->disksize = disksize_kb << 10;
596        zram->disksize = disksize_kb << 10;
598597        pr_info("Disk size set to %zu kB\n", disksize_kb);
599598        break;
600599
601    case RZSIO_GET_STATS:
600    case ZRAMIO_GET_STATS:
602601    {
603        struct ramzswap_ioctl_stats *stats;
604        if (!rzs->init_done) {
602        struct zram_ioctl_stats *stats;
603        if (!zram->init_done) {
605604            ret = -ENOTTY;
606605            goto out;
607606        }
...... 
610609            ret = -ENOMEM;
611610            goto out;
612611        }
613        ramzswap_ioctl_get_stats(rzs, stats);
612        zram_ioctl_get_stats(zram, stats);
614613        if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
615614            kfree(stats);
616615            ret = -EFAULT;
...... 
619618        kfree(stats);
620619        break;
621620    }
622    case RZSIO_INIT:
623        ret = ramzswap_ioctl_init_device(rzs);
621    case ZRAMIO_INIT:
622        ret = zram_ioctl_init_device(zram);
624623        break;
625624
626    case RZSIO_RESET:
625    case ZRAMIO_RESET:
627626        /* Do not reset an active device! */
628627        if (bdev->bd_holders) {
629628            ret = -EBUSY;
...... 
634633        if (bdev)
635634            fsync_bdev(bdev);
636635
637        ret = ramzswap_ioctl_reset_device(rzs);
636        ret = zram_ioctl_reset_device(zram);
638637        break;
639638
640639    default:
...... 
646645    return ret;
647646}
648647
649void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
648void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
650649{
651    struct ramzswap *rzs;
650    struct zram *zram;
652651
653    rzs = bdev->bd_disk->private_data;
654    ramzswap_free_page(rzs, index);
655    rzs_stat64_inc(rzs, &rzs->stats.notify_free);
652    zram = bdev->bd_disk->private_data;
653    zram_free_page(zram, index);
654    zram_stat64_inc(zram, &zram->stats.notify_free);
656655}
657656
658static const struct block_device_operations ramzswap_devops = {
659    .ioctl = ramzswap_ioctl,
660    .swap_slot_free_notify = ramzswap_slot_free_notify,
657static const struct block_device_operations zram_devops = {
658    .ioctl = zram_ioctl,
659    .swap_slot_free_notify = zram_slot_free_notify,
661660    .owner = THIS_MODULE
662661};
663662
664static int create_device(struct ramzswap *rzs, int device_id)
663static int create_device(struct zram *zram, int device_id)
665664{
666665    int ret = 0;
667666
668    mutex_init(&rzs->lock);
669    spin_lock_init(&rzs->stat64_lock);
667    mutex_init(&zram->lock);
668    spin_lock_init(&zram->stat64_lock);
670669
671    rzs->queue = blk_alloc_queue(GFP_KERNEL);
672    if (!rzs->queue) {
670    zram->queue = blk_alloc_queue(GFP_KERNEL);
671    if (!zram->queue) {
673672        pr_err("Error allocating disk queue for device %d\n",
674673            device_id);
675674        ret = -ENOMEM;
676675        goto out;
677676    }
678677
679    blk_queue_make_request(rzs->queue, ramzswap_make_request);
680    rzs->queue->queuedata = rzs;
678    blk_queue_make_request(zram->queue, zram_make_request);
679    zram->queue->queuedata = zram;
681680
682681     /* gendisk structure */
683    rzs->disk = alloc_disk(1);
684    if (!rzs->disk) {
685        blk_cleanup_queue(rzs->queue);
682    zram->disk = alloc_disk(1);
683    if (!zram->disk) {
684        blk_cleanup_queue(zram->queue);
686685        pr_warning("Error allocating disk structure for device %d\n",
687686            device_id);
688687        ret = -ENOMEM;
689688        goto out;
690689    }
691690
692    rzs->disk->major = ramzswap_major;
693    rzs->disk->first_minor = device_id;
694    rzs->disk->fops = &ramzswap_devops;
695    rzs->disk->queue = rzs->queue;
696    rzs->disk->private_data = rzs;
697    snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);
691    zram->disk->major = zram_major;
692    zram->disk->first_minor = device_id;
693    zram->disk->fops = &zram_devops;
694    zram->disk->queue = zram->queue;
695    zram->disk->private_data = zram;
696    snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
698697
699    /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
700    set_capacity(rzs->disk, 0);
698    /* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
699    set_capacity(zram->disk, 0);
701700
702701    /*
703702     * To ensure that we always get PAGE_SIZE aligned
704703     * and n*PAGE_SIZED sized I/O requests.
705704     */
706    blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
707    blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);
708    blk_queue_io_min(rzs->disk->queue, PAGE_SIZE);
709    blk_queue_io_opt(rzs->disk->queue, PAGE_SIZE);
705    blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
706    blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
707    blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
708    blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
710709
711    add_disk(rzs->disk);
710    add_disk(zram->disk);
712711
713    rzs->init_done = 0;
712    zram->init_done = 0;
714713
715714out:
716715    return ret;
717716}
718717
719static void destroy_device(struct ramzswap *rzs)
718static void destroy_device(struct zram *zram)
720719{
721    if (rzs->disk) {
722        del_gendisk(rzs->disk);
723        put_disk(rzs->disk);
720    if (zram->disk) {
721        del_gendisk(zram->disk);
722        put_disk(zram->disk);
724723    }
725724
726    if (rzs->queue)
727        blk_cleanup_queue(rzs->queue);
725    if (zram->queue)
726        blk_cleanup_queue(zram->queue);
728727}
729728
730static int __init ramzswap_init(void)
729static int __init zram_init(void)
731730{
732731    int ret, dev_id;
733732
...... 
738737        goto out;
739738    }
740739
741    ramzswap_major = register_blkdev(0, "ramzswap");
742    if (ramzswap_major <= 0) {
740    zram_major = register_blkdev(0, "zram");
741    if (zram_major <= 0) {
743742        pr_warning("Unable to get major number\n");
744743        ret = -EBUSY;
745744        goto out;
...... 
752751
753752    /* Allocate the device array and initialize each one */
754753    pr_info("Creating %u devices ...\n", num_devices);
755    devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
754    devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
756755    if (!devices) {
757756        ret = -ENOMEM;
758757        goto unregister;
...... 
770769    while (dev_id)
771770        destroy_device(&devices[--dev_id]);
772771unregister:
773    unregister_blkdev(ramzswap_major, "ramzswap");
772    unregister_blkdev(zram_major, "zram");
774773out:
775774    return ret;
776775}
777776
778static void __exit ramzswap_exit(void)
777static void __exit zram_exit(void)
779778{
780779    int i;
781    struct ramzswap *rzs;
780    struct zram *zram;
782781
783782    for (i = 0; i < num_devices; i++) {
784        rzs = &devices[i];
783        zram = &devices[i];
785784
786        destroy_device(rzs);
787        if (rzs->init_done)
788            reset_device(rzs);
785        destroy_device(zram);
786        if (zram->init_done)
787            reset_device(zram);
789788    }
790789
791    unregister_blkdev(ramzswap_major, "ramzswap");
790    unregister_blkdev(zram_major, "zram");
792791
793792    kfree(devices);
794793    pr_debug("Cleanup done!\n");
795794}
796795
797796module_param(num_devices, uint, 0);
798MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");
797MODULE_PARM_DESC(num_devices, "Number of zram devices");
799798
800module_init(ramzswap_init);
801module_exit(ramzswap_exit);
799module_init(zram_init);
800module_exit(zram_exit);
802801
803802MODULE_LICENSE("Dual BSD/GPL");
804803MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
805MODULE_DESCRIPTION("Compressed RAM Based Swap Device");
804MODULE_DESCRIPTION("Compressed RAM Block Device");
drivers/staging/zram/zram_drv.h
11/*
2 * Compressed RAM based swap device
2 * Compressed RAM block device
33 *
44 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
55 *
...... 
1212 * Project home: http://compcache.googlecode.com
1313 */
1414
15#ifndef _RAMZSWAP_DRV_H_
16#define _RAMZSWAP_DRV_H_
15#ifndef _ZRAM_DRV_H_
16#define _ZRAM_DRV_H_
1717
1818#include <linux/spinlock.h>
1919#include <linux/mutex.h>
...... 
4141
4242/*-- Configurable parameters */
4343
44/* Default ramzswap disk size: 25% of total RAM */
44/* Default zram disk size: 25% of total RAM */
4545static const unsigned default_disksize_perc_ram = 25;
4646
4747/*
...... 
6363#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
6464#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
6565
66/* Flags for ramzswap pages (table[page_no].flags) */
67enum rzs_pageflags {
66/* Flags for zram pages (table[page_no].flags) */
67enum zram_pageflags {
6868    /* Page is stored uncompressed */
69    RZS_UNCOMPRESSED,
69    ZRAM_UNCOMPRESSED,
7070
7171    /* Page consists entirely of zeros */
72    RZS_ZERO,
72    ZRAM_ZERO,
7373
74    __NR_RZS_PAGEFLAGS,
74    __NR_ZRAM_PAGEFLAGS,
7575};
7676
7777/*-- Data structures */
7878
79/*
80 * Allocated for each swap slot, indexed by page no.
81 * These table entries must fit exactly in a page.
82 */
79/* Allocated for each disk page */
8380struct table {
8481    struct page *page;
8582    u16 offset;
...... 
8784    u8 flags;
8885} __attribute__((aligned(4)));
8986
90struct ramzswap_stats {
87struct zram_stats {
9188    /* basic stats */
9289    size_t compr_size; /* compressed size of pages stored -
9390                 * needed to enforce memlimit */
9491    /* more stats */
95#if defined(CONFIG_RAMZSWAP_STATS)
92#if defined(CONFIG_ZRAM_STATS)
9693    u64 num_reads; /* failed + successful */
9794    u64 num_writes; /* --do-- */
9895    u64 failed_reads; /* should NEVER! happen */
9996    u64 failed_writes; /* can happen when memory is too low */
100    u64 invalid_io; /* non-swap I/O requests */
97    u64 invalid_io; /* non-page-aligned I/O requests */
10198    u64 notify_free; /* no. of swap slot free notifications */
10299    u32 pages_zero; /* no. of zero filled pages */
103100    u32 pages_stored; /* no. of pages currently stored */
...... 
106103#endif
107104};
108105
109struct ramzswap {
106struct zram {
110107    struct xv_pool *mem_pool;
111108    void *compress_workmem;
112109    void *compress_buffer;
...... 
118115    struct gendisk *disk;
119116    int init_done;
120117    /*
121     * This is limit on amount of *uncompressed* worth of data
122     * we can hold. When backing swap device is provided, it is
123     * set equal to device size.
118     * This is the limit on amount of *uncompressed* worth of data
119     * we can store in a disk.
124120     */
125121    size_t disksize; /* bytes */
126122
127    struct ramzswap_stats stats;
123    struct zram_stats stats;
128124};
129125
130126/*-- */
131127
132128/* Debugging and Stats */
133#if defined(CONFIG_RAMZSWAP_STATS)
134static void rzs_stat_inc(u32 *v)
129#if defined(CONFIG_ZRAM_STATS)
130static void zram_stat_inc(u32 *v)
135131{
136132    *v = *v + 1;
137133}
138134
139static void rzs_stat_dec(u32 *v)
135static void zram_stat_dec(u32 *v)
140136{
141137    *v = *v - 1;
142138}
143139
144static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
140static void zram_stat64_inc(struct zram *zram, u64 *v)
145141{
146    spin_lock(&rzs->stat64_lock);
142    spin_lock(&zram->stat64_lock);
147143    *v = *v + 1;
148    spin_unlock(&rzs->stat64_lock);
144    spin_unlock(&zram->stat64_lock);
149145}
150146
151static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
147static u64 zram_stat64_read(struct zram *zram, u64 *v)
152148{
153149    u64 val;
154150
155    spin_lock(&rzs->stat64_lock);
151    spin_lock(&zram->stat64_lock);
156152    val = *v;
157    spin_unlock(&rzs->stat64_lock);
153    spin_unlock(&zram->stat64_lock);
158154
159155    return val;
160156}
161157#else
162#define rzs_stat_inc(v)
163#define rzs_stat_dec(v)
164#define rzs_stat64_inc(r, v)
165#define rzs_stat64_read(r, v)
166#endif /* CONFIG_RAMZSWAP_STATS */
158#define zram_stat_inc(v)
159#define zram_stat_dec(v)
160#define zram_stat64_inc(r, v)
161#define zram_stat64_read(r, v)
162#endif /* CONFIG_ZRAM_STATS */
167163
168164#endif
drivers/staging/zram/zram_ioctl.h
11/*
2 * Compressed RAM based swap device
2 * Compressed RAM block device
33 *
44 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
55 *
...... 
1212 * Project home: http://compcache.googlecode.com
1313 */
1414
15#ifndef _RAMZSWAP_IOCTL_H_
16#define _RAMZSWAP_IOCTL_H_
15#ifndef _ZRAM_IOCTL_H_
16#define _ZRAM_IOCTL_H_
1717
18struct ramzswap_ioctl_stats {
19    u64 disksize; /* user specified or equal to backing swap
20                 * size (if present) */
18struct zram_ioctl_stats {
19    u64 disksize; /* disksize in bytes (user specifies in KB) */
2120    u64 num_reads; /* failed + successful */
2221    u64 num_writes; /* --do-- */
2322    u64 failed_reads; /* should NEVER! happen */
2423    u64 failed_writes; /* can happen when memory is too low */
25    u64 invalid_io; /* non-swap I/O requests */
24    u64 invalid_io; /* non-page-aligned I/O requests */
2625    u64 notify_free; /* no. of swap slot free notifications */
2726    u32 pages_zero; /* no. of zero filled pages */
2827    u32 good_compress_pct; /* no. of pages with compression ratio<=50% */
...... 
3433    u64 mem_used_total;
3534} __attribute__ ((packed, aligned(4)));
3635
37#define RZSIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
38#define RZSIO_GET_STATS _IOR('z', 1, struct ramzswap_ioctl_stats)
39#define RZSIO_INIT _IO('z', 2)
40#define RZSIO_RESET _IO('z', 3)
36#define ZRAMIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
37#define ZRAMIO_GET_STATS _IOR('z', 1, struct zram_ioctl_stats)
38#define ZRAMIO_INIT _IO('z', 2)
39#define ZRAMIO_RESET _IO('z', 3)
4140
4241#endif

Archive Download the corresponding diff file



interactive