drivers/misc/vmw_balloon.c

/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
 */

/*
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.2.1.3-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling the rate of inflating/deflating the
 * balloon, measured in pages.
 */

/*
 * Rate of allocating memory when there is no memory pressure
 * (driver performs non-sleeping allocations).
 */
#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN 512U
#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
#define VMW_BALLOON_RATE_ALLOC_INC 16U

/*
 * Rates for releasing pages while deflating balloon.
 */
#define VMW_BALLOON_RATE_FREE_MIN 512U
#define VMW_BALLOON_RATE_FREE_MAX 16384U
#define VMW_BALLOON_RATE_FREE_INC 16U

/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES 4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)

/* Maximum number of page allocations without yielding processor */
#define VMW_BALLOON_YIELD_THRESHOLD 1024

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT 0x5670
#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
#define VMW_BALLOON_PROTOCOL_VERSION 2
#define VMW_BALLOON_GUEST_ID 1 /* Linux */

#define VMW_BALLOON_CMD_START 0
#define VMW_BALLOON_CMD_GET_TARGET 1
#define VMW_BALLOON_CMD_LOCK 2
#define VMW_BALLOON_CMD_UNLOCK 3
#define VMW_BALLOON_CMD_GUEST_ID 4

/* error codes */
#define VMW_BALLOON_SUCCESS 0
#define VMW_BALLOON_FAILURE -1
#define VMW_BALLOON_ERROR_CMD_INVALID 1
#define VMW_BALLOON_ERROR_PPN_INVALID 2
#define VMW_BALLOON_ERROR_PPN_LOCKED 3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
#define VMW_BALLOON_ERROR_PPN_PINNED 5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
#define VMW_BALLOON_ERROR_RESET 7
#define VMW_BALLOON_ERROR_BUSY 8

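/*
 * Balloon commands are issued with a single "backdoor" port read. As the
 * inline asm constraints below show, the magic number is passed in %eax,
 * the command in %ecx, the port number in %edx and the command argument
 * in %ebx; on return %eax carries the status code and %ebx the command
 * result (for example, the new target size for GET_TARGET).
 */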
#define VMWARE_BALLOON_CMD(cmd, data, result) \
({ \
    unsigned long __stat, __dummy1, __dummy2; \
    __asm__ __volatile__ ("inl (%%dx)" : \
        "=a"(__stat), \
        "=c"(__dummy1), \
        "=d"(__dummy2), \
        "=b"(result) : \
        "0"(VMW_BALLOON_HV_MAGIC), \
        "1"(VMW_BALLOON_CMD_##cmd), \
        "2"(VMW_BALLOON_HV_PORT), \
        "3"(data) : \
        "memory"); \
    result &= -1UL; \
    __stat & -1UL; \
})
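
/*
 * Typical use, as in vmballoon_send_get_target() below:
 *
 *    status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
 *    if (vmballoon_check_status(b, status))
 *        ... use "target" returned by the host ...
 */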

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
    unsigned int timer;

    /* allocation statistics */
    unsigned int alloc;
    unsigned int alloc_fail;
    unsigned int sleep_alloc;
    unsigned int sleep_alloc_fail;
    unsigned int refused_alloc;
    unsigned int refused_free;
    unsigned int free;

    /* monitor operations */
    unsigned int lock;
    unsigned int lock_fail;
    unsigned int unlock;
    unsigned int unlock_fail;
    unsigned int target;
    unsigned int target_fail;
    unsigned int start;
    unsigned int start_fail;
    unsigned int guest_type;
    unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon {

    /* list of reserved physical pages */
    struct list_head pages;

    /* transient list of non-balloonable pages */
    struct list_head refused_pages;
    unsigned int n_refused_pages;

    /* balloon size in pages */
    unsigned int size;
    unsigned int target;

    /* reset flag */
    bool reset_required;

    /* adjustment rates (pages per second) */
    unsigned int rate_alloc;
    unsigned int rate_free;

    /* slowdown page allocations for next few cycles */
    unsigned int slow_allocation_cycles;

#ifdef CONFIG_DEBUG_FS
    /* statistics */
    struct vmballoon_stats stats;

    /* debugfs file exporting statistics */
    struct dentry *dbg_entry;
#endif

    struct sysinfo sysinfo;

    struct delayed_work dwork;
};

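/*
 * A single balloon instance serves the whole guest. All balloon
 * adjustments are driven by the delayed work item below, which requeues
 * itself roughly once a second (see vmballoon_work()).
 */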
static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b)
{
    unsigned long status, dummy;

    STATS_INC(b->stats.start);

    status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
    if (status == VMW_BALLOON_SUCCESS)
        return true;

    pr_debug("%s - failed, hv returns %ld\n", __func__, status);
    STATS_INC(b->stats.start_fail);
    return false;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
    switch (status) {
    case VMW_BALLOON_SUCCESS:
        return true;

    case VMW_BALLOON_ERROR_RESET:
        b->reset_required = true;
        /* fall through */

    default:
        return false;
    }
}

/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
    unsigned long status, dummy;

    status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);

    STATS_INC(b->stats.guest_type);

    if (vmballoon_check_status(b, status))
        return true;

    pr_debug("%s - failed, hv returns %ld\n", __func__, status);
    STATS_INC(b->stats.guest_type_fail);
    return false;
}

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
    unsigned long status;
    unsigned long target;
    unsigned long limit;
    u32 limit32;

    /*
     * si_meminfo() is cheap. Moreover, we want to provide dynamic
     * max balloon size later. So let us call si_meminfo() every
     * iteration.
     */
    si_meminfo(&b->sysinfo);
    limit = b->sysinfo.totalram;

    /* Ensure limit fits in 32-bits */
    limit32 = (u32)limit;
    if (limit != limit32)
        return false;

    /* update stats */
    STATS_INC(b->stats.target);

    status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
    if (vmballoon_check_status(b, status)) {
        *new_target = target;
        return true;
    }

    pr_debug("%s - failed, hv returns %ld\n", __func__, status);
    STATS_INC(b->stats.target_fail);
    return false;
}

/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
                    unsigned int *hv_status)
{
    unsigned long status, dummy;
    u32 pfn32;

    pfn32 = (u32)pfn;
    if (pfn32 != pfn)
        return -1;

    STATS_INC(b->stats.lock);

    *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
    if (vmballoon_check_status(b, status))
        return 0;

    pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
    STATS_INC(b->stats.lock_fail);
    return 1;
}

/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
{
    unsigned long status, dummy;
    u32 pfn32;

    pfn32 = (u32)pfn;
    if (pfn32 != pfn)
        return false;

    STATS_INC(b->stats.unlock);

    status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
    if (vmballoon_check_status(b, status))
        return true;

    pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
    STATS_INC(b->stats.unlock_fail);
    return false;
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
    struct page *page, *next;
    unsigned int count = 0;

    list_for_each_entry_safe(page, next, &b->pages, lru) {
        list_del(&page->lru);
        __free_page(page);
        STATS_INC(b->stats.free);
        b->size--;

        if (++count >= b->rate_free) {
            count = 0;
            cond_resched();
        }
    }
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
    /* free all pages, skipping monitor unlock */
    vmballoon_pop(b);

    if (vmballoon_send_start(b)) {
        b->reset_required = false;
        if (!vmballoon_send_guest_id(b))
            pr_err("failed to send guest ID to the host\n");
    }
}

/*
 * Allocate (or reserve) a page for the balloon and notify the host. If host
 * refuses the page put it on "refuse" list and allocate another one until host
 * is satisfied. "Refused" pages are released at the end of inflation cycle
 * (when we allocate b->rate_alloc pages).
 */
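/*
 * Returns 0 on success, -ENOMEM if the page allocation itself failed,
 * and -EIO if the host refused too many pages or asked for a reset.
 */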
static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
{
    struct page *page;
    gfp_t flags;
    unsigned int hv_status;
    int locked;

    flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;

    do {
        if (!can_sleep)
            STATS_INC(b->stats.alloc);
        else
            STATS_INC(b->stats.sleep_alloc);

        page = alloc_page(flags);
        if (!page) {
            if (!can_sleep)
                STATS_INC(b->stats.alloc_fail);
            else
                STATS_INC(b->stats.sleep_alloc_fail);
            return -ENOMEM;
        }

        /* inform monitor */
        locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
        if (locked > 0) {
            STATS_INC(b->stats.refused_alloc);

            if (hv_status == VMW_BALLOON_ERROR_RESET ||
                hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
                __free_page(page);
                return -EIO;
            }

            /*
             * Place page on the list of non-balloonable pages
             * and retry allocation, unless we already accumulated
             * too many of them, in which case take a breather.
             */
            list_add(&page->lru, &b->refused_pages);
            if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
                return -EIO;
        }
    } while (locked != 0);

    /* track allocated page */
    list_add(&page->lru, &b->pages);

    /* update balloon size */
    b->size++;

    return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_release_page(struct vmballoon *b, struct page *page)
{
    if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
        return -EIO;

    list_del(&page->lru);

    /* deallocate page */
    __free_page(page);
    STATS_INC(b->stats.free);

    /* update balloon size */
    b->size--;

    return 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b)
{
    struct page *page, *next;

    list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
        list_del(&page->lru);
        __free_page(page);
        STATS_INC(b->stats.refused_free);
    }

    b->n_refused_pages = 0;
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
    unsigned int goal;
    unsigned int rate;
    unsigned int i;
    unsigned int allocations = 0;
    int error = 0;
    bool alloc_can_sleep = false;

    pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

    /*
     * First try NOSLEEP page allocations to inflate balloon.
     *
     * If we do not throttle nosleep allocations, we can drain all
     * free pages in the guest quickly (if the balloon target is high).
     * As a side-effect, draining free pages helps to inform (force)
     * the guest to start swapping if balloon target is not met yet,
     * which is a desired behavior. However, balloon driver can consume
     * all available CPU cycles if too many pages are allocated in a
     * second. Therefore, we throttle nosleep allocations even when
     * the guest is not under memory pressure. OTOH, if we have already
     * predicted that the guest is under memory pressure, then we
     * slowdown page allocations considerably.
     */

    goal = b->target - b->size;
    /*
     * Start with no sleep allocation rate which may be higher
     * than sleeping allocation rate.
     */
    rate = b->slow_allocation_cycles ?
            b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;

    pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
         __func__, goal, rate, b->rate_alloc);

    for (i = 0; i < goal; i++) {

        error = vmballoon_reserve_page(b, alloc_can_sleep);
        if (error) {
            if (error != -ENOMEM) {
                /*
                 * Not a page allocation failure, stop this
                 * cycle. Maybe we'll get new target from
                 * the host soon.
                 */
                break;
            }

            if (alloc_can_sleep) {
                /*
                 * CANSLEEP page allocation failed, so guest
                 * is under severe memory pressure. Quickly
                 * decrease allocation rate.
                 */
                b->rate_alloc = max(b->rate_alloc / 2,
                            VMW_BALLOON_RATE_ALLOC_MIN);
                break;
            }

            /*
             * NOSLEEP page allocation failed, so the guest is
             * under memory pressure. Let us slow down page
             * allocations for next few cycles so that the guest
             * gets out of memory pressure. Also, if we already
             * allocated b->rate_alloc pages, let's pause,
             * otherwise switch to sleeping allocations.
             */
            b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

            if (i >= b->rate_alloc)
                break;

            alloc_can_sleep = true;
            /* Lower rate for sleeping allocations. */
            rate = b->rate_alloc;
        }

        if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
            cond_resched();
            allocations = 0;
        }

        if (i >= rate) {
            /* We allocated enough pages, let's take a break. */
            break;
        }
    }

    /*
     * We reached our goal without failures so try increasing
     * allocation rate.
     */
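    /*
     * For example (hypothetical numbers): with rate_alloc == 512 and
     * i == 2048 pages allocated this cycle, mult == 4 and the rate is
     * raised to min(512 + 4 * 16, 2048) == 576 pages per cycle.
     */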
    if (error == 0 && i >= b->rate_alloc) {
        unsigned int mult = i / b->rate_alloc;

        b->rate_alloc =
            min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
                VMW_BALLOON_RATE_ALLOC_MAX);
    }

    vmballoon_release_refused_pages(b);
}

/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
    struct page *page, *next;
    unsigned int i = 0;
    unsigned int goal;
    int error;

    pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

    /* limit deallocation rate */
    goal = min(b->size - b->target, b->rate_free);

    pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);

    /* free pages to reach target */
    list_for_each_entry_safe(page, next, &b->pages, lru) {
        error = vmballoon_release_page(b, page);
        if (error) {
            /* quickly decrease rate in case of error */
            b->rate_free = max(b->rate_free / 2,
                       VMW_BALLOON_RATE_FREE_MIN);
            return;
        }

        if (++i >= goal)
            break;
    }

    /* slowly increase rate if there were no errors */
    b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
               VMW_BALLOON_RATE_FREE_MAX);
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
    unsigned int target;

    STATS_INC(b->stats.timer);

    if (b->reset_required)
        vmballoon_reset(b);

    if (b->slow_allocation_cycles > 0)
        b->slow_allocation_cycles--;

    if (vmballoon_send_get_target(b, &target)) {
        /* update target, adjust size */
        b->target = target;

        if (b->size < target)
            vmballoon_inflate(b);
        else if (b->size > target)
            vmballoon_deflate(b);
    }

    /*
     * We are using a freezable workqueue so that balloon operations are
     * stopped while the system transitions to/from sleep/hibernation.
     */
    queue_delayed_work(system_freezable_wq,
               dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
    struct vmballoon *b = f->private;
    struct vmballoon_stats *stats = &b->stats;

    /* format size info */
    seq_printf(f,
           "target: %8d pages\n"
           "current: %8d pages\n",
           b->target, b->size);

    /* format rate info */
    seq_printf(f,
           "rateNoSleepAlloc: %8d pages/sec\n"
           "rateSleepAlloc: %8d pages/sec\n"
           "rateFree: %8d pages/sec\n",
           VMW_BALLOON_NOSLEEP_ALLOC_MAX,
           b->rate_alloc, b->rate_free);

    seq_printf(f,
           "\n"
           "timer: %8u\n"
           "start: %8u (%4u failed)\n"
           "guestType: %8u (%4u failed)\n"
           "lock: %8u (%4u failed)\n"
           "unlock: %8u (%4u failed)\n"
           "target: %8u (%4u failed)\n"
           "primNoSleepAlloc: %8u (%4u failed)\n"
           "primCanSleepAlloc: %8u (%4u failed)\n"
           "primFree: %8u\n"
           "errAlloc: %8u\n"
           "errFree: %8u\n",
           stats->timer,
           stats->start, stats->start_fail,
           stats->guest_type, stats->guest_type_fail,
           stats->lock, stats->lock_fail,
           stats->unlock, stats->unlock_fail,
           stats->target, stats->target_fail,
           stats->alloc, stats->alloc_fail,
           stats->sleep_alloc, stats->sleep_alloc_fail,
           stats->free,
           stats->refused_alloc, stats->refused_free);

    return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
    return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
    .owner = THIS_MODULE,
    .open = vmballoon_debug_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
    int error;

    b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
                       &vmballoon_debug_fops);
    if (IS_ERR(b->dbg_entry)) {
        error = PTR_ERR(b->dbg_entry);
        pr_err("failed to create debugfs entry, error: %d\n", error);
        return error;
    }

    return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
    debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
    return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif /* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
    int error;

    /*
     * Check if we are running on VMware's hypervisor and bail out
     * if we are not.
     */
    if (x86_hyper != &x86_hyper_vmware)
        return -ENODEV;

    INIT_LIST_HEAD(&balloon.pages);
    INIT_LIST_HEAD(&balloon.refused_pages);

    /* initialize rates */
    balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
    balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;

    INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

    /*
     * Start balloon.
     */
    if (!vmballoon_send_start(&balloon)) {
        pr_err("failed to send start command to the host\n");
        return -EIO;
    }

    if (!vmballoon_send_guest_id(&balloon)) {
        pr_err("failed to send guest ID to the host\n");
        return -EIO;
    }

    error = vmballoon_debugfs_init(&balloon);
    if (error)
        return error;

    queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

    return 0;
}
module_init(vmballoon_init);

static void __exit vmballoon_exit(void)
{
    cancel_delayed_work_sync(&balloon.dwork);

    vmballoon_debugfs_exit(&balloon);

    /*
     * Deallocate all reserved memory, and reset connection with monitor.
     * Reset connection before deallocating memory to avoid potential for
     * additional spurious resets from guest touching deallocated pages.
     */
    vmballoon_send_start(&balloon);
    vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);

