kernel/res_counter.c

/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

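/*
 * Initialize the lock, set both limits to RES_COUNTER_MAX ("unlimited")
 * and link the counter to @parent for hierarchical charging.
 */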
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
    spin_lock_init(&counter->lock);
    counter->limit = RES_COUNTER_MAX;
    counter->soft_limit = RES_COUNTER_MAX;
    counter->parent = parent;
}

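/*
 * Remove @val from the counter's usage.  The caller must hold
 * counter->lock.  An underflow is reported with WARN_ON() and clamped so
 * that usage never goes negative; the new usage is returned.
 */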
static u64 res_counter_uncharge_locked(struct res_counter *counter,
                       unsigned long val)
{
    if (WARN_ON(counter->usage < val))
        val = counter->usage;

    counter->usage -= val;
    return counter->usage;
}

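/*
 * Add @val to the counter's usage.  The caller must hold counter->lock.
 * If the charge would exceed the limit, failcnt is bumped and -ENOMEM is
 * returned; the charge is still applied when @force is true.  max_usage
 * tracks the high-water mark.
 */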
static int res_counter_charge_locked(struct res_counter *counter,
                     unsigned long val, bool force)
{
    int ret = 0;

    if (counter->usage + val > counter->limit) {
        counter->failcnt++;
        ret = -ENOMEM;
        if (!force)
            return ret;
    }

    counter->usage += val;
    if (counter->usage > counter->max_usage)
        counter->max_usage = counter->usage;
    return ret;
}

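/*
 * Charge @val against @counter and every ancestor up to the root of the
 * hierarchy.  Without @force, a level that hits its limit aborts the walk:
 * the levels already charged are rolled back and the failing counter is
 * reported through @limit_fail_at.  With @force, every level is charged
 * regardless, and only the return value and @limit_fail_at record the
 * first failure.
 */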
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
                struct res_counter **limit_fail_at, bool force)
{
    int ret, r;
    unsigned long flags;
    struct res_counter *c, *u;

    r = ret = 0;
    *limit_fail_at = NULL;
    local_irq_save(flags);
    for (c = counter; c != NULL; c = c->parent) {
        spin_lock(&c->lock);
        r = res_counter_charge_locked(c, val, force);
        spin_unlock(&c->lock);
        if (r < 0 && !ret) {
            ret = r;
            *limit_fail_at = c;
            if (!force)
                break;
        }
    }

    if (ret < 0 && !force) {
        for (u = counter; u != c; u = u->parent) {
            spin_lock(&u->lock);
            res_counter_uncharge_locked(u, val);
            spin_unlock(&u->lock);
        }
    }
    local_irq_restore(flags);

    return ret;
}

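/*
 * The two public charge variants: res_counter_charge() fails and rolls
 * back when a limit is hit, res_counter_charge_nofail() always applies
 * the charge and only reports that a limit was exceeded.
 */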
int res_counter_charge(struct res_counter *counter, unsigned long val,
            struct res_counter **limit_fail_at)
{
    return __res_counter_charge(counter, val, limit_fail_at, false);
}

int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
                  struct res_counter **limit_fail_at)
{
    return __res_counter_charge(counter, val, limit_fail_at, true);
}

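/*
 * Uncharge @val from every level between @counter and @top (exclusive).
 * Returns the resulting usage of @counter itself.
 */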
u64 res_counter_uncharge_until(struct res_counter *counter,
                   struct res_counter *top,
                   unsigned long val)
{
    unsigned long flags;
    struct res_counter *c;
    u64 ret = 0;

    local_irq_save(flags);
    for (c = counter; c != top; c = c->parent) {
        u64 r;
        spin_lock(&c->lock);
        r = res_counter_uncharge_locked(c, val);
        if (c == counter)
            ret = r;
        spin_unlock(&c->lock);
    }
    local_irq_restore(flags);
    return ret;
}

u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
    return res_counter_uncharge_until(counter, NULL, val);
}

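/* Map a RES_* member id onto the corresponding field of the counter. */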
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
    switch (member) {
    case RES_USAGE:
        return &counter->usage;
    case RES_MAX_USAGE:
        return &counter->max_usage;
    case RES_LIMIT:
        return &counter->limit;
    case RES_FAILCNT:
        return &counter->failcnt;
    case RES_SOFT_LIMIT:
        return &counter->soft_limit;
    };

    BUG();
    return NULL;
}

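/*
 * Format one member for a userspace read: either via the caller-supplied
 * @read_strategy or as a plain decimal value, then copy it out with
 * simple_read_from_buffer().
 */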
ssize_t res_counter_read(struct res_counter *counter, int member,
        const char __user *userbuf, size_t nbytes, loff_t *pos,
        int (*read_strategy)(unsigned long long val, char *st_buf))
{
    unsigned long long *val;
    char buf[64], *s;

    s = buf;
    val = res_counter_member(counter, member);
    if (read_strategy)
        s += read_strategy(*val, s);
    else
        s += sprintf(s, "%llu\n", *val);
    return simple_read_from_buffer((void __user *)userbuf, nbytes,
            pos, buf, s - buf);
}

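/*
 * On 32-bit architectures a 64-bit counter field cannot be read in a
 * single atomic load, so the value is sampled under counter->lock; on
 * 64-bit a plain read is sufficient.
 */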
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
    unsigned long flags;
    u64 ret;

    spin_lock_irqsave(&counter->lock, flags);
    ret = *res_counter_member(counter, member);
    spin_unlock_irqrestore(&counter->lock, flags);

    return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
    return *res_counter_member(counter, member);
}
#endif

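/*
 * Parse a limit written by userspace: "-1" selects RES_COUNTER_MAX
 * (unlimited); anything else goes through memparse(), so "k", "M" and "G"
 * suffixes are accepted, and is rounded up to a page boundary, saturating
 * at RES_COUNTER_MAX if the rounding would overflow.
 */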
int res_counter_memparse_write_strategy(const char *buf,
                    unsigned long long *resp)
{
    char *end;
    unsigned long long res;

    /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
    if (*buf == '-') {
        res = simple_strtoull(buf + 1, &end, 10);
        if (res != 1 || *end != '\0')
            return -EINVAL;
        *resp = RES_COUNTER_MAX;
        return 0;
    }

    res = memparse(buf, &end);
    if (*end != '\0')
        return -EINVAL;

    if (PAGE_ALIGN(res) >= res)
        res = PAGE_ALIGN(res);
    else
        res = RES_COUNTER_MAX;

    *resp = res;

    return 0;
}