/*P:200 This contains all the /dev/lguest code, whereby the userspace
 * Launcher controls and communicates with the Guest. For example,
 * the first write will tell us the Guest's memory layout and entry
 * point. A read will run the Guest until something happens, such as
 * a signal or the Guest doing a NOTIFY out to the Launcher. There is
 * also a way for the Launcher to attach eventfds to particular NOTIFY
 * values instead of returning from the read() call.
:*/
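
/*
 * A rough sketch of the other side of this interface (it is not part of this
 * driver; the stock Launcher ships with the kernel source).  The Launcher is
 * an ordinary userspace program: it opens the character device and then
 * drives the Guest purely with write() and read() calls.  The header named
 * below is an assumption about where the LHREQ_* request numbers come from,
 * and error handling uses err(3) for brevity:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <err.h>
 *	#include <linux/lguest_launcher.h>
 *
 *	int lguest_fd = open("/dev/lguest", O_RDWR);
 *	if (lguest_fd < 0)
 *		err(1, "opening /dev/lguest");
 *
 * Every request is an array of unsigned longs whose first element is the
 * LHREQ_* value; the sketches after each handler below show the individual
 * requests built on this lguest_fd.
 */
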
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "lg.h"

/*L:056
 * Before we move on, let's jump ahead and look at what the kernel does when
 * it needs to look up the eventfds. That will complete our picture of how we
 * use RCU.
 *
 * The notification value is in cpu->pending_notify: we return true if it went
 * to an eventfd.
 */
bool send_notify_to_eventfd(struct lg_cpu *cpu)
{
	unsigned int i;
	struct lg_eventfd_map *map;

	/*
	 * This "rcu_read_lock()" helps track when someone is still looking at
	 * the (RCU-using) eventfds array. It's not actually a lock at all;
	 * indeed it's a noop in many configurations. (You didn't expect me to
	 * explain all the RCU secrets here, did you?)
	 */
	rcu_read_lock();
	/*
	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
	 * makes sure we don't access the memory pointed to by
	 * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy,
	 * but Alpha allows this! Paul McKenney points out that a really
	 * aggressive compiler could have the same effect:
	 * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
	 *
	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
	 */
	map = rcu_dereference(cpu->lg->eventfds);
	/*
	 * Simple array search: even if they add an eventfd while we do this,
	 * we'll continue to use the old array and just won't see the new one.
	 */
	for (i = 0; i < map->num; i++) {
		if (map->map[i].addr == cpu->pending_notify) {
			eventfd_signal(map->map[i].event, 1);
			cpu->pending_notify = 0;
			break;
		}
	}
	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
	rcu_read_unlock();

	/* If we cleared the notification, it's because we found a match. */
	return cpu->pending_notify == 0;
}
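
/*
 * For completeness, the consuming end of that eventfd_signal() lives in the
 * Launcher, not in this file: once an eventfd is attached with LHREQ_EVENTFD,
 * a Launcher thread simply blocks reading that eventfd and wakes when the
 * Guest's NOTIFY lands above.  A minimal sketch, where efd comes from
 * eventfd(2) and service_virtqueue() is a hypothetical stand-in for whatever
 * work the Launcher does on a notification:
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/eventfd.h>
 *
 *	int efd = eventfd(0, 0);
 *	uint64_t hits;
 *
 *	(attach efd to a NOTIFY address: see attach_eventfd below)
 *
 *	while (read(efd, &hits, sizeof(hits)) == sizeof(hits))
 *		service_virtqueue();
 */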

/*L:055
 * One of the more tricksy tricks in the Linux Kernel is a technique called
 * Read Copy Update. Since one point of lguest is to teach lguest journeyers
 * about kernel coding, I use it here. (In case you're curious, other purposes
 * include learning about virtualization and instilling a deep appreciation for
 * simplicity and puppies).
 *
 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
 * add new eventfds without ever blocking readers from accessing the array.
 * The current Launcher only does this during boot, so that never happens. But
 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
 * than this code does.
 *
 * We allocate a brand new one-larger array, copy the old one and add our new
 * element. Then we make the lg eventfd pointer point to the new array.
 * That's the easy part: now we need to free the old one, but we need to make
 * sure no slow CPU somewhere is still looking at it. That's what
 * synchronize_rcu does for us: it waits until every CPU has indicated that it
 * has moved on, so we know no one is still using the old one.
 *
 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
 */
static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
{
	struct lg_eventfd_map *new, *old = lg->eventfds;

	/*
	 * We don't allow notifications on value 0 anyway (pending_notify of
	 * 0 means "nothing pending").
	 */
	if (!addr)
		return -EINVAL;

	/*
	 * Replace the old array with the new one, carefully: others can
	 * be accessing it at the same time.
	 */
	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* First make identical copy. */
	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
	new->num = old->num;

	/* Now append new entry. */
	new->map[new->num].addr = addr;
	new->map[new->num].event = eventfd_ctx_fdget(fd);
	if (IS_ERR(new->map[new->num].event)) {
		int err = PTR_ERR(new->map[new->num].event);
		kfree(new);
		return err;
	}
	new->num++;

	/*
	 * Now put new one in place: rcu_assign_pointer() is a fancy way of
	 * doing "lg->eventfds = new", but it uses memory barriers to make
	 * absolutely sure that the contents of "new" written above are nailed
	 * down before we actually do the assignment.
	 *
	 * We have to think about these kinds of things when we're operating on
	 * live data without locks.
	 */
	rcu_assign_pointer(lg->eventfds, new);

	/*
	 * We're not in a big hurry. Wait until no one's looking at the old
	 * version, then free it.
	 */
	synchronize_rcu();
	kfree(old);

	return 0;
}

/*L:052
 * Receiving notifications from the Guest is usually done by attaching a
 * particular LHCALL_NOTIFY value to an event file descriptor. The eventfd
 * will become readable when the Guest does an LHCALL_NOTIFY with that value.
 *
 * This is really convenient for processing each virtqueue in a separate
 * thread.
 */
static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
{
	unsigned long addr, fd;
	int err;

	if (get_user(addr, input) != 0)
		return -EFAULT;
	input++;
	if (get_user(fd, input) != 0)
		return -EFAULT;

	/*
	 * Just make sure two callers don't add eventfds at once. We really
	 * only need to lock against callers adding to the same Guest, so using
	 * the Big Lguest Lock is overkill. But this is setup, not a fast path.
	 */
	mutex_lock(&lguest_lock);
	err = add_eventfd(lg, addr, fd);
	mutex_unlock(&lguest_lock);

	return err;
}
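
/*
 * Seen from the Launcher, attaching an eventfd is just another write to
 * /dev/lguest.  A sketch, reusing the lguest_fd opened earlier; notify_addr
 * is the LHCALL_NOTIFY value a virtqueue will use and efd is the eventfd to
 * signal (note that on success this write returns 0, not the length, so we
 * only check for errors):
 *
 *	unsigned long args[] = { LHREQ_EVENTFD, notify_addr, efd };
 *
 *	if (write(lguest_fd, args, sizeof(args)) < 0)
 *		err(1, "attaching eventfd to %#lx", notify_addr);
 */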

/*L:050
 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
 * number to /dev/lguest.
 */
static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
{
	unsigned long irq;

	if (get_user(irq, input) != 0)
		return -EFAULT;
	if (irq >= LGUEST_IRQS)
		return -EINVAL;

	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_interrupt(cpu, irq);
	return 0;
}
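
/*
 * From the Launcher's side, that is one more two-word write on the lguest_fd
 * opened earlier.  A sketch, where irq is the Guest interrupt number a device
 * wants to raise (again, success returns 0, so we just check for errors):
 *
 *	unsigned long args[] = { LHREQ_IRQ, irq };
 *
 *	if (write(lguest_fd, args, sizeof(args)) < 0)
 *		err(1, "injecting interrupt %lu", irq);
 */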

/*L:040
 * Once our Guest is initialized, the Launcher makes it run by reading
 * from /dev/lguest.
 */
static ssize_t read(struct file *file, char __user *user, size_t size, loff_t *o)
{
	struct lguest *lg = file->private_data;
	struct lg_cpu *cpu;
	unsigned int cpu_id = *o;

	/* You must write LHREQ_INITIALIZE first! */
	if (!lg)
		return -EINVAL;

	/* Watch out for arbitrary vcpu indexes! */
	if (cpu_id >= lg->nr_cpus)
		return -EINVAL;

	cpu = &lg->cpus[cpu_id];

	/* If you're not the task which owns the Guest, go away. */
	if (current != cpu->tsk)
		return -EPERM;

	/* If the Guest is already dead, we indicate why. */
	if (lg->dead) {
		size_t len;

		/* lg->dead either contains an error code, or a string. */
		if (IS_ERR(lg->dead))
			return PTR_ERR(lg->dead);

		/* We can only return as much as the buffer they read with. */
		len = min(size, strlen(lg->dead) + 1);
		if (copy_to_user(user, lg->dead, len) != 0)
			return -EFAULT;
		return len;
	}

	/*
	 * If we returned from read() last time because the Guest sent I/O,
	 * clear the flag.
	 */
	if (cpu->pending_notify)
		cpu->pending_notify = 0;

	/* Run the Guest until something interesting happens. */
	return run_guest(cpu, (unsigned long __user *)user);
}
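
/*
 * So the Launcher's main loop is little more than this read.  A sketch of
 * that loop: it reads one unsigned long at a time (a NOTIFY address which had
 * no eventfd attached), and since read() above only returns as much as the
 * buffer given, a dead Guest needs a second, larger read to fetch the reason
 * string from the lg->dead branch.  Treating ENOENT as "the Guest died" is an
 * assumption about what the core's run_guest() returns (it is not shown in
 * this file), and handle_output() is a hypothetical stand-in for the
 * Launcher's device handling:
 *
 *	for (;;) {
 *		unsigned long notify_addr;
 *		int r = pread(lguest_fd, &notify_addr,
 *			      sizeof(notify_addr), 0);
 *
 *		if (r == sizeof(notify_addr)) {
 *			handle_output(notify_addr);
 *		} else if (r < 0 && errno == ENOENT) {
 *			char reason[1024] = { 0 };
 *			pread(lguest_fd, reason, sizeof(reason) - 1, 0);
 *			errx(1, "Guest died: %s", reason);
 *		} else if (r < 0 && errno != EINTR) {
 *			err(1, "running guest");
 *		}
 *	}
 */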

/*L:025
 * This actually initializes a CPU. For the moment, a Guest is only
 * uniprocessor, so "id" is always 0.
 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/*
	 * We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages.
	 */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/*
	 * Now we initialize the Guest's registers, handing it the start
	 * address.
	 */
	lguest_arch_setup_regs(cpu, start_ip);

	/*
	 * We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input).
	 */
	cpu->tsk = current;

	/*
	 * We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up. If we don't keep a
	 * reference, it is destroyed before close() is called.
	 */
	cpu->mm = get_task_mm(cpu->tsk);

	/*
	 * We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice.
	 */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}

/*L:020
 * The initialization write supplies 3 pointer-sized (32 or 64 bit) values (in
 * addition to the LHREQ_INITIALIZE value). These are:
 *
 * base: The start of the Guest-physical memory inside the Launcher memory.
 *
 * pfnlimit: The highest (Guest-physical) page number the Guest should be
 * allowed to access. The Guest memory lives inside the Launcher, so it sets
 * this to ensure the Guest can only reach its own memory.
 *
 * start: The first instruction to execute ("eip" in x86-speak).
 */
static int initialize(struct file *file, const unsigned long __user *input)
{
	/* "struct lguest" contains all we (the Host) know about a Guest. */
	struct lguest *lg;
	int err;
	unsigned long args[3];

	/*
	 * We grab the Big Lguest lock, which protects against multiple
	 * simultaneous initializations.
	 */
	mutex_lock(&lguest_lock);
	/* You can't initialize twice! Close the device and start again... */
	if (file->private_data) {
		err = -EBUSY;
		goto unlock;
	}

	if (copy_from_user(args, input, sizeof(args)) != 0) {
		err = -EFAULT;
		goto unlock;
	}

	lg = kzalloc(sizeof(*lg), GFP_KERNEL);
	if (!lg) {
		err = -ENOMEM;
		goto unlock;
	}

	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
	if (!lg->eventfds) {
		err = -ENOMEM;
		goto free_lg;
	}
	lg->eventfds->num = 0;

	/* Populate the easy fields of our "struct lguest". */
	lg->mem_base = (void __user *)args[0];
	lg->pfn_limit = args[1];

	/* This is the first cpu (cpu 0) and it will start booting at args[2]. */
	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
	if (err)
		goto free_eventfds;

	/*
	 * Initialize the Guest's shadow page tables. This allocates
	 * memory, so can fail.
	 */
	err = init_guest_pagetable(lg);
	if (err)
		goto free_regs;

	/* We keep our "struct lguest" in the file's private_data. */
	file->private_data = lg;

	mutex_unlock(&lguest_lock);

	/* And because this is a write() call, we return the length used. */
	return sizeof(args);

free_regs:
	/* FIXME: This should be in free_vcpu. */
	free_page(lg->cpus[0].regs_page);
free_eventfds:
	kfree(lg->eventfds);
free_lg:
	kfree(lg);
unlock:
	mutex_unlock(&lguest_lock);
	return err;
}
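
/*
 * From the Launcher, the whole initialization is one write on the lguest_fd
 * opened earlier.  A sketch, where guest_base, guest_pages and start_ip are
 * hypothetical names for the mmap()ed block holding Guest-physical memory,
 * the number of pages the Guest may touch, and the entry point pulled from
 * the kernel image:
 *
 *	unsigned long args[] = { LHREQ_INITIALIZE,
 *				 (unsigned long)guest_base,
 *				 guest_pages, start_ip };
 *
 *	if (write(lguest_fd, args, sizeof(args)) < 0)
 *		err(1, "writing LHREQ_INITIALIZE to /dev/lguest");
 *
 * Note that on success the handler above returns the size of the three values
 * it consumed, not of the whole four-long buffer, so the Launcher just checks
 * for a negative return.
 */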

/*L:010
 * The first operation the Launcher does must be a write. All writes
 * start with an unsigned long number: for the first write this must be
 * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
 * writes of other values to send interrupts or set up receipt of
 * notifications.
 *
 * Note that we overload the "offset" in the /dev/lguest file to indicate what
 * CPU number we're dealing with. Currently this is always 0 since we only
 * support uniprocessor Guests, but you can see the beginnings of SMP support
 * here.
 */
static ssize_t write(struct file *file, const char __user *in,
		     size_t size, loff_t *off)
{
	/*
	 * Once the Guest is initialized, we hold the "struct lguest" in the
	 * file private data.
	 */
	struct lguest *lg = file->private_data;
	const unsigned long __user *input = (const unsigned long __user *)in;
	unsigned long req;
	struct lg_cpu *uninitialized_var(cpu);
	unsigned int cpu_id = *off;

	/* The first value tells us what this request is. */
	if (get_user(req, input) != 0)
		return -EFAULT;
	input++;

	/* If you haven't initialized, you must do that first. */
	if (req != LHREQ_INITIALIZE) {
		if (!lg || (cpu_id >= lg->nr_cpus))
			return -EINVAL;
		cpu = &lg->cpus[cpu_id];

		/* Once the Guest is dead, you can only read() why it died. */
		if (lg->dead)
			return -ENOENT;
	}

	switch (req) {
	case LHREQ_INITIALIZE:
		return initialize(file, input);
	case LHREQ_IRQ:
		return user_send_irq(cpu, input);
	case LHREQ_EVENTFD:
		return attach_eventfd(lg, input);
	default:
		return -EINVAL;
	}
}
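
/*
 * That offset overloading means a future SMP Launcher could address a
 * particular vcpu simply by using pread()/pwrite() with the vcpu number as
 * the file offset.  A purely illustrative sketch (today the Launcher always
 * uses offset 0, since nr_cpus is always 1):
 *
 *	unsigned long args[] = { LHREQ_IRQ, irq };
 *
 *	if (pwrite(lguest_fd, args, sizeof(args), vcpu_id) < 0)
 *		err(1, "injecting interrupt into vcpu %u", vcpu_id);
 */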

/*L:060
 * The final piece of interface code is the close() routine. It reverses
 * everything done in initialize(). This is usually called because the
 * Launcher exited.
 *
 * Note that the close routine returns 0 or a negative error number: it can't
 * really fail, but it can whine. I blame Sun for this wart, and K&R C for
 * letting them do it.
:*/
static int close(struct inode *inode, struct file *file)
{
	struct lguest *lg = file->private_data;
	unsigned int i;

	/* If we never successfully initialized, there's nothing to clean up. */
	if (!lg)
		return 0;

	/*
	 * We need the big lock, to protect from inter-guest I/O and other
	 * Launchers initializing guests.
	 */
	mutex_lock(&lguest_lock);

	/* Free up the shadow page tables for the Guest. */
	free_guest_pagetable(lg);

	for (i = 0; i < lg->nr_cpus; i++) {
		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
		hrtimer_cancel(&lg->cpus[i].hrt);
		/* We can free up the register page we allocated. */
		free_page(lg->cpus[i].regs_page);
		/*
		 * Now all the memory cleanups are done, it's safe to release
		 * the Launcher's memory management structure.
		 */
		mmput(lg->cpus[i].mm);
	}

	/* Release any eventfds they registered. */
	for (i = 0; i < lg->eventfds->num; i++)
		eventfd_ctx_put(lg->eventfds->map[i].event);
	kfree(lg->eventfds);

	/*
	 * If lg->dead doesn't contain an error code it will be NULL or a
	 * kmalloc()ed string, either of which is ok to hand to kfree().
	 */
	if (!IS_ERR(lg->dead))
		kfree(lg->dead);
	/* Free the memory allocated to the lguest_struct. */
	kfree(lg);
	/* Release lock and exit. */
	mutex_unlock(&lguest_lock);

	return 0;
}

/*L:000
 * Welcome to our journey through the Launcher!
 *
 * The Launcher is the Host userspace program which sets up, runs and services
 * the Guest. In fact, many comments in the Drivers which refer to "the Host"
 * doing things are inaccurate: the Launcher does all the device handling for
 * the Guest, but the Guest can't know that.
 *
 * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
 * shall see more of that later.
 *
 * We begin our understanding with the Host kernel interface which the Launcher
 * uses: reading and writing a character device called /dev/lguest. All the
 * work happens in the read(), write() and close() routines:
 */
static const struct file_operations lguest_fops = {
	.owner	 = THIS_MODULE,
	.release = close,
	.write	 = write,
	.read	 = read,
	.llseek  = default_llseek,
};
/*:*/

/*
 * This is a textbook example of a "misc" character device. Populate a "struct
 * miscdevice" and register it with misc_register().
 */
static struct miscdevice lguest_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "lguest",
	.fops	= &lguest_fops,
};

int __init lguest_device_init(void)
{
	return misc_register(&lguest_dev);
}

void __exit lguest_device_remove(void)
{
	misc_deregister(&lguest_dev);
}