Root/target/linux/generic/patches-3.3/050-rng_git_backport.patch

1--- a/drivers/char/random.c
2+++ b/drivers/char/random.c
3@@ -125,21 +125,26 @@
4  * The current exported interfaces for gathering environmental noise
5  * from the devices are:
6  *
7+ * void add_device_randomness(const void *buf, unsigned int size);
8  * void add_input_randomness(unsigned int type, unsigned int code,
9  * unsigned int value);
10- * void add_interrupt_randomness(int irq);
11+ * void add_interrupt_randomness(int irq, int irq_flags);
12  * void add_disk_randomness(struct gendisk *disk);
13  *
14  * add_input_randomness() uses the input layer interrupt timing, as well as
15  * the event type information from the hardware.
16  *
17- * add_interrupt_randomness() uses the inter-interrupt timing as random
18- * inputs to the entropy pool. Note that not all interrupts are good
19- * sources of randomness! For example, the timer interrupts is not a
20- * good choice, because the periodicity of the interrupts is too
21- * regular, and hence predictable to an attacker. Network Interface
22- * Controller interrupts are a better measure, since the timing of the
23- * NIC interrupts are more unpredictable.
24+ * add_interrupt_randomness() uses the interrupt timing as random
25+ * inputs to the entropy pool. Using the cycle counters and the irq source
26+ * as inputs, it feeds the randomness roughly once a second.
27+ *
28+ * add_device_randomness() is for adding data to the random pool that
29+ * is likely to differ between two devices (or possibly even per boot).
30+ * This would be things like MAC addresses or serial numbers, or the
31+ * read-out of the RTC. This does *not* add any actual entropy to the
32+ * pool, but it initializes the pool to different values for devices
33+ * that might otherwise be identical and have very little entropy
34+ * available to them (particularly common in the embedded world).
35  *
36  * add_disk_randomness() uses what amounts to the seek time of block
37  * layer request events, on a per-disk_devt basis, as input to the
38@@ -248,6 +253,7 @@
39 #include <linux/percpu.h>
40 #include <linux/cryptohash.h>
41 #include <linux/fips.h>
42+#include <linux/ptrace.h>
43 
44 #ifdef CONFIG_GENERIC_HARDIRQS
45 # include <linux/irq.h>
46@@ -256,8 +262,12 @@
47 #include <asm/processor.h>
48 #include <asm/uaccess.h>
49 #include <asm/irq.h>
50+#include <asm/irq_regs.h>
51 #include <asm/io.h>
52 
53+#define CREATE_TRACE_POINTS
54+#include <trace/events/random.h>
55+
56 /*
57  * Configuration information
58  */
59@@ -420,8 +430,10 @@ struct entropy_store {
60     /* read-write data: */
61     spinlock_t lock;
62     unsigned add_ptr;
63+ unsigned input_rotate;
64     int entropy_count;
65- int input_rotate;
66+ int entropy_total;
67+ unsigned int initialized:1;
68     __u8 last_data[EXTRACT_SIZE];
69 };
70 
71@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_
72     .pool = nonblocking_pool_data
73 };
74 
75+static __u32 const twist_table[8] = {
76+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
77+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
78+
79 /*
80  * This function adds bytes into the entropy "pool". It does not
81  * update the entropy estimate. The caller should call
82@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_
83  * it's cheap to do so and helps slightly in the expected case where
84  * the entropy is concentrated in the low-order bits.
85  */
86-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
87- int nbytes, __u8 out[64])
88+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
89+ int nbytes, __u8 out[64])
90 {
91- static __u32 const twist_table[8] = {
92- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
93- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
94     unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
95     int input_rotate;
96     int wordmask = r->poolinfo->poolwords - 1;
97     const char *bytes = in;
98     __u32 w;
99- unsigned long flags;
100 
101- /* Taps are constant, so we can load them without holding r->lock. */
102     tap1 = r->poolinfo->tap1;
103     tap2 = r->poolinfo->tap2;
104     tap3 = r->poolinfo->tap3;
105     tap4 = r->poolinfo->tap4;
106     tap5 = r->poolinfo->tap5;
107 
108- spin_lock_irqsave(&r->lock, flags);
109- input_rotate = r->input_rotate;
110- i = r->add_ptr;
111+ smp_rmb();
112+ input_rotate = ACCESS_ONCE(r->input_rotate);
113+ i = ACCESS_ONCE(r->add_ptr);
114 
115     /* mix one byte at a time to simplify size handling and churn faster */
116     while (nbytes--) {
117@@ -513,19 +524,61 @@ static void mix_pool_bytes_extract(struc
118         input_rotate += i ? 7 : 14;
119     }
120 
121- r->input_rotate = input_rotate;
122- r->add_ptr = i;
123+ ACCESS_ONCE(r->input_rotate) = input_rotate;
124+ ACCESS_ONCE(r->add_ptr) = i;
125+ smp_wmb();
126 
127     if (out)
128         for (j = 0; j < 16; j++)
129             ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
130+}
131+
132+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
133+ int nbytes, __u8 out[64])
134+{
135+ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
136+ _mix_pool_bytes(r, in, nbytes, out);
137+}
138 
139+static void mix_pool_bytes(struct entropy_store *r, const void *in,
140+ int nbytes, __u8 out[64])
141+{
142+ unsigned long flags;
143+
144+ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
145+ spin_lock_irqsave(&r->lock, flags);
146+ _mix_pool_bytes(r, in, nbytes, out);
147     spin_unlock_irqrestore(&r->lock, flags);
148 }
149 
150-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
151+struct fast_pool {
152+ __u32 pool[4];
153+ unsigned long last;
154+ unsigned short count;
155+ unsigned char rotate;
156+ unsigned char last_timer_intr;
157+};
158+
159+/*
160+ * This is a fast mixing routine used by the interrupt randomness
161+ * collector. It's hardcoded for an 128 bit pool and assumes that any
162+ * locks that might be needed are taken by the caller.
163+ */
164+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
165 {
166- mix_pool_bytes_extract(r, in, bytes, NULL);
167+ const char *bytes = in;
168+ __u32 w;
169+ unsigned i = f->count;
170+ unsigned input_rotate = f->rotate;
171+
172+ while (nbytes--) {
173+ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
174+ f->pool[(i + 1) & 3];
175+ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
176+ input_rotate += (i++ & 3) ? 7 : 14;
177+ }
178+ f->count = i;
179+ f->rotate = input_rotate;
180 }
181 
182 /*
183@@ -533,30 +586,38 @@ static void mix_pool_bytes(struct entrop
184  */
185 static void credit_entropy_bits(struct entropy_store *r, int nbits)
186 {
187- unsigned long flags;
188- int entropy_count;
189+ int entropy_count, orig;
190 
191     if (!nbits)
192         return;
193 
194- spin_lock_irqsave(&r->lock, flags);
195-
196     DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
197- entropy_count = r->entropy_count;
198+retry:
199+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
200     entropy_count += nbits;
201+
202     if (entropy_count < 0) {
203         DEBUG_ENT("negative entropy/overflow\n");
204         entropy_count = 0;
205     } else if (entropy_count > r->poolinfo->POOLBITS)
206         entropy_count = r->poolinfo->POOLBITS;
207- r->entropy_count = entropy_count;
208+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
209+ goto retry;
210+
211+ if (!r->initialized && nbits > 0) {
212+ r->entropy_total += nbits;
213+ if (r->entropy_total > 128)
214+ r->initialized = 1;
215+ }
216+
217+ trace_credit_entropy_bits(r->name, nbits, entropy_count,
218+ r->entropy_total, _RET_IP_);
219 
220     /* should we wake readers? */
221     if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
222         wake_up_interruptible(&random_read_wait);
223         kill_fasync(&fasync, SIGIO, POLL_IN);
224     }
225- spin_unlock_irqrestore(&r->lock, flags);
226 }
227 
228 /*********************************************************************
229@@ -609,6 +670,25 @@ static void set_timer_rand_state(unsigne
230 }
231 #endif
232 
233+/*
234+ * Add device- or boot-specific data to the input and nonblocking
235+ * pools to help initialize them to unique values.
236+ *
237+ * None of this adds any entropy, it is meant to avoid the
238+ * problem of the nonblocking pool having similar initial state
239+ * across largely identical devices.
240+ */
241+void add_device_randomness(const void *buf, unsigned int size)
242+{
243+ unsigned long time = get_cycles() ^ jiffies;
244+
245+ mix_pool_bytes(&input_pool, buf, size, NULL);
246+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
247+ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
248+ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
249+}
250+EXPORT_SYMBOL(add_device_randomness);
251+
252 static struct timer_rand_state input_timer_state;
253 
254 /*
255@@ -637,13 +717,9 @@ static void add_timer_randomness(struct
256         goto out;
257 
258     sample.jiffies = jiffies;
259-
260- /* Use arch random value, fall back to cycles */
261- if (!arch_get_random_int(&sample.cycles))
262- sample.cycles = get_cycles();
263-
264+ sample.cycles = get_cycles();
265     sample.num = num;
266- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
267+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
268 
269     /*
270      * Calculate number of bits of randomness we probably added.
271@@ -700,17 +776,48 @@ void add_input_randomness(unsigned int t
272 }
273 EXPORT_SYMBOL_GPL(add_input_randomness);
274 
275-void add_interrupt_randomness(int irq)
276+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
277+
278+void add_interrupt_randomness(int irq, int irq_flags)
279 {
280- struct timer_rand_state *state;
281+ struct entropy_store *r;
282+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
283+ struct pt_regs *regs = get_irq_regs();
284+ unsigned long now = jiffies;
285+ __u32 input[4], cycles = get_cycles();
286+
287+ input[0] = cycles ^ jiffies;
288+ input[1] = irq;
289+ if (regs) {
290+ __u64 ip = instruction_pointer(regs);
291+ input[2] = ip;
292+ input[3] = ip >> 32;
293+ }
294 
295- state = get_timer_rand_state(irq);
296+ fast_mix(fast_pool, input, sizeof(input));
297 
298- if (state == NULL)
299+ if ((fast_pool->count & 1023) &&
300+ !time_after(now, fast_pool->last + HZ))
301         return;
302 
303- DEBUG_ENT("irq event %d\n", irq);
304- add_timer_randomness(state, 0x100 + irq);
305+ fast_pool->last = now;
306+
307+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
308+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
309+ /*
310+ * If we don't have a valid cycle counter, and we see
311+ * back-to-back timer interrupts, then skip giving credit for
312+ * any entropy.
313+ */
314+ if (cycles == 0) {
315+ if (irq_flags & __IRQF_TIMER) {
316+ if (fast_pool->last_timer_intr)
317+ return;
318+ fast_pool->last_timer_intr = 1;
319+ } else
320+ fast_pool->last_timer_intr = 0;
321+ }
322+ credit_entropy_bits(r, 1);
323 }
324 
325 #ifdef CONFIG_BLOCK
326@@ -742,7 +849,11 @@ static ssize_t extract_entropy(struct en
327  */
328 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
329 {
330- __u32 tmp[OUTPUT_POOL_WORDS];
331+ union {
332+ __u32 tmp[OUTPUT_POOL_WORDS];
333+ long hwrand[4];
334+ } u;
335+ int i;
336 
337     if (r->pull && r->entropy_count < nbytes * 8 &&
338         r->entropy_count < r->poolinfo->POOLBITS) {
339@@ -753,17 +864,22 @@ static void xfer_secondary_pool(struct e
340         /* pull at least as many as BYTES as wakeup BITS */
341         bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
342         /* but never more than the buffer size */
343- bytes = min_t(int, bytes, sizeof(tmp));
344+ bytes = min_t(int, bytes, sizeof(u.tmp));
345 
346         DEBUG_ENT("going to reseed %s with %d bits "
347               "(%d of %d requested)\n",
348               r->name, bytes * 8, nbytes * 8, r->entropy_count);
349 
350- bytes = extract_entropy(r->pull, tmp, bytes,
351+ bytes = extract_entropy(r->pull, u.tmp, bytes,
352                     random_read_wakeup_thresh / 8, rsvd);
353- mix_pool_bytes(r, tmp, bytes);
354+ mix_pool_bytes(r, u.tmp, bytes, NULL);
355         credit_entropy_bits(r, bytes*8);
356     }
357+ for (i = 0; i < 4; i++)
358+ if (arch_get_random_long(&u.hwrand[i]))
359+ break;
360+ if (i)
361+ mix_pool_bytes(r, &u.hwrand, i * sizeof(u.hwrand[0]), 0);
362 }
363 
364 /*
365@@ -822,9 +938,11 @@ static void extract_buf(struct entropy_s
366     int i;
367     __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
368     __u8 extract[64];
369+ unsigned long flags;
370 
371     /* Generate a hash across the pool, 16 words (512 bits) at a time */
372     sha_init(hash);
373+ spin_lock_irqsave(&r->lock, flags);
374     for (i = 0; i < r->poolinfo->poolwords; i += 16)
375         sha_transform(hash, (__u8 *)(r->pool + i), workspace);
376 
377@@ -837,7 +955,8 @@ static void extract_buf(struct entropy_s
378      * brute-forcing the feedback as hard as brute-forcing the
379      * hash.
380      */
381- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
382+ __mix_pool_bytes(r, hash, sizeof(hash), extract);
383+ spin_unlock_irqrestore(&r->lock, flags);
384 
385     /*
386      * To avoid duplicates, we atomically extract a portion of the
387@@ -860,12 +979,12 @@ static void extract_buf(struct entropy_s
388 }
389 
390 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
391- size_t nbytes, int min, int reserved)
392+ size_t nbytes, int min, int reserved)
393 {
394     ssize_t ret = 0, i;
395     __u8 tmp[EXTRACT_SIZE];
396- unsigned long flags;
397 
398+ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
399     xfer_secondary_pool(r, nbytes);
400     nbytes = account(r, nbytes, min, reserved);
401 
402@@ -873,6 +992,8 @@ static ssize_t extract_entropy(struct en
403         extract_buf(r, tmp);
404 
405         if (fips_enabled) {
406+ unsigned long flags;
407+
408             spin_lock_irqsave(&r->lock, flags);
409             if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
410                 panic("Hardware RNG duplicated output!\n");
411@@ -898,6 +1019,7 @@ static ssize_t extract_entropy_user(stru
412     ssize_t ret = 0, i;
413     __u8 tmp[EXTRACT_SIZE];
414 
415+ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
416     xfer_secondary_pool(r, nbytes);
417     nbytes = account(r, nbytes, 0, 0);
418 
419@@ -931,17 +1053,35 @@ static ssize_t extract_entropy_user(stru
420 
421 /*
422  * This function is the exported kernel interface. It returns some
423- * number of good random numbers, suitable for seeding TCP sequence
424- * numbers, etc.
425+ * number of good random numbers, suitable for key generation, seeding
426+ * TCP sequence numbers, etc. It does not use the hw random number
427+ * generator, if available; use get_random_bytes_arch() for that.
428  */
429 void get_random_bytes(void *buf, int nbytes)
430 {
431+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
432+}
433+EXPORT_SYMBOL(get_random_bytes);
434+
435+/*
436+ * This function will use the architecture-specific hardware random
437+ * number generator if it is available. The arch-specific hw RNG will
438+ * almost certainly be faster than what we can do in software, but it
439+ * is impossible to verify that it is implemented securely (as
440+ * opposed, to, say, the AES encryption of a sequence number using a
441+ * key known by the NSA). So it's useful if we need the speed, but
442+ * only if we're willing to trust the hardware manufacturer not to
443+ * have put in a back door.
444+ */
445+void get_random_bytes_arch(void *buf, int nbytes)
446+{
447     char *p = buf;
448 
449+ trace_get_random_bytes(nbytes, _RET_IP_);
450     while (nbytes) {
451         unsigned long v;
452         int chunk = min(nbytes, (int)sizeof(unsigned long));
453-
454+
455         if (!arch_get_random_long(&v))
456             break;
457         
458@@ -950,9 +1090,11 @@ void get_random_bytes(void *buf, int nby
459         nbytes -= chunk;
460     }
461 
462- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
463+ if (nbytes)
464+ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
465 }
466-EXPORT_SYMBOL(get_random_bytes);
467+EXPORT_SYMBOL(get_random_bytes_arch);
468+
469 
470 /*
471  * init_std_data - initialize pool with system data
472@@ -966,21 +1108,18 @@ EXPORT_SYMBOL(get_random_bytes);
473 static void init_std_data(struct entropy_store *r)
474 {
475     int i;
476- ktime_t now;
477- unsigned long flags;
478+ ktime_t now = ktime_get_real();
479+ unsigned long rv;
480 
481- spin_lock_irqsave(&r->lock, flags);
482     r->entropy_count = 0;
483- spin_unlock_irqrestore(&r->lock, flags);
484-
485- now = ktime_get_real();
486- mix_pool_bytes(r, &now, sizeof(now));
487- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
488- if (!arch_get_random_long(&flags))
489+ r->entropy_total = 0;
490+ mix_pool_bytes(r, &now, sizeof(now), NULL);
491+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
492+ if (!arch_get_random_long(&rv))
493             break;
494- mix_pool_bytes(r, &flags, sizeof(flags));
495+ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
496     }
497- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
498+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
499 }
500 
501 static int rand_initialize(void)
502@@ -1117,7 +1256,7 @@ write_pool(struct entropy_store *r, cons
503         count -= bytes;
504         p += bytes;
505 
506- mix_pool_bytes(r, buf, bytes);
507+ mix_pool_bytes(r, buf, bytes, NULL);
508         cond_resched();
509     }
510 
511@@ -1274,6 +1413,7 @@ static int proc_do_uuid(ctl_table *table
512 }
513 
514 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
515+extern ctl_table random_table[];
516 ctl_table random_table[] = {
517     {
518         .procname = "poolsize",
519@@ -1339,7 +1479,7 @@ late_initcall(random_int_secret_init);
520  * value is not cryptographically secure but for several uses the cost of
521  * depleting entropy is too high
522  */
523-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
524+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
525 unsigned int get_random_int(void)
526 {
527     __u32 *hash;
528--- a/drivers/mfd/ab3100-core.c
529+++ b/drivers/mfd/ab3100-core.c
530@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(in
531     u32 fatevent;
532     int err;
533 
534- add_interrupt_randomness(irq);
535-
536     err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
537                        event_regs, 3);
538     if (err)
539--- a/drivers/usb/core/hub.c
540+++ b/drivers/usb/core/hub.c
541@@ -24,6 +24,7 @@
542 #include <linux/kthread.h>
543 #include <linux/mutex.h>
544 #include <linux/freezer.h>
545+#include <linux/random.h>
546 
547 #include <asm/uaccess.h>
548 #include <asm/byteorder.h>
549@@ -1896,6 +1897,14 @@ int usb_new_device(struct usb_device *ud
550     /* Tell the world! */
551     announce_device(udev);
552 
553+ if (udev->serial)
554+ add_device_randomness(udev->serial, strlen(udev->serial));
555+ if (udev->product)
556+ add_device_randomness(udev->product, strlen(udev->product));
557+ if (udev->manufacturer)
558+ add_device_randomness(udev->manufacturer,
559+ strlen(udev->manufacturer));
560+
561     device_enable_async_suspend(&udev->dev);
562     /* Register the device. The device driver is responsible
563      * for configuring the device and invoking the add-device
564--- a/include/linux/random.h
565+++ b/include/linux/random.h
566@@ -50,11 +50,13 @@ struct rnd_state {
567 
568 extern void rand_initialize_irq(int irq);
569 
570+extern void add_device_randomness(const void *, unsigned int);
571 extern void add_input_randomness(unsigned int type, unsigned int code,
572                  unsigned int value);
573-extern void add_interrupt_randomness(int irq);
574+extern void add_interrupt_randomness(int irq, int irq_flags);
575 
576 extern void get_random_bytes(void *buf, int nbytes);
577+extern void get_random_bytes_arch(void *buf, int nbytes);
578 void generate_random_uuid(unsigned char uuid_out[16]);
579 
580 #ifndef MODULE
581--- /dev/null
582+++ b/include/trace/events/random.h
583@@ -0,0 +1,134 @@
584+#undef TRACE_SYSTEM
585+#define TRACE_SYSTEM random
586+
587+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
588+#define _TRACE_RANDOM_H
589+
590+#include <linux/writeback.h>
591+#include <linux/tracepoint.h>
592+
593+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
594+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
595+
596+ TP_ARGS(pool_name, bytes, IP),
597+
598+ TP_STRUCT__entry(
599+ __field( const char *, pool_name )
600+ __field( int, bytes )
601+ __field(unsigned long, IP )
602+ ),
603+
604+ TP_fast_assign(
605+ __entry->pool_name = pool_name;
606+ __entry->bytes = bytes;
607+ __entry->IP = IP;
608+ ),
609+
610+ TP_printk("%s pool: bytes %d caller %pF",
611+ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
612+);
613+
614+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
615+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
616+
617+ TP_ARGS(pool_name, bytes, IP)
618+);
619+
620+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
621+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
622+
623+ TP_ARGS(pool_name, bytes, IP)
624+);
625+
626+TRACE_EVENT(credit_entropy_bits,
627+ TP_PROTO(const char *pool_name, int bits, int entropy_count,
628+ int entropy_total, unsigned long IP),
629+
630+ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
631+
632+ TP_STRUCT__entry(
633+ __field( const char *, pool_name )
634+ __field( int, bits )
635+ __field( int, entropy_count )
636+ __field( int, entropy_total )
637+ __field(unsigned long, IP )
638+ ),
639+
640+ TP_fast_assign(
641+ __entry->pool_name = pool_name;
642+ __entry->bits = bits;
643+ __entry->entropy_count = entropy_count;
644+ __entry->entropy_total = entropy_total;
645+ __entry->IP = IP;
646+ ),
647+
648+ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
649+ "caller %pF", __entry->pool_name, __entry->bits,
650+ __entry->entropy_count, __entry->entropy_total,
651+ (void *)__entry->IP)
652+);
653+
654+TRACE_EVENT(get_random_bytes,
655+ TP_PROTO(int nbytes, unsigned long IP),
656+
657+ TP_ARGS(nbytes, IP),
658+
659+ TP_STRUCT__entry(
660+ __field( int, nbytes )
661+ __field(unsigned long, IP )
662+ ),
663+
664+ TP_fast_assign(
665+ __entry->nbytes = nbytes;
666+ __entry->IP = IP;
667+ ),
668+
669+ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
670+);
671+
672+DECLARE_EVENT_CLASS(random__extract_entropy,
673+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
674+ unsigned long IP),
675+
676+ TP_ARGS(pool_name, nbytes, entropy_count, IP),
677+
678+ TP_STRUCT__entry(
679+ __field( const char *, pool_name )
680+ __field( int, nbytes )
681+ __field( int, entropy_count )
682+ __field(unsigned long, IP )
683+ ),
684+
685+ TP_fast_assign(
686+ __entry->pool_name = pool_name;
687+ __entry->nbytes = nbytes;
688+ __entry->entropy_count = entropy_count;
689+ __entry->IP = IP;
690+ ),
691+
692+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
693+ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
694+ (void *)__entry->IP)
695+);
696+
697+
698+DEFINE_EVENT(random__extract_entropy, extract_entropy,
699+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
700+ unsigned long IP),
701+
702+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
703+);
704+
705+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
706+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
707+ unsigned long IP),
708+
709+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
710+);
711+
712+
713+
714+#endif /* _TRACE_RANDOM_H */
715+
716+/* This part must be outside protection */
717+#include <trace/define_trace.h>
718--- a/kernel/irq/handle.c
719+++ b/kernel/irq/handle.c
720@@ -117,7 +117,7 @@ irqreturn_t
721 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
722 {
723     irqreturn_t retval = IRQ_NONE;
724- unsigned int random = 0, irq = desc->irq_data.irq;
725+ unsigned int flags = 0, irq = desc->irq_data.irq;
726 
727     do {
728         irqreturn_t res;
729@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc
730 
731             /* Fall through to add to randomness */
732         case IRQ_HANDLED:
733- random |= action->flags;
734+ flags |= action->flags;
735             break;
736 
737         default:
738@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc
739         action = action->next;
740     } while (action);
741 
742- if (random & IRQF_SAMPLE_RANDOM)
743- add_interrupt_randomness(irq);
744+ add_interrupt_randomness(irq, flags);
745 
746     if (!noirqdebug)
747         note_interrupt(irq, desc, retval);
748--- a/net/core/dev.c
749+++ b/net/core/dev.c
750@@ -1176,6 +1176,7 @@ static int __dev_open(struct net_device
751         net_dmaengine_get();
752         dev_set_rx_mode(dev);
753         dev_activate(dev);
754+ add_device_randomness(dev->dev_addr, dev->addr_len);
755     }
756 
757     return ret;
758@@ -4823,6 +4824,7 @@ int dev_set_mac_address(struct net_devic
759     err = ops->ndo_set_mac_address(dev, sa);
760     if (!err)
761         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
762+ add_device_randomness(dev->dev_addr, dev->addr_len);
763     return err;
764 }
765 EXPORT_SYMBOL(dev_set_mac_address);
766@@ -5602,6 +5604,7 @@ int register_netdevice(struct net_device
767     dev_init_scheduler(dev);
768     dev_hold(dev);
769     list_netdevice(dev);
770+ add_device_randomness(dev->dev_addr, dev->addr_len);
771 
772     /* Notify protocols, that a new device appeared. */
773     ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
774--- a/net/core/rtnetlink.c
775+++ b/net/core/rtnetlink.c
776@@ -1371,6 +1371,7 @@ static int do_setlink(struct net_device
777             goto errout;
778         send_addr_notify = 1;
779         modified = 1;
780+ add_device_randomness(dev->dev_addr, dev->addr_len);
781     }
782 
783     if (tb[IFLA_MTU]) {
784
