Root/
1 | /* |
2 | * Tty buffer allocation management |
3 | */ |
4 | |
5 | #include <linux/types.h> |
6 | #include <linux/errno.h> |
7 | #include <linux/tty.h> |
8 | #include <linux/tty_driver.h> |
9 | #include <linux/tty_flip.h> |
10 | #include <linux/timer.h> |
11 | #include <linux/string.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/sched.h> |
14 | #include <linux/init.h> |
15 | #include <linux/wait.h> |
16 | #include <linux/bitops.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/module.h> |
19 | #include <linux/ratelimit.h> |
20 | |
21 | |
22 | #define MIN_TTYB_SIZE 256 |
23 | #define TTYB_ALIGN_MASK 255 |
24 | |
25 | /* |
26 | * Byte threshold to limit memory consumption for flip buffers. |
27 | * The actual memory limit is > 2x this amount. |
28 | */ |
29 | #define TTYB_MEM_LIMIT 65536 |
30 | |
/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */
38 | |
39 | #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) |
40 | |
41 | |
/**
 *	tty_buffer_lock_exclusive	-	gain exclusive access to buffer
 *	tty_buffer_unlock_exclusive	-	release exclusive access
 *
 *	@port: tty_port owning the flip buffer
 *
 *	Guarantees safe use of the line discipline's receive_buf() method by
 *	excluding the buffer work and any pending flush from using the flip
 *	buffer. Data can continue to be added concurrently to the flip buffer
 *	from the driver side.
 *
 *	On release, the buffer work is restarted if there is data in the
 *	flip buffer
 */

void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	/* Raise priority before contending for the mutex: a running
	 * flush_to_ldisc() polls buf->priority each loop iteration and
	 * drops buf->lock when it sees it non-zero, so we are not stuck
	 * behind an arbitrarily long flush.
	 */
	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
64 | |
/* Release exclusive access taken by tty_buffer_lock_exclusive() and kick
 * the buffer work if committed-but-unread data accumulated while we held
 * the flip buffer.
 */
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	/* Sample while still holding buf->lock: any gap between commit and
	 * read in the head buffer means the work item has data to process.
	 */
	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
77 | |
78 | /** |
79 | * tty_buffer_space_avail - return unused buffer space |
80 | * @port - tty_port owning the flip buffer |
81 | * |
82 | * Returns the # of bytes which can be written by the driver without |
83 | * reaching the buffer limit. |
84 | * |
85 | * Note: this does not guarantee that memory is available to write |
86 | * the returned # of bytes (use tty_prepare_flip_string_xxx() to |
87 | * pre-allocate if memory guarantee is required). |
88 | */ |
89 | |
90 | int tty_buffer_space_avail(struct tty_port *port) |
91 | { |
92 | int space = TTYB_MEM_LIMIT - atomic_read(&port->buf.memory_used); |
93 | return max(space, 0); |
94 | } |
95 | |
96 | static void tty_buffer_reset(struct tty_buffer *p, size_t size) |
97 | { |
98 | p->used = 0; |
99 | p->size = size; |
100 | p->next = NULL; |
101 | p->commit = 0; |
102 | p->read = 0; |
103 | } |
104 | |
105 | /** |
106 | * tty_buffer_free_all - free buffers used by a tty |
107 | * @tty: tty to free from |
108 | * |
109 | * Remove all the buffers pending on a tty whether queued with data |
110 | * or in the free ring. Must be called when the tty is no longer in use |
111 | */ |
112 | |
113 | void tty_buffer_free_all(struct tty_port *port) |
114 | { |
115 | struct tty_bufhead *buf = &port->buf; |
116 | struct tty_buffer *p, *next; |
117 | struct llist_node *llist; |
118 | |
119 | while ((p = buf->head) != NULL) { |
120 | buf->head = p->next; |
121 | if (p->size > 0) |
122 | kfree(p); |
123 | } |
124 | llist = llist_del_all(&buf->free); |
125 | llist_for_each_entry_safe(p, next, llist, free) |
126 | kfree(p); |
127 | |
128 | tty_buffer_reset(&buf->sentinel, 0); |
129 | buf->head = &buf->sentinel; |
130 | buf->tail = &buf->sentinel; |
131 | |
132 | atomic_set(&buf->memory_used, 0); |
133 | } |
134 | |
/**
 *	tty_buffer_alloc	-	allocate a tty buffer
 *	@port: tty port owning the flip buffer
 *	@size: desired size (characters)
 *
 *	Allocate a new tty buffer to hold the desired number of characters.
 *	We round our buffers off in 256 character chunks to get better
 *	allocation behaviour.
 *	Return NULL if out of memory or the allocation would exceed the
 *	per device queue
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	/* Small requests may be satisfied from the lock-less free ring,
	 * which only ever holds buffers of at most MIN_TTYB_SIZE (see
	 * tty_buffer_free()).
	 */
	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
	if (atomic_read(&port->buf.memory_used) > TTYB_MEM_LIMIT)
		return NULL;
	/* 2 * size: one data byte plus one flag byte per character,
	 * stored after the tty_buffer header itself
	 */
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	/* Only the character count is charged against TTYB_MEM_LIMIT;
	 * tty_buffer_free() subtracts the same amount
	 */
	atomic_add(size, &port->buf.memory_used);
	return p;
}
176 | |
177 | /** |
178 | * tty_buffer_free - free a tty buffer |
179 | * @tty: tty owning the buffer |
180 | * @b: the buffer to free |
181 | * |
182 | * Free a tty buffer, or add it to the free list according to our |
183 | * internal strategy |
184 | */ |
185 | |
186 | static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b) |
187 | { |
188 | struct tty_bufhead *buf = &port->buf; |
189 | |
190 | /* Dumb strategy for now - should keep some stats */ |
191 | WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0); |
192 | |
193 | if (b->size > MIN_TTYB_SIZE) |
194 | kfree(b); |
195 | else if (b->size > 0) |
196 | llist_add(&b->free, &buf->free); |
197 | } |
198 | |
/**
 *	tty_buffer_flush		-	flush full tty buffers
 *	@tty: tty to flush
 *
 *	flush all the buffers containing receive data. If the buffer is
 *	being processed by flush_to_ldisc then we defer the processing
 *	to that function
 *
 *	Locking: takes buffer lock to ensure single-threaded flip buffer
 *		 'consumer'
 */

void tty_buffer_flush(struct tty_struct *tty)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	/* Raising priority makes a concurrently running flush_to_ldisc()
	 * break out of its loop and release buf->lock, so the flush is
	 * not stuck behind the consumer.
	 */
	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	/* Free every queued buffer up to (but not including) the tail */
	while ((next = buf->head->next) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	/* Discard committed-but-unread data remaining in the last buffer */
	buf->head->read = buf->head->commit;
	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}
228 | |
/**
 *	tty_buffer_request_room		-	grow tty buffer if needed
 *	@port: tty port structure
 *	@size: size desired
 *
 *	Make at least size bytes of linear space available for the tty
 *	buffer. If we fail return the size we managed to find.
 */
int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left;

	b = buf->tail;
	left = b->size - b->used;

	if (left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_alloc(port, size)) != NULL) {
			buf->tail = n;
			/* Publish in two ordered steps: first commit the
			 * data already written to the old tail, then link
			 * the new buffer. The barrier keeps the stores in
			 * that order so flush_to_ldisc() (which reads
			 * ->commit before following ->next) never skips
			 * past uncommitted data.
			 */
			b->commit = b->used;
			smp_mb();
			b->next = n;
		} else
			size = left;	/* allocation failed: report what we have */
	}
	return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
259 | |
260 | /** |
261 | * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer |
262 | * @port: tty port |
263 | * @chars: characters |
264 | * @flag: flag value for each character |
265 | * @size: size |
266 | * |
267 | * Queue a series of bytes to the tty buffering. All the characters |
268 | * passed are marked with the supplied flag. Returns the number added. |
269 | */ |
270 | |
271 | int tty_insert_flip_string_fixed_flag(struct tty_port *port, |
272 | const unsigned char *chars, char flag, size_t size) |
273 | { |
274 | int copied = 0; |
275 | do { |
276 | int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); |
277 | int space = tty_buffer_request_room(port, goal); |
278 | struct tty_buffer *tb = port->buf.tail; |
279 | if (unlikely(space == 0)) |
280 | break; |
281 | memcpy(char_buf_ptr(tb, tb->used), chars, space); |
282 | memset(flag_buf_ptr(tb, tb->used), flag, space); |
283 | tb->used += space; |
284 | copied += space; |
285 | chars += space; |
286 | /* There is a small chance that we need to split the data over |
287 | several buffers. If this is the case we must loop */ |
288 | } while (unlikely(size > copied)); |
289 | return copied; |
290 | } |
291 | EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag); |
292 | |
293 | /** |
294 | * tty_insert_flip_string_flags - Add characters to the tty buffer |
295 | * @port: tty port |
296 | * @chars: characters |
297 | * @flags: flag bytes |
298 | * @size: size |
299 | * |
300 | * Queue a series of bytes to the tty buffering. For each character |
301 | * the flags array indicates the status of the character. Returns the |
302 | * number added. |
303 | */ |
304 | |
305 | int tty_insert_flip_string_flags(struct tty_port *port, |
306 | const unsigned char *chars, const char *flags, size_t size) |
307 | { |
308 | int copied = 0; |
309 | do { |
310 | int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); |
311 | int space = tty_buffer_request_room(port, goal); |
312 | struct tty_buffer *tb = port->buf.tail; |
313 | if (unlikely(space == 0)) |
314 | break; |
315 | memcpy(char_buf_ptr(tb, tb->used), chars, space); |
316 | memcpy(flag_buf_ptr(tb, tb->used), flags, space); |
317 | tb->used += space; |
318 | copied += space; |
319 | chars += space; |
320 | flags += space; |
321 | /* There is a small chance that we need to split the data over |
322 | several buffers. If this is the case we must loop */ |
323 | } while (unlikely(size > copied)); |
324 | return copied; |
325 | } |
326 | EXPORT_SYMBOL(tty_insert_flip_string_flags); |
327 | |
/**
 *	tty_schedule_flip	-	push characters to ldisc
 *	@port: tty port to push from
 *
 *	Takes any pending buffers and transfers their ownership to the
 *	ldisc side of the queue. It then schedules those characters for
 *	processing by the line discipline.
 *	Note that this function can only be used when the low_latency flag
 *	is unset. Otherwise the workqueue won't be flushed.
 */

void tty_schedule_flip(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	/* tty_flush_to_ldisc() only waits on the work when low_latency is
	 * unset, so calling this with it set cannot be flushed reliably.
	 */
	WARN_ON(port->low_latency);

	/* Hand everything written so far over to the consumer side */
	buf->tail->commit = buf->tail->used;
	schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);
348 | |
349 | /** |
350 | * tty_prepare_flip_string - make room for characters |
351 | * @port: tty port |
352 | * @chars: return pointer for character write area |
353 | * @size: desired size |
354 | * |
355 | * Prepare a block of space in the buffer for data. Returns the length |
356 | * available and buffer pointer to the space which is now allocated and |
357 | * accounted for as ready for normal characters. This is used for drivers |
358 | * that need their own block copy routines into the buffer. There is no |
359 | * guarantee the buffer is a DMA target! |
360 | */ |
361 | |
362 | int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, |
363 | size_t size) |
364 | { |
365 | int space = tty_buffer_request_room(port, size); |
366 | if (likely(space)) { |
367 | struct tty_buffer *tb = port->buf.tail; |
368 | *chars = char_buf_ptr(tb, tb->used); |
369 | memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); |
370 | tb->used += space; |
371 | } |
372 | return space; |
373 | } |
374 | EXPORT_SYMBOL_GPL(tty_prepare_flip_string); |
375 | |
376 | /** |
377 | * tty_prepare_flip_string_flags - make room for characters |
378 | * @port: tty port |
379 | * @chars: return pointer for character write area |
380 | * @flags: return pointer for status flag write area |
381 | * @size: desired size |
382 | * |
383 | * Prepare a block of space in the buffer for data. Returns the length |
384 | * available and buffer pointer to the space which is now allocated and |
385 | * accounted for as ready for characters. This is used for drivers |
386 | * that need their own block copy routines into the buffer. There is no |
387 | * guarantee the buffer is a DMA target! |
388 | */ |
389 | |
390 | int tty_prepare_flip_string_flags(struct tty_port *port, |
391 | unsigned char **chars, char **flags, size_t size) |
392 | { |
393 | int space = tty_buffer_request_room(port, size); |
394 | if (likely(space)) { |
395 | struct tty_buffer *tb = port->buf.tail; |
396 | *chars = char_buf_ptr(tb, tb->used); |
397 | *flags = flag_buf_ptr(tb, tb->used); |
398 | tb->used += space; |
399 | } |
400 | return space; |
401 | } |
402 | EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); |
403 | |
404 | |
/* Feed up to @count bytes starting at head->read to the line discipline.
 * Returns the number of bytes actually consumed and advances head->read
 * by that amount.
 */
static int
receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
{
	struct tty_ldisc *disc = tty->ldisc;
	unsigned char *p = char_buf_ptr(head, head->read);
	char *f = flag_buf_ptr(head, head->read);

	/* receive_buf2 reports how much it took; the legacy receive_buf
	 * interface has no return value, so cap the offer at
	 * tty->receive_room first and assume it consumes all of it.
	 */
	if (disc->ops->receive_buf2)
		count = disc->ops->receive_buf2(tty, p, f, count);
	else {
		count = min_t(int, count, tty->receive_room);
		if (count)
			disc->ops->receive_buf(tty, p, f, count);
	}
	head->read += count;
	return count;
}
422 | |
/**
 *	flush_to_ldisc
 *	@work: tty structure passed from work queue.
 *
 *	This routine is called out of the software interrupt to flush data
 *	from the buffer chain to the line discipline.
 *
 *	The receive_buf method is single threaded for each tty instance.
 *
 *	Locking: takes buffer lock to ensure single-threaded flip buffer
 *		 'consumer'
 */

static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;
	struct tty_struct *tty;
	struct tty_ldisc *disc;

	/* No tty attached to the port yet (or any more): nothing to push */
	tty = port->itty;
	if (tty == NULL)
		return;

	/* Pin the current line discipline; bail out if none can be
	 * referenced right now.
	 */
	disc = tty_ldisc_ref(tty);
	if (disc == NULL)
		return;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		int count;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		count = head->commit - head->read;
		if (!count) {
			/* This buffer is fully consumed: advance to the
			 * next linked buffer, or stop when the queue is
			 * drained (tail has no successor).
			 */
			if (head->next == NULL)
				break;
			buf->head = head->next;
			tty_buffer_free(port, head);
			continue;
		}

		/* If the ldisc took nothing, stop rather than spin; the
		 * remaining data stays queued for a later flush.
		 */
		count = receive_buf(tty, head, count);
		if (!count)
			break;
	}

	mutex_unlock(&buf->lock);

	tty_ldisc_deref(disc);
}
479 | |
480 | /** |
481 | * tty_flush_to_ldisc |
482 | * @tty: tty to push |
483 | * |
484 | * Push the terminal flip buffers to the line discipline. |
485 | * |
486 | * Must not be called from IRQ context. |
487 | */ |
488 | void tty_flush_to_ldisc(struct tty_struct *tty) |
489 | { |
490 | if (!tty->port->low_latency) |
491 | flush_work(&tty->port->buf.work); |
492 | } |
493 | |
/**
 *	tty_flip_buffer_push	-	terminal
 *	@port: tty port to push
 *
 *	Queue a push of the terminal flip buffers to the line discipline. This
 *	function must not be called from IRQ context if port->low_latency is
 *	set.
 *
 *	In the event of the queue being busy for flipping the work will be
 *	held off and retried later.
 */

void tty_flip_buffer_push(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	/* Commit everything written so far to the consumer side */
	buf->tail->commit = buf->tail->used;

	if (port->low_latency)
		/* Push synchronously in the caller's context */
		flush_to_ldisc(&buf->work);
	else
		schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
518 | |
519 | /** |
520 | * tty_buffer_init - prepare a tty buffer structure |
521 | * @tty: tty to initialise |
522 | * |
523 | * Set up the initial state of the buffer management for a tty device. |
524 | * Must be called before the other tty buffer functions are used. |
525 | */ |
526 | |
527 | void tty_buffer_init(struct tty_port *port) |
528 | { |
529 | struct tty_bufhead *buf = &port->buf; |
530 | |
531 | mutex_init(&buf->lock); |
532 | tty_buffer_reset(&buf->sentinel, 0); |
533 | buf->head = &buf->sentinel; |
534 | buf->tail = &buf->sentinel; |
535 | init_llist_head(&buf->free); |
536 | atomic_set(&buf->memory_used, 0); |
537 | atomic_set(&buf->priority, 0); |
538 | INIT_WORK(&buf->work, flush_to_ldisc); |
539 | } |
540 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9