/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

static int lmb_debug;

static int __init early_lmb(char *p)
{
	if (p && strstr(p, "debug"))
		lmb_debug = 1;
	return 0;
}
early_param("lmb", early_lmb);

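/*
 * Rough sketch of typical boot-time usage (illustrative only; the exact
 * sequence lives in platform setup code and varies):
 *
 *	lmb_init();
 *	lmb_add(base, size);		for each firmware memory range
 *	lmb_analyze();
 *	lmb_reserve(base, size);	kernel image, initrd, etc.
 *	addr = lmb_alloc(size, align);	early allocations
 */
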
static void lmb_dump(struct lmb_region *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->region[i].base;
		size = region->region[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void lmb_dump_all(void)
{
	if (!lmb_debug)
		return;

	pr_info("LMB configuration:\n");
	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);

	lmb_dump(&lmb.memory, "memory");
	lmb_dump(&lmb.reserved, "reserved");
}

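/*
 * Two half-open ranges [base1, base1+size1) and [base2, base2+size2)
 * overlap iff each one starts before the other one ends.
 */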
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
					u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

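/*
 * Returns 1 if the second range immediately follows the first, -1 if the
 * first immediately follows the second, and 0 if they are not adjacent.
 */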
static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

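/*
 * Add [base, base+size) to a region array: coalesce it with an existing
 * entry when the ranges are identical or adjacent, otherwise insert it
 * in base-address order.  Returns a positive count when coalesced, 0
 * when inserted (or already present), and -1 when the table is full.
 */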
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

long lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

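/*
 * Carve [base, base+size) out of a region array.  The range must lie
 * entirely within one existing region; that region is deleted, trimmed
 * at either end, or split in two as needed.  Returns 0 on success and a
 * negative value if no single region contains the range (or the split
 * cannot be recorded in a full table).
 */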
static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
{
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add a new region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

long lmb_remove(u64 base, u64 size)
{
	return __lmb_remove(&lmb.memory, base, size);
}

long __init lmb_free(u64 base, u64 size)
{
	return __lmb_remove(&lmb.reserved, base, size);
}

long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

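/*
 * Returns the index of the first region in 'rgn' that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */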
long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

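/* Both helpers assume the alignment passed in 'size' is a power of two. */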
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

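/*
 * Scan downward from 'end' for an aligned block of 'size' bytes that
 * does not collide with an existing reservation, staying at or above
 * 'start'.  On success the block is reserved and its base returned;
 * on failure ~(u64)0 is returned.
 */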
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

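/*
 * Try to allocate from one memory region, restricting the search to the
 * sub-ranges that the platform's nid_range() callback reports as
 * belonging to node 'nid'.  Returns the allocated base or ~(u64)0.
 */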
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

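/*
 * Node-affine allocation: walk every memory region looking for space on
 * node 'nid', then fall back to an ordinary lmb_alloc() from anywhere if
 * no node-local block of the rounded-up size is found.
 */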
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return lmb_alloc(size, align);
}

u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

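/*
 * Core allocator: search the memory regions from the top down for an
 * aligned, unreserved block below 'max_addr', record it in the reserved
 * array and return its base.  Returns 0 on failure, which is unambiguous
 * because address 0 is never handed out.
 */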
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

u64 lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

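/*
 * Truncate the memory regions so that their total size does not exceed
 * memory_limit, shrink rmo_size if the first region shrank below it,
 * then clip or drop any reserved regions that now extend past the new
 * end of DRAM.
 */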
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	memory_limit = lmb_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

int lmb_is_region_reserved(u64 base, u64 size)
{
	return lmb_overlaps_region(&lmb.reserved, base, size);
}

/*
 * Given the <base, len> range described by *res, find the memory region
 * that overlaps it, clamp the request to that region, and return the
 * resulting contiguous chunk in *res.
 */
int lmb_find(struct lmb_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 start = lmb.memory.region[i].base;
		u64 end = start + lmb.memory.region[i].size - 1;

		if (start > rend)
			return -1;

		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}