// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE	0

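/*
 * Print every region in @rgn under the label @name: index, inclusive
 * [base, end] range, size in bytes and flags.
 */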
static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.cnt = 0x%lx\n", name, rgn->cnt);

	for (i = 0; i < rgn->cnt; i++) {
		base = rgn->region[i].base;
		size = rgn->region[i].size;
		end = base + size - 1;
		flags = rgn->region[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(struct lmb *lmb)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb->memory, "memory");
	lmb_dump_region(&lmb->reserved, "reserved");
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

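/*
 * Check whether [base1, base1 + size1) and [base2, base2 + size2) overlap.
 * Inclusive end addresses are compared so that ranges reaching the top of
 * the address space are handled correctly. For example, [0x1000, 0x1fff]
 * and [0x1800, 0x27ff] overlap, while [0x1000, 0x1fff] and [0x2000, 0x2fff]
 * do not.
 */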
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

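/*
 * Return 1 if region 2 starts immediately after region 1 ends, -1 if
 * region 1 starts immediately after region 2 ends, and 0 if the two
 * regions are not adjacent.
 */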
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
		rgn->region[i].flags = rgn->region[i + 1].flags;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

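/*
 * Reset both region tables. When CONFIG_LMB_USE_MAX_REGIONS is not set,
 * also point them at the fixed-size arrays embedded in struct lmb.
 */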
void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#else
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

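/*
 * Helper for arch_lmb_reserve() implementations. A typical caller might
 * look like the sketch below (illustrative only; the get_sp() helper and
 * the SZ_4K alignment are assumptions, not mandated by this file):
 *
 *	void arch_lmb_reserve(struct lmb *lmb)
 *	{
 *		arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, SZ_4K);
 *	}
 */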
void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp down by the requested alignment to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(lmb, sp, bank_end - sp + 1);
		break;
	}
}

static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

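/*
 * Typical boot-time usage (an illustrative sketch only; the SZ_1M size and
 * SZ_4K alignment are hypothetical values, not requirements):
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	addr = lmb_alloc(&lmb, SZ_1M, SZ_4K);
 *	if (!addr)
 *		...handle allocation failure...
 */
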
/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}

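/*
 * Insert [@base, @base + @size) into the sorted region table @rgn,
 * coalescing with neighbouring entries when base, size and flags allow.
 *
 * Return: the number of entries coalesced, 0 if the range was added as a
 * new entry, or -1 if the range overlaps an existing entry, matches an
 * existing entry but with different flags, or the table is full.
 */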
/* This routine is called with relocation disabled. */
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		phys_size_t rgnflags = rgn->region[i].flags;

		if (rgnbase == base && rgnsize == size) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* same region, different flags */
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
			lmb_coalesce_regions(rgn, i, i + 1);
			coalesced++;
		}
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
			rgn->region[i + 1].flags = rgn->region[i].flags;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			rgn->region[i + 1].flags = flags;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
	}

	rgn->cnt++;

	return 0;
}

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

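/*
 * Remove [@base, @base + @size) from the reserved-region table. The freed
 * range must lie entirely within a single reserved entry; depending on how
 * it lines up, that entry is deleted, trimmed at the front or back, or
 * split in two around the hole.
 *
 * Return: 0 on success, -1 if no reserved entry fully contains the range
 * (or if splitting fails because the table is full).
 */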
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
				    rgn->region[i].flags);
}

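/*
 * Mark [@base, @base + @size) as reserved with the given @flags, so that
 * subsequent allocations avoid it. Returns what lmb_add_region_flags()
 * returns: >= 0 on success, -1 on overlap, flag mismatch or a full table.
 */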
long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region_flags(_rgn, base, size, flags);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}

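/*
 * Return the index of the first entry in @rgn that overlaps
 * [@base, @base + @size), or -1 if there is no overlap.
 */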
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

/* Round @addr down to the nearest multiple of @size (a power of two) */
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

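/*
 * Top-down allocator core: walk the memory regions from highest to lowest,
 * propose the highest aligned base that fits below @max_addr, and step
 * down past any reserved region that collides until the candidate either
 * fits or falls below the start of the memory region.
 *
 * Return: the allocated (and now reserved) base address, or 0 on failure.
 */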
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;	/* saturate on overflow */
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/*
 * Return the number of contiguous free bytes starting at @addr. Relies on
 * the reserved table being kept sorted by base address.
 */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

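/*
 * Return 1 if @addr lies within a reserved region whose flags include all
 * the bits set in @flags, 0 otherwise. Passing LMB_NONE (no bits set)
 * makes this a plain "is this address reserved?" test, which is what
 * lmb_is_reserved() does.
 */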
int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return (lmb->reserved.region[i].flags & flags) == flags;
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}