GitHub Repository: torvalds/linux
Path: tools/testing/memblock/tests/basic_api.c
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
#include "basic_api.h"
3
#include <string.h>
4
#include <linux/memblock.h>
5
6
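/*
 * Presumably kept equal to INIT_MEMBLOCK_REGIONS (used below), the static
 * size of the initial memblock.memory and memblock.reserved region arrays
 * in mm/memblock.c.
 */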
#define EXPECTED_MEMBLOCK_REGIONS   128
#define FUNC_ADD                    "memblock_add"
#define FUNC_RESERVE                "memblock_reserve"
#define FUNC_REMOVE                 "memblock_remove"
#define FUNC_FREE                   "memblock_free"
#define FUNC_TRIM                   "memblock_trim_memory"

static int memblock_initialization_check(void)
{
        PREFIX_PUSH();

        ASSERT_NE(memblock.memory.regions, NULL);
        ASSERT_EQ(memblock.memory.cnt, 0);
        ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
        ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);

        ASSERT_NE(memblock.reserved.regions, NULL);
        ASSERT_EQ(memblock.reserved.cnt, 0);
        ASSERT_EQ(memblock.reserved.max, EXPECTED_MEMBLOCK_REGIONS);
        ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);

        ASSERT_EQ(memblock.bottom_up, false);
        ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that adds a memory block of a specified base address
 * and size to the collection of available memory regions (memblock.memory).
 * Expect to create a new entry. The region counter and total memory size are
 * updated.
 */
static int memblock_add_simple_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r = {
                .base = SZ_1G,
                .size = SZ_4M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r.base, r.size);

        ASSERT_EQ(rgn->base, r.base);
        ASSERT_EQ(rgn->size, r.size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r.size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that adds a memory block of a specified base address, size,
 * NUMA node and memory flags to the collection of available memory regions.
 * Expect to create a new entry. The region counter and total memory size are
 * updated.
 */
static int memblock_add_node_simple_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r = {
                .base = SZ_1M,
                .size = SZ_16M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);

        ASSERT_EQ(rgn->base, r.base);
        ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
        ASSERT_EQ(rgn->nid, 1);
#endif
        ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another:
 *
 * |        +--------+        +--------+  |
 * |        |   r1   |        |   r2   |  |
 * +--------+--------+--------+--------+--+
 *
 * Expect to add two correctly initialized entries to the collection of
 * available memory regions (memblock.memory). The total size and
 * region counter fields get updated.
 */
static int memblock_add_disjoint_check(void)
{
        struct memblock_region *rgn1, *rgn2;

        rgn1 = &memblock.memory.regions[0];
        rgn2 = &memblock.memory.regions[1];

        struct region r1 = {
                .base = SZ_1G,
                .size = SZ_8K
        };
        struct region r2 = {
                .base = SZ_1G + SZ_16K,
                .size = SZ_8K
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);

        ASSERT_EQ(rgn1->base, r1.base);
        ASSERT_EQ(rgn1->size, r1.size);

        ASSERT_EQ(rgn2->base, r2.base);
        ASSERT_EQ(rgn2->size, r2.size);

        ASSERT_EQ(memblock.memory.cnt, 2);
        ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the beginning of r1 (that is r1.base < r2.base + r2.size):
 *
 * |    +----+----+------------+          |
 * |    |    |r2  |     r1     |          |
 * +----+----+----+------------+----------+
 *      ^    ^
 *      |    |
 *      |    r1.base
 *      |
 *      r2.base
 *
 * Expect to merge the two entries into one region that starts at r2.base
 * and whose size is the sum of the two regions minus their intersection.
 * The total size of the available memory is updated, and the region counter
 * stays the same.
 */
static int memblock_add_overlap_top_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_512M,
                .size = SZ_1G
        };
        struct region r2 = {
                .base = SZ_256M,
                .size = SZ_512M
        };

        PREFIX_PUSH();

        total_size = (r1.base - r2.base) + r1.size;
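        /*
         * Worked example with the values above: r2 spans [256M, 768M) and r1
         * spans [512M, 1536M), so the merged region is [256M, 1536M), i.e.
         * (512M - 256M) + 1G = 1280M.
         */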

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r2.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the end of r1 (that is r2.base < r1.base + r1.size):
 *
 * |  +--+------+----------+              |
 * |  |  |  r1  |    r2    |              |
 * +--+--+------+----------+--------------+
 *    ^         ^
 *    |         |
 *    |         r2.base
 *    |
 *    r1.base
 *
 * Expect to merge the two entries into one region that starts at r1.base
 * and whose size is the sum of the two regions minus their intersection.
 * The total size of the available memory is updated, and the region counter
 * stays the same.
 */
static int memblock_add_overlap_bottom_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_128M,
                .size = SZ_512M
        };
        struct region r2 = {
                .base = SZ_256M,
                .size = SZ_1G
        };

        PREFIX_PUSH();

        total_size = (r2.base - r1.base) + r2.size;

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 is
 * within the range of r1 (that is r1.base < r2.base &&
 * r2.base + r2.size < r1.base + r1.size):
 *
 * |   +-------+--+-----------------------+
 * |   |       |r2|          r1           |
 * +---+-------+--+-----------------------+
 *     ^
 *     |
 *     r1.base
 *
 * Expect the two entries to merge into one region that is unchanged.
 * The counter and total size of available memory are not updated.
 */
static int memblock_add_within_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_8M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_16M,
                .size = SZ_1M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, r1.size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r1.size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to add the same memory block twice. Expect
 * the counter and total size of available memory to not be updated.
 */
static int memblock_add_twice_check(void)
{
        struct region r = {
                .base = SZ_16K,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        reset_memblock_regions();

        memblock_add(r.base, r.size);
        memblock_add(r.base, r.size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another and then add a third memory block in the space between the first two:
 *
 * |        +--------+--------+--------+  |
 * |        |   r1   |   r3   |   r2   |  |
 * +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one region that starts at r1.base
 * and has size of r1.size + r2.size + r3.size. The region counter and total
 * size of the available memory are updated.
 */
static int memblock_add_between_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_1G,
                .size = SZ_8K
        };
        struct region r2 = {
                .base = SZ_1G + SZ_16K,
                .size = SZ_8K
        };
        struct region r3 = {
                .base = SZ_1G + SZ_8K,
                .size = SZ_8K
        };

        PREFIX_PUSH();

        total_size = r1.size + r2.size + r3.size;

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);
        memblock_add(r3.base, r3.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to add a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                              +--------+
 *                              |    r   |
 *                              +--------+
 * |                            +----+
 * |                            | rgn|
 * +----------------------------+----+
 *
 * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of available memory and the counter to be updated.
 */
static int memblock_add_near_max_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r = {
                .base = PHYS_ADDR_MAX - SZ_1M,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        total_size = PHYS_ADDR_MAX - r.base;
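        /*
         * The requested region [PHYS_ADDR_MAX - 1M, PHYS_ADDR_MAX + 1M)
         * cannot extend past PHYS_ADDR_MAX, so only the 1M below the limit
         * is expected to be added.
         */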

        reset_memblock_regions();
        memblock_add(r.base, r.size);

        ASSERT_EQ(rgn->base, r.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to add the 129th memory block.
 * Expect it to trigger memblock_double_array(), which doubles
 * memblock.memory.max and finds new valid memory for
 * memory.regions.
 */
static int memblock_add_many_check(void)
{
        int i;
        void *orig_region;
        struct region r = {
                .base = SZ_16K,
                .size = SZ_16K,
        };
        phys_addr_t new_memory_regions_size;
        phys_addr_t base, size = SZ_64;
        phys_addr_t gap_size = SZ_64;

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_allow_resize();

        dummy_physical_memory_init();
        /*
         * dummy_physical_memory_init() allocated enough memory, which we now
         * split into small blocks. First carve out a memory block large
         * enough to be the region that memblock_double_array() will choose.
         */
        base = PAGE_ALIGN(dummy_physical_memory_base());
        new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 *
                                             sizeof(struct memblock_region));
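        /*
         * Rough worked example, assuming 4K pages and a struct
         * memblock_region of a few dozen bytes: 128 * 2 regions come to a
         * few KB, which PAGE_ALIGN() rounds up to a whole page. Only the
         * assertions below depend on the exact value.
         */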
        memblock_add(base, new_memory_regions_size);

        /* This is the base of the small memory blocks. */
        base += new_memory_regions_size + gap_size;

        orig_region = memblock.memory.regions;

        for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
                /*
                 * Add these small blocks to fill up the memblock. Keep a gap
                 * between neighbouring regions so they are not merged.
                 */
                memblock_add(base, size);
                base += size + gap_size;

                ASSERT_EQ(memblock.memory.cnt, i + 2);
                ASSERT_EQ(memblock.memory.total_size, new_memory_regions_size +
                                                      (i + 1) * size);
        }

        /*
         * At this point memblock_double_array() has succeeded; check that it
         * updated memory.max.
         */
        ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

        /* memblock_double_array() reserves the memory it uses. Check it. */
        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size);

        /*
         * memblock_double_array() has worked as expected. Check that
         * memblock_add() still behaves normally after the array was doubled.
         */
        memblock_add(r.base, r.size);
        ASSERT_EQ(memblock.memory.regions[0].base, r.base);
        ASSERT_EQ(memblock.memory.regions[0].size, r.size);

        ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2);
        ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size +
                                              new_memory_regions_size +
                                              r.size);
        ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

        dummy_physical_memory_cleanup();

        /*
         * The current memory.regions occupies a range of memory allocated by
         * dummy_physical_memory_init(). Once that memory is freed it must
         * not be used, so restore the original region array to make sure
         * later tests run as normal, unaffected by the doubled array.
         */
        memblock.memory.regions = orig_region;
        memblock.memory.cnt = INIT_MEMBLOCK_REGIONS;

        test_pass_pop();

        return 0;
}

static int memblock_add_checks(void)
{
        prefix_reset();
        prefix_push(FUNC_ADD);
        test_print("Running %s tests...\n", FUNC_ADD);

        memblock_add_simple_check();
        memblock_add_node_simple_check();
        memblock_add_disjoint_check();
        memblock_add_overlap_top_check();
        memblock_add_overlap_bottom_check();
        memblock_add_within_check();
        memblock_add_twice_check();
        memblock_add_between_check();
        memblock_add_near_max_check();
        memblock_add_many_check();

        prefix_pop();

        return 0;
}

/*
 * A simple test that marks a memory block of a specified base address
 * and size as reserved, adding it to the collection of reserved memory
 * regions (memblock.reserved). Expect to create a new entry. The region
 * counter and total memory size are updated.
 */
static int memblock_reserve_simple_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.reserved.regions[0];

        struct region r = {
                .base = SZ_2G,
                .size = SZ_128M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r.base, r.size);

        ASSERT_EQ(rgn->base, r.base);
        ASSERT_EQ(rgn->size, r.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved:
 *
 * |        +--+      +----------------+  |
 * |        |r1|      |       r2       |  |
 * +--------+--+------+----------------+--+
 *
 * Expect to add two entries to the collection of reserved memory regions
 * (memblock.reserved). The total size and region counter for
 * memblock.reserved are updated.
 */
static int memblock_reserve_disjoint_check(void)
{
        struct memblock_region *rgn1, *rgn2;

        rgn1 = &memblock.reserved.regions[0];
        rgn2 = &memblock.reserved.regions[1];

        struct region r1 = {
                .base = SZ_256M,
                .size = SZ_16M
        };
        struct region r2 = {
                .base = SZ_512M,
                .size = SZ_512M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        ASSERT_EQ(rgn1->base, r1.base);
        ASSERT_EQ(rgn1->size, r1.size);

        ASSERT_EQ(rgn2->base, r2.base);
        ASSERT_EQ(rgn2->size, r2.size);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the beginning of r1 (that is
 * r1.base < r2.base + r2.size):
 *
 * |  +--------------+--+--------------+  |
 * |  |      r2      |  |      r1      |  |
 * +--+--------------+--+--------------+--+
 *    ^              ^
 *    |              |
 *    |              r1.base
 *    |
 *    r2.base
 *
 * Expect to merge two entries into one region that starts at r2.base and
 * whose size is the sum of the two regions minus their intersection. The
 * total size of the reserved memory is updated, and the region counter is
 * not updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_1G,
                .size = SZ_1G
        };
        struct region r2 = {
                .base = SZ_128M,
                .size = SZ_1G
        };

        PREFIX_PUSH();

        total_size = (r1.base - r2.base) + r1.size;

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r2.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the end of r1 (that is
 * r2.base < r1.base + r1.size):
 *
 * |  +--------------+--+--------------+  |
 * |  |      r1      |  |      r2      |  |
 * +--+--------------+--+--------------+--+
 *    ^              ^
 *    |              |
 *    |              r2.base
 *    |
 *    r1.base
 *
 * Expect to merge two entries into one region that starts at r1.base and
 * whose size is the sum of the two regions minus their intersection. The
 * total size of the reserved memory is updated, and the region counter is
 * not updated.
 */
static int memblock_reserve_overlap_bottom_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_2K,
                .size = SZ_128K
        };
        struct region r2 = {
                .base = SZ_128K,
                .size = SZ_128K
        };

        PREFIX_PUSH();

        total_size = (r2.base - r1.base) + r2.size;

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 is within the range of r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 * | +-----+--+---------------------------|
 * | |     |r2|            r1             |
 * +-+-----+--+---------------------------+
 *   ^     ^
 *   |     |
 *   |     r2.base
 *   |
 *   r1.base
 *
 * Expect the two entries to merge into one region that is unchanged. The
 * counter and total size of reserved memory are not updated.
 */
static int memblock_reserve_within_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_1M,
                .size = SZ_8M
        };
        struct region r2 = {
                .base = SZ_2M,
                .size = SZ_64K
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, r1.size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, r1.size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to reserve the same memory block twice.
 * Expect the region counter and total size of reserved memory to not
 * be updated.
 */
static int memblock_reserve_twice_check(void)
{
        struct region r = {
                .base = SZ_16K,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        reset_memblock_regions();

        memblock_reserve(r.base, r.size);
        memblock_reserve(r.base, r.size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, r.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved
 * and then reserve a third memory block in the space between the first two:
 *
 * |        +--------+--------+--------+  |
 * |        |   r1   |   r3   |   r2   |  |
 * +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one reserved region that starts at
 * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
 * total size for memblock.reserved are updated.
 */
static int memblock_reserve_between_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_1G,
                .size = SZ_8K
        };
        struct region r2 = {
                .base = SZ_1G + SZ_16K,
                .size = SZ_8K
        };
        struct region r3 = {
                .base = SZ_1G + SZ_8K,
                .size = SZ_8K
        };

        PREFIX_PUSH();

        total_size = r1.size + r2.size + r3.size;

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);
        memblock_reserve(r3.base, r3.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to reserve a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                              +--------+
 *                              |    r   |
 *                              +--------+
 * |                            +----+
 * |                            | rgn|
 * +----------------------------+----+
 *
 * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of reserved memory and the counter to be updated.
 */
static int memblock_reserve_near_max_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r = {
                .base = PHYS_ADDR_MAX - SZ_1M,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        total_size = PHYS_ADDR_MAX - r.base;

        reset_memblock_regions();
        memblock_reserve(r.base, r.size);

        ASSERT_EQ(rgn->base, r.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to reserve the 129th memory block.
 * Expect it to trigger memblock_double_array(), which doubles
 * memblock.reserved.max and finds new valid memory for
 * reserved.regions.
 */
static int memblock_reserve_many_check(void)
{
        int i;
        void *orig_region;
        struct region r = {
                .base = SZ_16K,
                .size = SZ_16K,
        };
        phys_addr_t memory_base = SZ_128K;
        phys_addr_t new_reserved_regions_size;

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_allow_resize();

        /* Add a valid memory region used by double_array(). */
        dummy_physical_memory_init();
        memblock_add(dummy_physical_memory_base(), MEM_SIZE);

        for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
                /* Reserve some fake memory regions to fill up the memblock. */
                memblock_reserve(memory_base, MEM_SIZE);

                ASSERT_EQ(memblock.reserved.cnt, i + 1);
                ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);

                /* Keep a gap so these memory regions will not be merged. */
                memory_base += MEM_SIZE * 2;
        }

        orig_region = memblock.reserved.regions;

        /* This reserves the 129th memory region and triggers the array doubling. */
        memblock_reserve(memory_base, MEM_SIZE);

        /*
         * This is the size of the memory occupied by the doubled
         * reserved.regions array, which is reserved because it is now in
         * use. The size is needed to calculate the total_size that
         * memblock.reserved now has.
         */
        new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
                                               sizeof(struct memblock_region));
        /*
         * memblock_double_array() finds a free memory region to hold the new
         * reserved.regions array and reserves it, so one more region now
         * exists in the reserved memblock, and that extra region's size is
         * new_reserved_regions_size.
         */
        ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
        ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                new_reserved_regions_size);
        ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

        /*
         * memblock_double_array() has worked as expected. Check that
         * memblock_reserve() still behaves normally after the array was
         * doubled.
         */
        memblock_reserve(r.base, r.size);
        ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
        ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

        ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
        ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                new_reserved_regions_size +
                                                r.size);
        ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

        dummy_physical_memory_cleanup();

        /*
         * The current reserved.regions occupies a range of memory allocated
         * by dummy_physical_memory_init(). Once that memory is freed it must
         * not be used, so restore the original region array to make sure
         * later tests run as normal, unaffected by the doubled array.
         */
        memblock.reserved.regions = orig_region;
        memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;

        test_pass_pop();

        return 0;
}


/*
 * A test that tries to reserve the 129th memory block at all locations.
 * Expect it to trigger memblock_double_array(), which doubles
 * memblock.reserved.max and finds new valid memory for reserved.regions.
 *
 *  0               1               2                 128
 *  +-------+       +-------+       +-------+         +-------+
 *  |  32K  |       |  32K  |       |  32K  |   ...   |  32K  |
 *  +-------+-------+-------+-------+-------+         +-------+
 *          |<-32K->|       |<-32K->|
 *
 */
/* Keep a gap so these memory regions will not be merged. */
#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
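/*
 * For example, assuming MEM_SIZE is SZ_32K (as the diagram above suggests),
 * MEMORY_BASE(0) = 128K and MEMORY_BASE(1) = 192K: each block is followed by
 * a 32K hole.
 */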
static int memblock_reserve_all_locations_check(void)
{
        int i, skip;
        void *orig_region;
        struct region r = {
                .base = SZ_16K,
                .size = SZ_16K,
        };
        phys_addr_t new_reserved_regions_size;

        PREFIX_PUSH();

        /* Reserve the 129th memory block at all possible positions. */
        for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
                reset_memblock_regions();
                memblock_allow_resize();

                /* Add a valid memory region used by double_array(). */
                dummy_physical_memory_init();
                memblock_add(dummy_physical_memory_base(), MEM_SIZE);

                for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
                        if (i == skip)
                                continue;

                        /* Reserve some fake memory regions to fill up the memblock. */
                        memblock_reserve(MEMORY_BASE(i), MEM_SIZE);

                        if (i < skip) {
                                ASSERT_EQ(memblock.reserved.cnt, i + 1);
                                ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
                        } else {
                                ASSERT_EQ(memblock.reserved.cnt, i);
                                ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
                        }
                }

                orig_region = memblock.reserved.regions;

                /* This reserves the 129th memory region and triggers the array doubling. */
                memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);

                /*
                 * This is the size of the memory occupied by the doubled
                 * reserved.regions array, which is reserved because it is
                 * now in use. The size is needed to calculate the total_size
                 * that memblock.reserved now has.
                 */
                new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
                                                       sizeof(struct memblock_region));
                /*
                 * memblock_double_array() finds a free memory region to hold
                 * the new reserved.regions array and reserves it, so one
                 * more region now exists in the reserved memblock, and that
                 * extra region's size is new_reserved_regions_size.
                 */
                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                        new_reserved_regions_size);
                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

                /*
                 * memblock_double_array() has worked as expected. Check that
                 * memblock_reserve() still behaves normally after the array
                 * was doubled.
                 */
                memblock_reserve(r.base, r.size);
                ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
                ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                        new_reserved_regions_size +
                                                        r.size);
                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

                dummy_physical_memory_cleanup();

                /*
                 * The current reserved.regions occupies a range of memory
                 * allocated by dummy_physical_memory_init(). Once that
                 * memory is freed it must not be used, so restore the
                 * original region array to make sure later tests run as
                 * normal, unaffected by the doubled array.
                 */
                memblock.reserved.regions = orig_region;
                memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
        }

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to reserve the 129th memory block at all locations.
 * Expect it to trigger memblock_double_array(), which doubles
 * memblock.reserved.max and finds new valid memory for reserved.regions,
 * while making sure it doesn't conflict with the range we want to reserve.
 *
 * For example, we have 128 regions in reserved and now want to reserve
 * the skipped one. Since reserved is full, memblock_double_array() would find
 * an available range in memory for the new array. We intentionally put two
 * ranges in memory, one of which is the exact range of the skipped one.
 * Before commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when
 * doubling reserved array"), the new array would sit in the skipped range,
 * which is a conflict. The expected new array should be allocated from
 * memory.regions[0].
 *
 *                 0       1
 * memory  +-------+       +-------+
 *         |  32K  |       |  32K  |
 *         +-------+ ------+-------+-------+-------+
 *                 |<-32K->|<-32K->|<-32K->|
 *
 *                         0               skipped           127
 * reserved                +-------+       .........         +-------+
 *                         |  32K  |       . 32K  .    ...   |  32K  |
 *                         +-------+-------+-------+         +-------+
 *                                 |<-32K->|
 *                                         ^
 *                                         |
 *                                         |
 *                                         skipped one
 */
/* Keep a gap so these memory regions will not be merged. */
#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
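/*
 * MEMORY_BASE_OFFSET() mirrors MEMORY_BASE() above, but is based at a
 * page-aligned address inside the dummy allocation instead of at a fixed
 * 128K.
 */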
static int memblock_reserve_many_may_conflict_check(void)
{
        int i, skip;
        void *orig_region;
        struct region r = {
                .base = SZ_16K,
                .size = SZ_16K,
        };
        phys_addr_t new_reserved_regions_size;

        /*
         *  0          1            129
         *  +---+      +---+        +---+
         *  |32K|      |32K|   ..   |32K|
         *  +---+      +---+        +---+
         *
         * Pre-allocate the range for the 129 memory blocks, plus one range
         * for the doubled memblock.reserved.regions at idx 0.
         */
        dummy_physical_memory_init();
        phys_addr_t memory_base = dummy_physical_memory_base();
        phys_addr_t offset = PAGE_ALIGN(memory_base);

        PREFIX_PUSH();

        /* Reserve the 129th memory block at all possible positions. */
        for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
                reset_memblock_regions();
                memblock_allow_resize();

                reset_memblock_attributes();
                /* Add a valid memory region used by double_array(). */
                memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
                /*
                 * Add a memory region which will be reserved as the 129th
                 * memory region. This is not expected to be used by
                 * double_array().
                 */
                memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);

                for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
                        if (i == skip)
                                continue;

                        /* Reserve some fake memory regions to fill up the memblock. */
                        memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);

                        if (i < skip) {
                                ASSERT_EQ(memblock.reserved.cnt, i);
                                ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
                        } else {
                                ASSERT_EQ(memblock.reserved.cnt, i - 1);
                                ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
                        }
                }

                orig_region = memblock.reserved.regions;

                /* This reserves the 129th memory region and triggers the array doubling. */
                memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);

                /*
                 * This is the size of the memory occupied by the doubled
                 * reserved.regions array, which is reserved because it is
                 * now in use. The size is needed to calculate the total_size
                 * that memblock.reserved now has.
                 */
                new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
                                                       sizeof(struct memblock_region));
                /*
                 * memblock_double_array() finds a free memory region to hold
                 * the new reserved.regions array and reserves it, so one
                 * more region now exists in the reserved memblock, and that
                 * extra region's size is new_reserved_regions_size.
                 */
                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                        new_reserved_regions_size);
                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

                /*
                 * The first reserved region is allocated for the doubled
                 * array, with size new_reserved_regions_size and base
                 * MEMORY_BASE_OFFSET(0, offset) + SZ_32K -
                 * new_reserved_regions_size.
                 */
                ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size,
                          MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
                ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size);

                /*
                 * memblock_double_array() has worked as expected. Check that
                 * memblock_reserve() still behaves normally after the array
                 * was doubled.
                 */
                memblock_reserve(r.base, r.size);
                ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
                ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
                                                        new_reserved_regions_size +
                                                        r.size);
                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

                /*
                 * The current reserved.regions occupies a range of memory
                 * allocated by dummy_physical_memory_init(). Once that
                 * memory is freed it must not be used, so restore the
                 * original region array to make sure later tests run as
                 * normal, unaffected by the doubled array.
                 */
                memblock.reserved.regions = orig_region;
                memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
        }

        dummy_physical_memory_cleanup();

        test_pass_pop();

        return 0;
}

static int memblock_reserve_checks(void)
{
        prefix_reset();
        prefix_push(FUNC_RESERVE);
        test_print("Running %s tests...\n", FUNC_RESERVE);

        memblock_reserve_simple_check();
        memblock_reserve_disjoint_check();
        memblock_reserve_overlap_top_check();
        memblock_reserve_overlap_bottom_check();
        memblock_reserve_within_check();
        memblock_reserve_twice_check();
        memblock_reserve_between_check();
        memblock_reserve_near_max_check();
        memblock_reserve_many_check();
        memblock_reserve_all_locations_check();
        memblock_reserve_many_may_conflict_check();

        prefix_pop();

        return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions. By "removing" a region we mean overwriting it
 * with the next region r2 in memblock.memory:
 *
 * |  ......          +----------------+  |
 * |  : r1 :          |       r2       |  |
 * +--+----+----------+----------------+--+
 *                    ^
 *                    |
 *                    rgn.base
 *
 * Expect to add two memory blocks r1 and r2 and then remove r1 so that
 * r2 is the first available region. The region counter and total size
 * are updated.
 */
static int memblock_remove_simple_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_2K,
                .size = SZ_4K
        };
        struct region r2 = {
                .base = SZ_128K,
                .size = SZ_4M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);
        memblock_remove(r1.base, r1.size);

        ASSERT_EQ(rgn->base, r2.base);
        ASSERT_EQ(rgn->size, r2.size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r2.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to remove a region r2 that was not registered as
 * available memory (i.e. has no corresponding entry in memblock.memory):
 *
 *                      +----------------+
 *                      |       r2       |
 *                      +----------------+
 * |  +----+                              |
 * |  | r1 |                              |
 * +--+----+------------------------------+
 *    ^
 *    |
 *    rgn.base
 *
 * Expect the array, regions counter and total size to not be modified.
 */
static int memblock_remove_absent_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_512K,
                .size = SZ_4M
        };
        struct region r2 = {
                .base = SZ_64M,
                .size = SZ_1G
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, r1.size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, r1.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the
 * beginning of the already existing entry r1
 * (that is r1.base < r2.base + r2.size):
 *
 *  +-----------+
 *  |     r2    |
 *  +-----------+
 * |   .........+--------+               |
 * |   :   r1   |  rgn   |               |
 * +---+--------+--------+---------------+
 *     ^        ^
 *     |        |
 *     |        rgn.base
 *     r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_top_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t r1_end, r2_end, total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_32M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_16M,
                .size = SZ_32M
        };

        PREFIX_PUSH();

        r1_end = r1.base + r1.size;
        r2_end = r2.base + r2.size;
        total_size = r1_end - r2_end;

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r2_end);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the end of
 * the already existing region r1 (that is r2.base < r1.base + r1.size):
 *
 *         +--------------------------------+
 *         |                r2              |
 *         +--------------------------------+
 * |  +---+.....                            |
 * |  |rgn| r1 :                            |
 * +--+---+----+----------------------------+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_bottom_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_2M,
                .size = SZ_64M
        };
        struct region r2 = {
                .base = SZ_32M,
                .size = SZ_256M
        };

        PREFIX_PUSH();

        total_size = r2.base - r1.base;

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to remove a region r2 that is within the range of
 * the already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                  +----+
 *                  | r2 |
 *                  +----+
 * | +-------------+....+---------------+ |
 * | |    rgn1     | r1 |     rgn2      | |
 * +-+-------------+----+---------------+-+
 *   ^
 *   |
 *   r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size are updated.
 */
static int memblock_remove_within_check(void)
{
        struct memblock_region *rgn1, *rgn2;
        phys_addr_t r1_size, r2_size, total_size;

        rgn1 = &memblock.memory.regions[0];
        rgn2 = &memblock.memory.regions[1];

        struct region r1 = {
                .base = SZ_1M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_16M,
                .size = SZ_1M
        };

        PREFIX_PUSH();

        r1_size = r2.base - r1.base;
        r2_size = (r1.base + r1.size) - (r2.base + r2.size);
        total_size = r1_size + r2_size;
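        /*
         * With the values above: r1 spans [1M, 33M) and r2 removes
         * [16M, 17M), leaving [1M, 16M) (15M) and [17M, 33M) (16M).
         */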

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r2.base, r2.size);

        ASSERT_EQ(rgn1->base, r1.base);
        ASSERT_EQ(rgn1->size, r1_size);

        ASSERT_EQ(rgn2->base, r2.base + r2.size);
        ASSERT_EQ(rgn2->size, r2_size);

        ASSERT_EQ(memblock.memory.cnt, 2);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions when r1 is the only available region.
 * Expect to add a memory block r1 and then remove it, leaving the array
 * empty. The region counter and total size drop to zero.
 */
static int memblock_remove_only_region_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = SZ_2K,
                .size = SZ_4K
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r1.base, r1.size);

        ASSERT_EQ(rgn->base, 0);
        ASSERT_EQ(rgn->size, 0);

        ASSERT_EQ(memblock.memory.cnt, 0);
        ASSERT_EQ(memblock.memory.total_size, 0);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to remove a region r2 from the array of available
 * memory regions when r2 extends past PHYS_ADDR_MAX:
 *
 *                              +--------+
 *                              |   r2   |
 *                              +--------+
 * |                        +---+....+
 * |                        |rgn|    |
 * +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
 * Expect the total size of available memory to be updated and the counter to
 * not be updated.
 */
static int memblock_remove_near_max_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.memory.regions[0];

        struct region r1 = {
                .base = PHYS_ADDR_MAX - SZ_2M,
                .size = SZ_2M
        };

        struct region r2 = {
                .base = PHYS_ADDR_MAX - SZ_1M,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
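        /*
         * r1 covers the final 2M below PHYS_ADDR_MAX and r2 removes its last
         * 1M (the rest of r2 lies past the limit), so 1M remains.
         */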

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_remove(r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.memory.cnt, 1);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to remove a region r3 that overlaps with two existing
 * regions r1 and r2:
 *
 *          +----------------+
 *          |       r3       |
 *          +----------------+
 * |   +----+.....   ........+--------+   |
 * |   |    |r1  :   :       |r2      |   |
 * +---+----+----+---+-------+--------+---+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
 * from the available memory pool. Expect the total size of available memory to
 * be updated and the counter to not be updated.
 */
static int memblock_remove_overlap_two_check(void)
{
        struct memblock_region *rgn1, *rgn2;
        phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

        rgn1 = &memblock.memory.regions[0];
        rgn2 = &memblock.memory.regions[1];

        struct region r1 = {
                .base = SZ_16M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_64M,
                .size = SZ_64M
        };
        struct region r3 = {
                .base = SZ_32M,
                .size = SZ_64M
        };

        PREFIX_PUSH();

        r2_end = r2.base + r2.size;
        r3_end = r3.base + r3.size;
        new_r1_size = r3.base - r1.base;
        new_r2_size = r2_end - r3_end;
        total_size = new_r1_size + new_r2_size;

        reset_memblock_regions();
        memblock_add(r1.base, r1.size);
        memblock_add(r2.base, r2.size);
        memblock_remove(r3.base, r3.size);

        ASSERT_EQ(rgn1->base, r1.base);
        ASSERT_EQ(rgn1->size, new_r1_size);

        ASSERT_EQ(rgn2->base, r3_end);
        ASSERT_EQ(rgn2->size, new_r2_size);

        ASSERT_EQ(memblock.memory.cnt, 2);
        ASSERT_EQ(memblock.memory.total_size, total_size);

        test_pass_pop();

        return 0;
}

static int memblock_remove_checks(void)
{
        prefix_reset();
        prefix_push(FUNC_REMOVE);
        test_print("Running %s tests...\n", FUNC_REMOVE);

        memblock_remove_simple_check();
        memblock_remove_absent_check();
        memblock_remove_overlap_top_check();
        memblock_remove_overlap_bottom_check();
        memblock_remove_within_check();
        memblock_remove_only_region_check();
        memblock_remove_near_max_check();
        memblock_remove_overlap_two_check();

        prefix_pop();

        return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved. By "freeing" a region we mean overwriting it with
 * the next entry r2 in memblock.reserved:
 *
 * |              ......           +----+ |
 * |              : r1 :           | r2 | |
 * +--------------+----+-----------+----+-+
 *                                 ^
 *                                 |
 *                                 rgn.base
 *
 * Expect to reserve two memory regions and then erase the r1 region with
 * the value of r2. The region counter and total size are updated.
 */
static int memblock_free_simple_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_4M,
                .size = SZ_1M
        };
        struct region r2 = {
                .base = SZ_8M,
                .size = SZ_1M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);
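        /*
         * memblock_free() takes a virtual address; the cast from a physical
         * address works here presumably because the userspace test harness
         * maps the two one-to-one.
         */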
1733
memblock_free((void *)r1.base, r1.size);
1734
1735
ASSERT_EQ(rgn->base, r2.base);
1736
ASSERT_EQ(rgn->size, r2.size);
1737
1738
ASSERT_EQ(memblock.reserved.cnt, 1);
1739
ASSERT_EQ(memblock.reserved.total_size, r2.size);
1740
1741
test_pass_pop();
1742
1743
return 0;
1744
}

/*
 * A test that tries to free a region r2 that was not marked as reserved
 * (i.e. has no corresponding entry in memblock.reserved):
 *
 *                      +----------------+
 *                      |       r2       |
 *                      +----------------+
 * |  +----+                              |
 * |  | r1 |                              |
 * +--+----+------------------------------+
 *    ^
 *    |
 *    rgn.base
 *
 * The array, regions counter and total size are not modified.
 */
static int memblock_free_absent_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_2M,
                .size = SZ_8K
        };
        struct region r2 = {
                .base = SZ_16M,
                .size = SZ_128M
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, r1.size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, r1.size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the beginning
 * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
 *
 *  +----+
 *  | r2 |
 *  +----+
 * |  ...+--------------+                 |
 * |  :  |      r1      |                 |
 * +--+--+--------------+-----------------+
 *    ^  ^
 *    |  |
 *    |  rgn.base
 *    |
 *    r1.base
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_top_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_8M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_1M,
                .size = SZ_8M
        };

        PREFIX_PUSH();

        total_size = (r1.size + r1.base) - (r2.base + r2.size);

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r2.base, r2.size);

        ASSERT_EQ(rgn->base, r2.base + r2.size);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the end of
 * the already existing entry r1 (that is r2.base < r1.base + r1.size):
 *
 *                  +----------------+
 *                  |       r2       |
 *                  +----------------+
 * |    +-----------+.....                |
 * |    |    r1     |    :                |
 * +----+-----------+----+----------------+
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_bottom_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_8M,
                .size = SZ_32M
        };
        struct region r2 = {
                .base = SZ_32M,
                .size = SZ_32M
        };

        PREFIX_PUSH();

        total_size = r2.base - r1.base;

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to free a region r2 that is within the range of the
 * already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                  +----+
 *                  | r2 |
 *                  +----+
 * |    +------------+....+---------------+
 * |    |    rgn1    | r1 |     rgn2      |
 * +----+------------+----+---------------+
 *      ^
 *      |
 *      r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size fields are updated.
 */
static int memblock_free_within_check(void)
{
        struct memblock_region *rgn1, *rgn2;
        phys_addr_t r1_size, r2_size, total_size;

        rgn1 = &memblock.reserved.regions[0];
        rgn2 = &memblock.reserved.regions[1];

        struct region r1 = {
                .base = SZ_1M,
                .size = SZ_8M
        };
        struct region r2 = {
                .base = SZ_4M,
                .size = SZ_1M
        };

        PREFIX_PUSH();

        r1_size = r2.base - r1.base;
        r2_size = (r1.base + r1.size) - (r2.base + r2.size);
        total_size = r1_size + r2_size;

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r2.base, r2.size);

        ASSERT_EQ(rgn1->base, r1.base);
        ASSERT_EQ(rgn1->size, r1_size);

        ASSERT_EQ(rgn2->base, r2.base + r2.size);
        ASSERT_EQ(rgn2->size, r2_size);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved when r1 is the only reserved region.
 * Expect to reserve a memory block r1 and then free it, leaving the array
 * empty. The region counter and total size drop to zero.
 */
static int memblock_free_only_region_check(void)
{
        struct memblock_region *rgn;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = SZ_2K,
                .size = SZ_4K
        };

        PREFIX_PUSH();

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r1.base, r1.size);

        ASSERT_EQ(rgn->base, 0);
        ASSERT_EQ(rgn->size, 0);

        ASSERT_EQ(memblock.reserved.cnt, 0);
        ASSERT_EQ(memblock.reserved.total_size, 0);

        test_pass_pop();

        return 0;
}

/*
 * A simple test that tries to free a region r2 when r2 extends past
 * PHYS_ADDR_MAX:
 *
 *                              +--------+
 *                              |   r2   |
 *                              +--------+
 * |                        +---+....+
 * |                        |rgn|    |
 * +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
 * Expect the total size of reserved memory to be updated and the counter to
 * not be updated.
 */
static int memblock_free_near_max_check(void)
{
        struct memblock_region *rgn;
        phys_addr_t total_size;

        rgn = &memblock.reserved.regions[0];

        struct region r1 = {
                .base = PHYS_ADDR_MAX - SZ_2M,
                .size = SZ_2M
        };

        struct region r2 = {
                .base = PHYS_ADDR_MAX - SZ_1M,
                .size = SZ_2M
        };

        PREFIX_PUSH();

        total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

        reset_memblock_regions();
        memblock_reserve(r1.base, r1.size);
        memblock_free((void *)r2.base, r2.size);

        ASSERT_EQ(rgn->base, r1.base);
        ASSERT_EQ(rgn->size, total_size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to free a reserved region r3 that overlaps with two
 * existing reserved regions r1 and r2:
 *
 *            +----------------+
 *            |       r3       |
 *            +----------------+
 *  |    +----+.....    ........+--------+    |
 *  |    |    |r1  :    :       |r2      |    |
 *  +----+----+----+----+-------+--------+----+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
 * from the collection of reserved memory. Expect the total size of reserved
 * memory to be updated and the counter to not be updated.
 */
static int memblock_free_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
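
/*
 * Worked numbers (illustrative note): with r1 = [16M, 48M),
 * r2 = [64M, 128M) and r3 = [32M, 96M), freeing r3 trims r1 to
 * new_r1_size = r3.base - r1.base = 16M and r2 to
 * new_r2_size = r2_end - r3_end = 128M - 96M = 32M, so 48M stays
 * reserved, still split across two regions.
 */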

static int memblock_free_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_FREE);
	test_print("Running %s tests...\n", FUNC_FREE);

	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();
	memblock_free_only_region_check();
	memblock_free_near_max_check();
	memblock_free_overlap_two_check();

	prefix_pop();

	return 0;
}

static int memblock_set_bottom_up_check(void)
{
	prefix_push("memblock_set_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock.bottom_up, false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock.bottom_up, true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_check(void)
{
	prefix_push("memblock_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_checks(void)
{
	test_print("Running memblock_*bottom_up tests...\n");

	prefix_reset();
	memblock_set_bottom_up_check();
	prefix_reset();
	memblock_bottom_up_check();

	return 0;
}
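
/*
 * A minimal illustrative sketch, not part of the upstream suite: it shows
 * what the bottom_up flag actually changes. Top-down allocation carves a
 * block from the highest suitable address, bottom-up from the lowest. The
 * helper name is made up for illustration; memblock_phys_alloc() itself is
 * exercised by the dedicated allocation tests, not here.
 */
static inline int memblock_bottom_up_effect_sketch(void)
{
	phys_addr_t top_down_base, bottom_up_base;

	reset_memblock_regions();
	reset_memblock_attributes();
	memblock_add(SZ_1G, SZ_16M);

	/* Default direction: allocate from the top of available memory. */
	memblock_set_bottom_up(false);
	top_down_base = memblock_phys_alloc(SZ_4K, SZ_4K);

	/* Flip the direction: the next block comes from the bottom. */
	memblock_set_bottom_up(true);
	bottom_up_base = memblock_phys_alloc(SZ_4K, SZ_4K);

	/* Bottom-up picks the lowest free address, top-down the highest. */
	ASSERT_EQ(bottom_up_base, SZ_1G);
	ASSERT_TRUE(top_down_base > bottom_up_base);

	reset_memblock_attributes();

	return 0;
}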

/*
 * A test that tries to trim memory when both ends of the memory region are
 * aligned. Expect that the memory will not be trimmed. Expect the counter to
 * not be updated.
 */
static int memblock_trim_memory_aligned_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = alignment,
		.size = alignment * 4
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
 * and smaller than the alignment:
 *
 *                                      alignment
 *                                      |--------|
 * |        +-----------------+        +------+   |
 * |        |        r1       |        |  r2  |   |
 * +--------+-----------------+--------+------+---+
 *          ^        ^        ^        ^      ^
 *          |________|________|________|      |
 *                            |               Unaligned address
 *                  Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be removed. Expect the
 * counter to be updated.
 */
static int memblock_trim_memory_too_small_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment - SZ_2
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}
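
/*
 * Worked numbers (illustrative note): r2 occupies
 * [4 * alignment, 5 * alignment - 2). Trimming rounds its base up and its
 * end down to the alignment, which leaves an empty range, so the whole
 * region is dropped and only r1 remains.
 */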

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
 * and aligned at the end:
 *
 *                                 Unaligned address
 *                                         |
 *                                         v
 * |        +-----------------+          +---------------+   |
 * |        |        r1       |          |      r2       |   |
 * +--------+-----------------+----------+---------------+---+
 *          ^        ^        ^        ^        ^        ^
 *          |________|________|________|________|________|
 *                                |
 *                       Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
 * Expect the counter to not be updated.
 */
static int memblock_trim_memory_unaligned_base_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	const phys_addr_t alignment = SMP_CACHE_BYTES;
	phys_addr_t offset = SZ_2;
	phys_addr_t new_r2_base, new_r2_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4 + offset,
		.size = alignment * 2 - offset
	};

	PREFIX_PUSH();

	new_r2_base = r2.base + (alignment - offset);
	new_r2_size = r2.size - (alignment - offset);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, new_r2_base);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);

	test_pass_pop();

	return 0;
}
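
/*
 * Worked numbers (illustrative note): with a 64-byte alignment (a common
 * SMP_CACHE_BYTES value) and offset = 2, r2.base = 4 * 64 + 2 = 258 is
 * rounded up to 320, i.e. new_r2_base = r2.base + (alignment - offset),
 * and the size shrinks by the same alignment - offset = 62 bytes.
 */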

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is aligned at the base
 * and unaligned at the end:
 *
 *                                            Unaligned address
 *                                                    |
 *                                                    v
 * |        +-----------------+        +---------------+   |
 * |        |        r1       |        |      r2       |   |
 * +--------+-----------------+--------+---------------+---+
 *          ^        ^        ^        ^        ^        ^
 *          |________|________|________|________|________|
 *                                |
 *                       Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the end.
 * Expect the counter to not be updated.
 */
static int memblock_trim_memory_unaligned_end_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	const phys_addr_t alignment = SMP_CACHE_BYTES;
	phys_addr_t offset = SZ_2;
	phys_addr_t new_r2_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment * 2 - offset
	};

	PREFIX_PUSH();

	new_r2_size = r2.size - (alignment - offset);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);

	test_pass_pop();

	return 0;
}

static int memblock_trim_memory_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_TRIM);
	test_print("Running %s tests...\n", FUNC_TRIM);

	memblock_trim_memory_aligned_check();
	memblock_trim_memory_too_small_check();
	memblock_trim_memory_unaligned_base_check();
	memblock_trim_memory_unaligned_end_check();

	prefix_pop();

	return 0;
}

static int memblock_overlaps_region_check(void)
{
	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	/* Far Away */
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));

	/* Neighbor */
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));

	/* Partial Overlap */
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));

	/* Totally Overlap */
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));

	test_pass_pop();

	return 0;
}
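
/*
 * Illustrative note: memblock treats ranges as half-open intervals
 * [base, base + size), which is why the "Neighbor" cases above, one ending
 * exactly at SZ_1G and one starting exactly at SZ_1G + SZ_4M, do not count
 * as overlapping.
 */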

static int memblock_overlaps_region_checks(void)
{
	prefix_reset();
	prefix_push("memblock_overlaps_region");
	test_print("Running memblock_overlaps_region tests...\n");

	memblock_overlaps_region_check();

	prefix_pop();

	return 0;
}

#ifdef CONFIG_NUMA
static int memblock_set_node_check(void)
{
	unsigned long i, max_reserved;
	struct memblock_region *rgn;
	void *orig_region;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	dummy_physical_memory_init();
	memblock_add(dummy_physical_memory_base(), MEM_SIZE);
	orig_region = memblock.reserved.regions;

	/* Split the range equally between node 0 and node 1 */
	memblock_set_node(memblock_start_of_DRAM(),
			  memblock_phys_mem_size() / 2, &memblock.memory, 0);
	memblock_set_node(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2,
			  memblock_phys_mem_size() / 2, &memblock.memory, 1);

	ASSERT_EQ(memblock.memory.cnt, 2);
	rgn = &memblock.memory.regions[0];
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
	ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
	ASSERT_EQ(memblock_get_region_node(rgn), 0);
	rgn = &memblock.memory.regions[1];
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM() + memblock_phys_mem_size() / 2);
	ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
	ASSERT_EQ(memblock_get_region_node(rgn), 1);

	/* Reserve 126 regions with the last one across the node boundary */
	for (i = 0; i < 125; i++)
		memblock_reserve(memblock_start_of_DRAM() + SZ_16 * i, SZ_8);

	memblock_reserve(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2 - SZ_8,
			 SZ_16);

	/*
	 * Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()")
	 * runs the following loop to set a nid on each memblock.reserved
	 * region, but it may miss some regions if memblock_set_node()
	 * doubles the array.
	 *
	 * By re-checking 'max', we make sure the nid of every region is set
	 * properly.
	 */
repeat:
	max_reserved = memblock.reserved.max;
	for_each_mem_region(rgn) {
		int nid = memblock_get_region_node(rgn);

		memblock_set_node(rgn->base, rgn->size, &memblock.reserved, nid);
	}
	if (max_reserved != memblock.reserved.max)
		goto repeat;

	/* Confirm each region has a valid node set */
	for_each_reserved_mem_region(rgn) {
		ASSERT_TRUE(numa_valid_node(memblock_get_region_node(rgn)));
		if (rgn == (memblock.reserved.regions + memblock.reserved.cnt - 1))
			ASSERT_EQ(1, memblock_get_region_node(rgn));
		else
			ASSERT_EQ(0, memblock_get_region_node(rgn));
	}

	dummy_physical_memory_cleanup();

	/*
	 * The current reserved.regions array occupies a range of memory
	 * allocated by dummy_physical_memory_init(). Once that memory has
	 * been freed, we must not touch it, so restore the original region
	 * array to make sure later tests run as normal and are not affected
	 * by the doubled array.
	 */
	memblock.reserved.regions = orig_region;
	memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;

	test_pass_pop();

	return 0;
}

static int memblock_set_node_checks(void)
{
	prefix_reset();
	prefix_push("memblock_set_node");
	test_print("Running memblock_set_node tests...\n");

	memblock_set_node_check();

	prefix_pop();

	return 0;
}
#else
static int memblock_set_node_checks(void)
{
	return 0;
}
#endif

int memblock_basic_checks(void)
{
	memblock_initialization_check();
	memblock_add_checks();
	memblock_reserve_checks();
	memblock_remove_checks();
	memblock_free_checks();
	memblock_bottom_up_checks();
	memblock_trim_memory_checks();
	memblock_overlaps_region_checks();
	memblock_set_node_checks();

	return 0;
}