#include "errno.h"
#include "globals.h"
#include "test/usertest.h"
#include "test/proctest.h"
#include "util/debug.h"
#include "util/printf.h"
#include "util/string.h"
#include "mm/mm.h"
#include "mm/page.h"
#include "mm/slab.h"
#include "mm/kmalloc.h"
#include "vm/vmmap.h"
#include "mm/mman.h"
#include "mm/page.h"
#include "mm/mobj.h"
#include "vm/shadow.h"
typedef struct mobj_shadow
{
// the mobj parts of this shadow object
mobj_t mobj;
// a reference to the mobj that is the data source for this shadow object
// This should be a reference to a shadow object of some ancestor process.
// This is used to traverse the shadow object chain.
mobj_t *shadowed;
// a reference to the mobj at the bottom of this shadow object's chain
// this should NEVER be a shadow object (i.e. it should have some type other
// than MOBJ_SHADOW)
mobj_t *bottom_mobj;
} mobj_shadow_t;
#define MOBJ_TO_SO(o) CONTAINER_OF(o, struct mobj_shadow, mobj)
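/*
Helper sketch (not part of the original tests): walk a shadow chain from
`start` down to the object it records as `bottom_mobj` by following the
`shadowed` links, and return the number of shadow objects traversed.
Assumes `start` is a shadow object and that every intermediate link in the
chain is also a shadow object, as described in the struct comments above.
*/
static inline size_t shadow_chain_length(mobj_t *start)
{
    mobj_t *bottom = MOBJ_TO_SO(start)->bottom_mobj;
    size_t len = 0;
    for (mobj_t *cur = start; cur != bottom; cur = MOBJ_TO_SO(cur)->shadowed)
    {
        len++;
    }
    return len;
}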
/*
Test to make sure that vmmap_write and vmmap_read are able to interact with
anonymous objects correctly
*/
long test_vmmap_write_read() {
vmmap_t *map = curproc->p_vmmap;
// First, vmmap_map into the address space.
// Use a nonzero offset with anonymous objects (in reality, most anon mappings will not have an offset).
vmarea_t* new_vma = NULL;
size_t num_pages = 6;
size_t offset = 2 * PAGE_SIZE;
long ret = vmmap_map(map, NULL, 0, num_pages,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &new_vma);
test_assert(ret == 0, "vmmap failed!");
test_assert(new_vma != NULL, "vmarea was not returned properly");
test_assert(new_vma->vma_off == offset / PAGE_SIZE, "offset was not updated correctly");
test_assert(new_vma->vma_obj != NULL, "vma_obj pointer was not set");
test_assert(new_vma->vma_prot & PROT_READ, "check protections failed: PROT_READ not set");
test_assert(new_vma->vma_prot & PROT_WRITE, "check protections failed: PROT_WRITE not set");
size_t start_page = new_vma->vma_start;
size_t end_page = new_vma->vma_end;
test_assert(end_page - start_page == num_pages, "number of pages mapped is incorrect");
// write to some random location within the vmarea, offset of 20 bytes within the page
// fill in the rest of the page with a bunch of A's
size_t offset_in_page = 20;
char* buf = page_alloc_n(1);
memset(buf, 'A', PAGE_SIZE);
size_t amount_to_read_write = PAGE_SIZE - offset_in_page;
dbg(DBG_TEST, "write starting from an offset into the 2nd page and the rest of the bytes\n");
// in the second page
dbg(DBG_TEST, "[_______xxxxxxxxxxx]\n");
char* addr = (char*)PN_TO_ADDR(start_page + 2) + offset_in_page;
// write starting from an offset into the 2nd page
ret = vmmap_write(map, (void*)addr, buf, PAGE_SIZE - offset_in_page);
test_assert(ret == 0, "vmmap_write failed");
// make sure we read the same in vmmap_read
char* read_buf = page_alloc_n(1);
ret = vmmap_read(map, (void*)addr, read_buf, PAGE_SIZE - offset_in_page);
test_assert(ret == 0, "vmmap_read failed");
test_assert(0 == memcmp(read_buf, buf, amount_to_read_write), "data read is not equal");
page_free_n(buf, 1);
page_free_n(read_buf, 1);
dbg(DBG_TEST, "write multi-page content starting from an offset within the third page\n");
dbg(DBG_TEST, "[______xxxxx][xxxxxxxxxxx][xxx________]\n");
// so something like the above
// request more data
buf = page_alloc_n(2);
read_buf = page_alloc_n(2);
// offset is at the halfway mark in the page
offset_in_page = PAGE_SIZE / 2;
addr = (char*)PN_TO_ADDR(start_page + 3) + offset_in_page;
amount_to_read_write = PAGE_SIZE * 2;
memset(buf, 'B', PAGE_SIZE * 2);
ret = vmmap_write(map, (void*)addr, buf, amount_to_read_write);
test_assert(ret == 0, "vmmap_write failed");
ret = vmmap_read(map, addr, read_buf, amount_to_read_write);
test_assert(ret == 0, "vmmap_read failed");
test_assert(0 == memcmp(read_buf, buf, amount_to_read_write), "data read is not equal");
page_free_n(buf, 2);
page_free_n(read_buf, 2);
dbg(DBG_TEST, "test a multi-vmarea mapping, this vmarea should be adjacent to the one previously made\n");
// each vmarea has 6 pages
// [ ][ ]
vmarea_t* prior_vma = NULL;
ret = vmmap_map(map, NULL, 0, num_pages,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &prior_vma);
test_assert(ret == 0, "vmmap_map failed");
test_assert(prior_vma != NULL, "vma was not initialized correctly");
test_assert(prior_vma->vma_end == new_vma->vma_start, "end was not calculated properly");
test_assert(prior_vma->vma_end - prior_vma->vma_start == num_pages, "number of pages was not calculated correctly");
// start writing at an offset within the second-to-last page of the lower vmarea
// the write/read spans almost 4 pages in total, crossing into the adjacent vmarea above it
buf = page_alloc_n(4);
read_buf = page_alloc_n(4);
// we're really just cycling through the alphabet
memset(buf, 'C', PAGE_SIZE * 4);
// keep the same offset
offset_in_page = PAGE_SIZE / 2;
// write all four pages of data
size_t amount_to_write = 4 * PAGE_SIZE; // magic numbers go brrrr
addr = (char*)PN_TO_ADDR(prior_vma->vma_end - 2) + offset_in_page;
ret = vmmap_write(map, (void*)addr, buf, amount_to_write);
test_assert(ret == 0, "vmmap_write failed");
size_t amount_to_read = offset_in_page + (3*PAGE_SIZE);
ret = vmmap_read(map, addr, read_buf, amount_to_read);
test_assert(ret == 0, "vmmap_read failed");
test_assert(0 == memcmp(read_buf, buf, amount_to_read), "data read is not equal");
page_free_n(buf, 4);
page_free_n(read_buf, 4);
// clean up for the test
vmmap_destroy(&map);
curproc->p_vmmap = vmmap_create();
return 0;
}
/*
Test to make sure that vmmap_find_range, vmmap_map, vmmap_lookup, and
vmmap_remove work together correctly
*/
long test_vmmap() {
vmmap_t *map = curproc->p_vmmap;
// Make sure we start out cleanly
KASSERT(vmmap_is_range_empty(map, ADDR_TO_PN(USER_MEM_LOW), ADDR_TO_PN(USER_MEM_HIGH - USER_MEM_LOW)));
// Go through the address space, make sure we find nothing
for (size_t i = ADDR_TO_PN(USER_MEM_LOW); i < ADDR_TO_PN(USER_MEM_HIGH); i++) {
KASSERT(!vmmap_lookup(map, i));
}
// You can probably change this.
size_t num_vmareas = 5;
// Probably shouldn't change this to anything that's not a power of two.
size_t num_pages_per_vmarea = 16;
size_t prev_start = ADDR_TO_PN(USER_MEM_HIGH);
long ret;
for (size_t i = 0; i < num_vmareas; i++) {
size_t start = vmmap_find_range(map, num_pages_per_vmarea, VMMAP_DIR_HILO);
test_assert(start + num_pages_per_vmarea == prev_start, "Incorrect return value from vmmap_find_range");
vmarea_t *vma = NULL;
// specifying MAP_FIXED so that the mapping definitely starts at this page
// use MAP_SHARED to avoid creating shadow objects
ret = vmmap_map(map, NULL, start, num_pages_per_vmarea, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, 0, VMMAP_DIR_HILO, &vma);
test_assert(ret == 0, "vmmap failed");
test_assert(vma != NULL, "vmarea was not created properly in vmmap");
test_assert(vma->vma_start == start, "start was not set correctly");
test_assert(vma->vma_end == start + num_pages_per_vmarea, "ending page was not set correctly");
test_assert(vma->vma_off == 0, "off was not set correctly");
test_assert(vmmap_lookup(map, vma->vma_start) != NULL, "could not find inserted vmarea");
prev_start = start;
}
// Now, our address space should look like:
// LP ...EMPTY... section_start [ ][ ][ ][ ][ ] HP
dbg(DBG_TEST, "address space looks like: EMPTY EMPTY EMPTY [___][___][___][___][___]\n");
// LP --> the lowest possible userland page number
// HP --> the highest possible userland page number
// section_start --> HP - (num_vmareas * num_pages_per_vmarea)
// test to make sure that the vmmap is ordered from lowest to highest
prev_start = 0;
size_t prev_end = 0;
dbg(DBG_TEST, "testing the ordering of the vmmap\n");
list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink) {
if (prev_start) {
test_assert(prev_start < vma->vma_start, "vmareas are not sorted by starting page");
test_assert(prev_end <= vma->vma_start, "adjacent vmareas overlap");
}
// remember this vmarea so the next one is compared against it
prev_start = vma->vma_start;
prev_end = vma->vma_end;
}
size_t LP = ADDR_TO_PN(USER_MEM_LOW);
size_t HP = ADDR_TO_PN(USER_MEM_HIGH);
size_t section_start = HP - (num_vmareas * num_pages_per_vmarea);
for (size_t page = section_start; page < HP; page++) {
// Make sure that we can find everything
test_assert(vmmap_lookup(map, page) != NULL, "could not find page: %X\n", page);
}
// Let's remove the middle entry entirely.
size_t remove_idx = num_vmareas / 2;
size_t remove_start = section_start + (remove_idx * num_pages_per_vmarea);
size_t remove_end = remove_start + num_pages_per_vmarea;
dbg(DBG_TEST, "removing middle entry\n");
dbg(DBG_TEST, "EMPTY EMPTY EMPTY [___][___]EMPTY[___][___]\n");
ret = vmmap_remove(map, remove_start, num_pages_per_vmarea);
test_assert(ret == 0, "vmmap remove failed");
// Let's make sure we can't find anything in the middle entry.
for (size_t i = 0; i < num_pages_per_vmarea; i++) {
// Within the current vmarea, we shouldn't find anything.
test_assert(vmmap_lookup(map, i + remove_start) == NULL, "Should not find a vmarea");
// If we go one vmarea beyond, we should find stuff!
size_t next_vmarea_start = remove_start + num_pages_per_vmarea;
test_assert(vmmap_lookup(map, i + next_vmarea_start) != NULL, "where is the vmarea?");
}
// Now, let's put our vmmap_remove implementation to the test.
// Currently, we have:
// EMPTY EMPTY EMPTY [ ][ ]EMPTY[ ][ ]
dbg(DBG_TEST, "splitting first entry\n");
dbg(DBG_TEST, "EMPTY EMPTY EMPTY [_]E[_][___]EMPTY[___][___]\n");
// Let's split the first element into pieces, from 4 to 12.
// This leaves us with:
// EMPTY EMPTY EMPTY [ ]E[ ][ ]EMPTY[ ][ ]
size_t split_start = section_start + (num_pages_per_vmarea / 4);
size_t split_end = section_start + 3*(num_pages_per_vmarea / 4);
test_assert(vmmap_remove(map, split_start, split_end - split_start) == 0, "vmmap_remove was not successful");
for (size_t i = section_start; i < HP; i++) {
if (i >= split_start && i < split_end) {
test_assert(vmmap_lookup(map, i) == NULL, "should not have found region for page: %X\n", i);
} else if (i >= remove_start && i < remove_end) {
test_assert(vmmap_lookup(map, i) == NULL, "should not have found region for page: %X\n", i);
} else {
test_assert(vmmap_lookup(map, i) != NULL, "should have found a section for page: %X\n", i);
}
}
// Now, we have:
// case 2, case 4, case 4, case 3
// EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
// **********************
// 0 4 12 16
// Oof, time to calculate this:
dbg(DBG_TEST, "removing large area of vmmap\n");
dbg(DBG_TEST, "EMPTY EMPTY EMPTY [____]EMPTY[____][____]EMPTY[____][____]\n");
dbg(DBG_TEST, "________________________________**********************__\n");
size_t big_split_start = section_start + 7*(num_pages_per_vmarea / 8);
size_t big_split_end = HP - (num_pages_per_vmarea / 2);
test_assert(vmmap_remove(map, big_split_start, big_split_end - big_split_start) == 0, "vmmap_remove failed");
for (size_t i = section_start; i < HP; i++) {
vmarea_t *lookup_vma = vmmap_lookup(map, i);
// EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
// *****************
// ^^^^^
if (i >= split_start && i < split_end) {
test_assert(lookup_vma == NULL, "why does vmarea exist");
continue;
}
// EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
// ****************
// ^^^^^^^^^^^^^^^^^
if ((i >= big_split_start) && i < big_split_end) {
test_assert(lookup_vma == NULL, "why does vmarea exist");
continue;
}
test_assert(lookup_vma != NULL, "why does vmarea not exist");
}
// clean up test and make sure everything looks good
vmmap_destroy(&curproc->p_vmmap);
curproc->p_vmmap = vmmap_create();
return 0;
}
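/*
Illustration only (not part of the original suite): count the vmareas
currently in a vmmap by walking vmm_list, the same traversal pattern used by
the ordering check in test_vmmap() above and the cleanup loop in
test_vmmap_is_range_empty() below.
*/
static inline size_t vmmap_area_count(vmmap_t *map)
{
    size_t count = 0;
    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
    {
        count++;
    }
    return count;
}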
long test_basic_shadow_collapse() {
vmmap_t *map = curproc->p_vmmap;
size_t num_pages = 6;
size_t offset = 0;
vmarea_t* new_vma = NULL;
long ret = vmmap_map(map, NULL, 0, num_pages,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &new_vma);
KASSERT(ret == 0);
KASSERT(new_vma != NULL);
KASSERT(new_vma->vma_obj != NULL);
// after this, the vmarea's object chain looks like:
// shadow_obj -> anon_bottom_mobj
test_assert(new_vma->vma_obj->mo_refcount == 1, "refcount of new object is not correct");
mobj_lock(new_vma->vma_obj);
mobj_t* new_shadow = shadow_create(new_vma->vma_obj);
mobj_unlock(new_vma->vma_obj);
// shadow_obj --> shadow_obj -> anon_bottom_mobj
test_assert(new_vma->vma_obj->mo_refcount == 2, "refcount should have been incremented to 2");
test_assert(new_shadow->mo_refcount == 1, "refcount of new shadow object was not 1");
mobj_put(&new_vma->vma_obj);
// update the vmarea to point at the new shadow object, doing manually what vmmap_clone/fork would do
new_vma->vma_obj = new_shadow;
KASSERT(MOBJ_TO_SO(new_shadow)->shadowed != NULL);
KASSERT(MOBJ_TO_SO(new_shadow)->bottom_mobj != NULL);
// store a reference to bottom_mobj
mobj_t* bottom_mobj = MOBJ_TO_SO(new_shadow)->bottom_mobj;
shadow_collapse(new_shadow);
test_assert(MOBJ_TO_SO(new_shadow)->shadowed == bottom_mobj, "shadowed was not updated correctly");
test_assert(MOBJ_TO_SO(new_shadow)->bottom_mobj == bottom_mobj, "bottom mobj changed");
mobj_unlock(new_shadow);
vmmap_destroy(&map);
curproc->p_vmmap = vmmap_create();
return 0;
}
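/*
Sketch of an additional check (not part of the original suite), using the
shadow_chain_length() helper defined above: build the same two-level shadow
chain as test_basic_shadow_collapse() and verify that the chain shrinks from
two shadow objects to one across shadow_collapse(). The locking and cleanup
mirror the test above.
*/
static long test_shadow_chain_length()
{
    vmmap_t *map = curproc->p_vmmap;
    vmarea_t *vma = NULL;
    long ret = vmmap_map(map, NULL, 0, 4, PROT_READ | PROT_WRITE,
                         MAP_ANON | MAP_PRIVATE, 0, VMMAP_DIR_HILO, &vma);
    KASSERT(ret == 0 && vma != NULL && vma->vma_obj != NULL);
    // as in the test above, vma_obj is a shadow object over the bottom anon object
    mobj_lock(vma->vma_obj);
    mobj_t *shadow = shadow_create(vma->vma_obj);
    mobj_unlock(vma->vma_obj);
    KASSERT(shadow != NULL);
    mobj_put(&vma->vma_obj);
    vma->vma_obj = shadow;
    // shadow -> old shadow -> anon_bottom_mobj
    test_assert(shadow_chain_length(shadow) == 2, "expected a chain of two shadow objects");
    shadow_collapse(shadow);
    // the middle shadow holds no pages, so collapsing should remove it
    test_assert(shadow_chain_length(shadow) == 1, "collapse should leave a single shadow object");
    mobj_unlock(shadow);
    vmmap_destroy(&curproc->p_vmmap);
    curproc->p_vmmap = vmmap_create();
    return 0;
}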
long test_vmmap_is_range_empty() {
vmmap_t *map = curproc->p_vmmap;
// Create 3 vmareas, with 16 pages each
size_t num_vmareas = 3;
size_t num_pages_per_vmarea = 16;
size_t prev_start = ADDR_TO_PN(USER_MEM_HIGH);
for (size_t i = 0; i < num_vmareas; i++) {
ssize_t start = vmmap_find_range(map, num_pages_per_vmarea, VMMAP_DIR_HILO);
test_assert(start + num_pages_per_vmarea == prev_start, "Incorrect return value from vmmap_find_range");
vmarea_t *vma = kmalloc(sizeof(vmarea_t));
KASSERT(vma && "Unable to alloc the vmarea");
memset(vma, 0, sizeof(vmarea_t));
vma->vma_start = start;
vma->vma_end = start + num_pages_per_vmarea;
vmmap_insert(map, vma);
prev_start = start;
}
size_t LP = ADDR_TO_PN(USER_MEM_LOW);
size_t HP = ADDR_TO_PN(USER_MEM_HIGH);
size_t section_start = HP - (num_vmareas * num_pages_per_vmarea);
dbg(DBG_TEST, "EMPTY EMPTY EMPTY EMPTY EMPTY [___][___[___]\n");
// test different overlap cases:
test_assert(vmmap_is_range_empty(map,
section_start - (num_pages_per_vmarea / 2), num_pages_per_vmarea / 2),
"does not catch no overlap case correctly");
test_assert(!vmmap_is_range_empty(map, section_start - 2, 4), "two pages should've overlapped");
test_assert(!vmmap_is_range_empty(map, section_start, 8), "overlap with half of the pages in vmarea");
test_assert(!vmmap_is_range_empty(map, section_start + 8, num_pages_per_vmarea), "overlap in both vmareas");
test_assert(!vmmap_is_range_empty(map, HP - 8, 7), "overlap with the highest vmarea");
test_assert(!vmmap_is_range_empty(map, HP - 2, 1), "overlap with the end of the highest vmarea");
test_assert(!vmmap_is_range_empty(map, section_start - 2, 4 + num_pages_per_vmarea), "overlap with both");
list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink) {
list_remove(&vma->vma_plink);
kfree(vma);
}
return 0;
}
long vmtest_main(long arg1, void* arg2) {
test_init();
test_vmmap();
test_vmmap_is_range_empty();
test_vmmap_write_read();
test_basic_shadow_collapse();
// Write your own tests here!
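// e.g. the chain-length sketch added above
test_shadow_chain_length();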
test_fini();
return 0;
}