@@ -115,7 +115,7 @@ const Fuzzer = struct {
     /// Tracks which PCs have been seen across all runs that do not crash the fuzzer process.
     /// Stored in a memory-mapped file so that it can be shared with other
     /// processes and viewed while the fuzzer is running.
-    seen_pcs: MemoryMappedList,
+    seen_pcs: std.fs.MemoryMap,
     cache_dir: std.fs.Dir,
     /// Identifies the file name that will be used to store coverage
     /// information, available to other processes.
@@ -229,11 +229,17 @@ const Fuzzer = struct {
         } else if (existing_len != bytes_len) {
             fatal("incompatible existing coverage file (differing lengths)", .{});
         }
-        f.seen_pcs = MemoryMappedList.init(coverage_file, existing_len, bytes_len) catch |err| {
+        f.seen_pcs = std.fs.MemoryMap.init(coverage_file, .{
+            .exclusivity = .shared,
+            .protection = .{ .write = true },
+            .length = bytes_len,
+        }) catch |err| {
             fatal("unable to init coverage memory map: {s}", .{@errorName(err)});
         };
         if (existing_len != 0) {
-            const existing_pcs_bytes = f.seen_pcs.items[@sizeOf(SeenPcsHeader) + @sizeOf(usize) * n_bitset_elems ..][0 .. flagged_pcs.len * @sizeOf(usize)];
+            const existing_pcs_start = @sizeOf(SeenPcsHeader) + @sizeOf(usize) * n_bitset_elems;
+            const existing_pcs_end = existing_pcs_start + flagged_pcs.len * @sizeOf(usize);
+            const existing_pcs_bytes = f.seen_pcs.mapped[existing_pcs_start..existing_pcs_end];
             const existing_pcs = std.mem.bytesAsSlice(usize, existing_pcs_bytes);
             for (existing_pcs, flagged_pcs, 0..) |old, new, i| {
                 if (old != new.addr) {
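The `std.fs.MemoryMap` API used above replaces the hand-rolled `MemoryMappedList` deleted at the end of this diff. A minimal sketch of the call, under the assumption that the option names (`.exclusivity`, `.protection`, `.length`) behave like their `std.posix.mmap` counterparts (MAP_SHARED, PROT_READ | PROT_WRITE); this API arrives with this change and is not in a tagged Zig release:

    const std = @import("std");

    /// Map an already-sized coverage file so writes are visible to any other
    /// process that maps the same file (a sketch, not the verbatim std code).
    fn mapShared(file: std.fs.File, len: usize) !std.fs.MemoryMap {
        return std.fs.MemoryMap.init(file, .{
            .exclusivity = .shared, // like MAP_SHARED: cross-process visibility
            .protection = .{ .write = true }, // writable (and readable) pages
            .length = len, // map exactly `len` bytes from offset 0
        });
    }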
@@ -249,11 +255,18 @@ const Fuzzer = struct {
                 .pcs_len = flagged_pcs.len,
                 .lowest_stack = std.math.maxInt(usize),
             };
-            f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&header));
-            f.seen_pcs.appendNTimesAssumeCapacity(0, n_bitset_elems * @sizeOf(usize));
-            for (flagged_pcs) |flagged_pc| {
-                f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&flagged_pc.addr));
-            }
+            f.seen_pcs.cast(SeenPcsHeader).* = header;
+            const bitset_elems_start = @sizeOf(SeenPcsHeader);
+            const bitset_elems_end = bitset_elems_start + n_bitset_elems * @sizeOf(usize);
+            const bitset_elems_bytes = f.seen_pcs.mapped[bitset_elems_start..bitset_elems_end];
+            const bitset_elems_dest = std.mem.bytesAsSlice(usize, bitset_elems_bytes);
+            @memset(bitset_elems_dest, 0);
+            const flagged_pcs_start = bitset_elems_end;
+            const flagged_pcs_end = flagged_pcs_start + flagged_pcs.len * @sizeOf(usize);
+            const flagged_pcs_bytes = f.seen_pcs.mapped[flagged_pcs_start..flagged_pcs_end];
+            const flagged_pcs_dest = std.mem.bytesAsSlice(usize, flagged_pcs_bytes);
+            for (flagged_pcs, flagged_pcs_dest) |item, *slot|
+                slot.* = item.addr;
         }
     }

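After this hunk the coverage file has a fixed layout: a `SeenPcsHeader`, then a bitset with one bit per flagged PC, then the PC addresses themselves. A worked sketch of the offsets, mirroring the names in the hunk (64-bit `usize` assumed; `SeenPcsHeader` is defined elsewhere in this file):

    // Byte offsets inside the mapping (all three regions are written above):
    //   [0, @sizeOf(SeenPcsHeader))   header: n_runs, unique_runs,
    //                                 pcs_len, lowest_stack
    //   [bitset_start, bitset_end)    seen-PC bitset, zeroed on creation
    //   [pcs_start, pcs_end)          flagged PC addresses
    const bitset_start = @sizeOf(SeenPcsHeader);
    const bitset_end = bitset_start + n_bitset_elems * @sizeOf(usize);
    const pcs_start = bitset_end;
    const pcs_end = pcs_start + flagged_pcs.len * @sizeOf(usize);
    // `pcs_end` equals `bytes_len`, the length the mapping was created with.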
@@ -306,7 +319,7 @@ const Fuzzer = struct {
         {
             // Track code coverage from all runs.
             comptime assert(SeenPcsHeader.trailing[0] == .pc_bits_usize);
-            const header_end_ptr: [*]volatile usize = @ptrCast(f.seen_pcs.items[@sizeOf(SeenPcsHeader)..]);
+            const header_end_ptr: [*]volatile usize = @ptrCast(f.seen_pcs.mapped[@sizeOf(SeenPcsHeader)..]);
             const remainder = f.flagged_pcs.len % @bitSizeOf(usize);
             const aligned_len = f.flagged_pcs.len - remainder;
             const seen_pcs = header_end_ptr[0..aligned_len];
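The body of this block is unchanged and therefore elided from the diff. For context, a hedged sketch of the kind of merge it performs: each run's hit PCs are packed into `usize` words and OR-ed into the shared bitset atomically, so concurrent fuzzer processes cannot clobber each other's bits (the `f.pc_counters` indexing here is illustrative):

    for (seen_pcs, 0..) |*elem, elem_index| {
        var bits: usize = 0;
        for (0..@bitSizeOf(usize)) |bit| {
            // A PC counts as seen if its SanitizerCoverage counter is nonzero.
            if (f.pc_counters[elem_index * @bitSizeOf(usize) + bit] != 0)
                bits |= @as(usize, 1) << @intCast(bit);
        }
        _ = @atomicRmw(usize, elem, .Or, bits, .monotonic);
    }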
@@ -330,7 +343,7 @@ const Fuzzer = struct {
             }
         }

-        const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
+        const header = f.seen_pcs.cast(SeenPcsHeader);
         _ = @atomicRmw(usize, &header.unique_runs, .Add, 1, .monotonic);
     }
@@ -360,7 +373,7 @@ const Fuzzer = struct {
         try f.mutate();

         f.n_runs += 1;
-        const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
+        const header = f.seen_pcs.cast(SeenPcsHeader);
         _ = @atomicRmw(usize, &header.n_runs, .Add, 1, .monotonic);
         _ = @atomicRmw(usize, &header.lowest_stack, .Min, __sancov_lowest_stack, .monotonic);
         @memset(f.pc_counters, 0);
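Since the header lives in a shared file mapping, any other process can open the same coverage file and watch these counters move while the fuzzer runs. A hypothetical reader sketch: only `.cast` and the header field names are taken from this diff, and leaving `.protection` at its (assumed read-only) default is a guess:

    var map = try std.fs.MemoryMap.init(coverage_file, .{
        .exclusivity = .shared,
        .length = bytes_len,
    });
    const hdr = map.cast(SeenPcsHeader);
    // Monotonic atomic loads pair with the fuzzer's monotonic RMWs above.
    const runs = @atomicLoad(usize, &hdr.n_runs, .monotonic);
    const unique = @atomicLoad(usize, &hdr.unique_runs, .monotonic);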
@@ -468,53 +481,3 @@ export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void {

     fuzzer.init(cache_dir) catch |err| fatal("unable to init fuzzer: {s}", .{@errorName(err)});
 }
-
-/// Like `std.ArrayListUnmanaged(u8)` but backed by memory mapping.
-pub const MemoryMappedList = struct {
-    /// Contents of the list.
-    ///
-    /// Pointers to elements in this slice are invalidated by various functions
-    /// of this ArrayList in accordance with the respective documentation. In
-    /// all cases, "invalidated" means that the memory has been passed to this
-    /// allocator's resize or free function.
-    items: []align(std.mem.page_size) volatile u8,
-    /// How many bytes this list can hold without allocating additional memory.
-    capacity: usize,
-
-    pub fn init(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
-        const ptr = try std.posix.mmap(
-            null,
-            capacity,
-            std.posix.PROT.READ | std.posix.PROT.WRITE,
-            .{ .TYPE = .SHARED },
-            file.handle,
-            0,
-        );
-        return .{
-            .items = ptr[0..length],
-            .capacity = capacity,
-        };
-    }
-
-    /// Append the slice of items to the list.
-    /// Asserts that the list can hold the additional items.
-    pub fn appendSliceAssumeCapacity(l: *MemoryMappedList, items: []const u8) void {
-        const old_len = l.items.len;
-        const new_len = old_len + items.len;
-        assert(new_len <= l.capacity);
-        l.items.len = new_len;
-        @memcpy(l.items[old_len..][0..items.len], items);
-    }
-
-    /// Append a value to the list `n` times.
-    /// Never invalidates element pointers.
-    /// The function is inline so that a comptime-known `value` parameter will
-    /// have better memset codegen in case it has a repeated byte pattern.
-    /// Asserts that the list can hold the additional items.
-    pub inline fn appendNTimesAssumeCapacity(l: *MemoryMappedList, value: u8, n: usize) void {
-        const new_len = l.items.len + n;
-        assert(new_len <= l.capacity);
-        @memset(l.items.ptr[l.items.len..new_len], value);
-        l.items.len = new_len;
-    }
-};
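For reference, the `cast` helper used throughout the new code presumably reinterprets the front of the mapping as a typed pointer. A plausible shape, assuming `mapped` is the page-aligned byte slice the hunks above index into (a sketch, not the actual std implementation):

    pub fn cast(mm: *MemoryMap, comptime T: type) *volatile T {
        // Assumes the mapping is at least large enough to hold one T.
        std.debug.assert(mm.mapped.len >= @sizeOf(T));
        return @ptrCast(@alignCast(mm.mapped.ptr));
    }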