GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/crates/cache/src/worker/tests.rs
use super::*;
use crate::config::tests::test_prolog;
use std::iter::repeat;
use std::process;
// load_config! comes from crate::cache(::config::tests);

// when doing anything with the tests, make sure they are DETERMINISTIC
// -- the result shouldn't rely on system time!
pub mod system_time_stub;
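// note: system_time_stub is expected to provide the stubbed NOW instant used by
// create_file_with_mtime at the bottom of this file, so every mtime written or
// compared in these tests is relative to a fixed point in time, not the real clock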

#[test]
fn test_on_get_create_stats_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    worker.on_cache_get_async(mod_file);
    worker.wait_for_all_events_handled();
    assert_eq!(worker.events_dropped(), 0);

    let stats_file = cache_dir.join("some-mod.stats");
    let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
    assert_eq!(stats.usages, 1);
    assert_eq!(
        stats.compression_level,
        cache_config.baseline_compression_level()
    );
}

#[test]
fn test_on_get_update_usage_counter() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let default_stats = ModuleCacheStatistics::default(&cache_config);
    assert!(write_stats_file(&stats_file, &default_stats));

    let mut usages = 0;
    for times_used in &[4, 7, 2] {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
    }
}

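// the test below starts at usages = 250 with a threshold of 256; the gets push the
// counter past the threshold, but since no module file exists on disk there is
// nothing to recompress, so the recorded level should stay at the baseline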
#[test]
fn test_on_get_recompress_no_mod_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    start_stats.usages = 250;
    assert!(write_stats_file(&stats_file, &start_stats));

    let mut usages = start_stats.usages;
    for times_used in &[4, 7, 2] {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
        assert_eq!(
            stats.compression_level,
            cache_config.baseline_compression_level()
        );
    }
}

#[test]
fn test_on_get_recompress_with_mod_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let mod_data = "some test data to be compressed";
    let data = zstd::encode_all(
        mod_data.as_bytes(),
        cache_config.baseline_compression_level(),
    )
    .expect("Failed to compress sample mod file");
    fs::write(&mod_file, &data).expect("Failed to write sample mod file");

    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    start_stats.usages = 250;
    assert!(write_stats_file(&stats_file, &start_stats));

    // scenarios:
    // 1. Shouldn't be recompressed
    // 2. Should be recompressed
    // 3. After lowering compression level, should be recompressed
    let scenarios = [(4, false), (7, true), (2, false)];

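    // with the numbers above: 250 + 4 = 254 stays below the threshold of 256 (baseline
    // level kept), 254 + 7 = 261 crosses it (recompressed at the optimized level), and
    // after the recorded level is lowered by one, the final 2 gets should bring the
    // module back up to the optimized level again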
    let mut usages = start_stats.usages;
    assert!(usages < cache_config.optimized_compression_usage_counter_threshold());
    let mut tested_higher_opt_compr_lvl = false;
    for (times_used, lower_compr_lvl) in &scenarios {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let mut stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
        assert_eq!(
            stats.compression_level,
            if usages < cache_config.optimized_compression_usage_counter_threshold() {
                cache_config.baseline_compression_level()
            } else {
                cache_config.optimized_compression_level()
            }
        );
        let compressed_data = fs::read(&mod_file).expect("Failed to read mod file");
        let decoded_data =
            zstd::decode_all(&compressed_data[..]).expect("Failed to decompress mod file");
        assert_eq!(decoded_data, mod_data.as_bytes());

        if *lower_compr_lvl {
            assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
            tested_higher_opt_compr_lvl = true;
            stats.compression_level -= 1;
            assert!(write_stats_file(&stats_file, &stats));
        }
    }
    assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
    assert!(tested_higher_opt_compr_lvl);
}

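// a `some-mod.wip-lock` file next to the module marks an optimized recompression in
// progress; in the scenarios below a lock counts as valid while it is younger than the
// 30m optimizing-compression-task-timeout (or, for future mtimes, within the 1d allowed
// clock drift), and a valid lock should keep the worker from recompressing even though
// the usage counter crosses the threshold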
#[test]
fn test_on_get_recompress_lock() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'\n\
         optimizing-compression-task-timeout = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let mod_data = "some test data to be compressed";
    let data = zstd::encode_all(
        mod_data.as_bytes(),
        cache_config.baseline_compression_level(),
    )
    .expect("Failed to compress sample mod file");
    fs::write(&mod_file, &data).expect("Failed to write sample mod file");

    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    start_stats.usages = 255;

    let lock_file = cache_dir.join("some-mod.wip-lock");

    let scenarios = [
        // valid lock
        (true, "past", Duration::from_secs(30 * 60 - 1)),
        // valid future lock
        (true, "future", Duration::from_secs(24 * 60 * 60)),
        // expired lock
        (false, "past", Duration::from_secs(30 * 60)),
        // expired future lock
        (false, "future", Duration::from_secs(24 * 60 * 60 + 1)),
    ];

    for (lock_valid, duration_sign, duration) in &scenarios {
        assert!(write_stats_file(&stats_file, &start_stats)); // restore usage & compression level
        create_file_with_mtime(&lock_file, "", duration_sign, &duration);

        worker.on_cache_get_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, start_stats.usages + 1);
        assert_eq!(
            stats.compression_level,
            if *lock_valid {
                cache_config.baseline_compression_level()
            } else {
                cache_config.optimized_compression_level()
            }
        );
        let compressed_data = fs::read(&mod_file).expect("Failed to read mod file");
        let decoded_data =
            zstd::decode_all(&compressed_data[..]).expect("Failed to decompress mod file");
        assert_eq!(decoded_data, mod_data.as_bytes());
    }
}

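// the `.cleanup.wip-done` certificate written below with a future mtime should keep the
// worker from starting its own cleanup pass, which is why the test can assert that the
// worker's `.cleanup.wip-*` lock file never appears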
#[test]
fn test_on_update_fresh_stats_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         cleanup-interval = '1h'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let cleanup_certificate = cache_dir.join(".cleanup.wip-done");
    create_file_with_mtime(&cleanup_certificate, "", "future", &Duration::from_secs(0));
    // the file below is created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // scenarios:
    // 1. Create new stats file
    // 2. Overwrite existing file
    for update_file in &[true, false] {
        worker.on_cache_update_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let mut stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, 1);
        assert_eq!(
            stats.compression_level,
            cache_config.baseline_compression_level()
        );

        if *update_file {
            stats.usages += 42;
            stats.compression_level += 1;
            assert!(write_stats_file(&stats_file, &stats));
        }

        assert!(!worker_lock_file.exists());
    }
}

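// the cleanup pass exercised below is expected to delete unrecognized (trash) files and
// directories anywhere under the cache dir, drop only the expired `*.wip-lock` files,
// and then trim cached modules against the soft limits, oldest first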
#[test]
fn test_on_update_cleanup_limits_trash_locks() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         cleanup-interval = '30m'\n\
         optimizing-compression-task-timeout = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'\n\
         file-count-soft-limit = '5'\n\
         files-total-size-soft-limit = '30K'\n\
         file-count-limit-percent-if-deleting = '70%'\n\
         files-total-size-limit-percent-if-deleting = '70%'
         ",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);
    let content_10k = "a".repeat(10_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    let mod_with_stats = mods_files_dir.join("mod-with-stats");
    let trash_dirs = [
        mods_files_dir.join("trash"),
        mods_files_dir.join("trash").join("trash"),
    ];
    let trash_files = [
        cache_dir.join("trash-file"),
        cache_dir.join("trash-file.wip-lock"),
        cache_dir.join("target-triple").join("trash.txt"),
        cache_dir.join("target-triple").join("trash.txt.wip-lock"),
        mods_files_dir.join("trash.ogg"),
        mods_files_dir.join("trash").join("trash.doc"),
        mods_files_dir.join("trash").join("trash.doc.wip-lock"),
        mods_files_dir.join("trash").join("trash").join("trash.xls"),
        mods_files_dir
            .join("trash")
            .join("trash")
            .join("trash.xls.wip-lock"),
    ];
    let mod_locks = [
        // valid lock
        (
            mods_files_dir.join("mod0.wip-lock"),
            true,
            "past",
            Duration::from_secs(30 * 60 - 1),
        ),
        // valid future lock
        (
            mods_files_dir.join("mod1.wip-lock"),
            true,
            "future",
            Duration::from_secs(24 * 60 * 60),
        ),
        // expired lock
        (
            mods_files_dir.join("mod2.wip-lock"),
            false,
            "past",
            Duration::from_secs(30 * 60),
        ),
        // expired future lock
        (
            mods_files_dir.join("mod3.wip-lock"),
            false,
            "future",
            Duration::from_secs(24 * 60 * 60 + 1),
        ),
    ];
    // the file below is created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    let scenarios = [
        // Close to limits, but not reached, only trash deleted
        (2, 2, 4),
        // File count limit exceeded
        (1, 10, 3),
        // Total size limit exceeded
        (4, 0, 2),
        // Both limits exceeded
        (3, 5, 3),
    ];

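    // rough arithmetic for the scenarios: the soft limits are 5 files / 30K, and once
    // either is exceeded the cleanup appears to keep only ~70% of that budget, newest
    // files first -- e.g. (1, 10, 3): 11 files exceed the count limit, so the 3 newest
    // 1K files survive; (4, 0, 2): 40K exceeds the size limit, so only the 2 newest 10K
    // files (20K) still fit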
    for (files_10k, files_1k, remaining_files) in &scenarios {
        let mut secs_ago = 100;

        for d in &trash_dirs {
            fs::create_dir_all(d).expect("Failed to create directories");
        }
        for f in &trash_files {
            create_file_with_mtime(f, "", "past", &Duration::from_secs(0));
        }
        for (f, _, sign, duration) in &mod_locks {
            create_file_with_mtime(f, "", sign, &duration);
        }

        let mut mods_paths = vec![];
        for content in repeat(&content_10k)
            .take(*files_10k)
            .chain(repeat(&content_1k).take(*files_1k))
        {
            mods_paths.push(mods_files_dir.join(format!("test-mod-{}", mods_paths.len())));
            create_file_with_mtime(
                mods_paths.last().unwrap(),
                content,
                "past",
                &Duration::from_secs(secs_ago),
            );
            assert!(secs_ago > 0);
            secs_ago -= 1;
        }

        // creating a .stats file updates its mtime, which would affect the test results,
        // so we use a separate, nonexistent module here (its orphaned .stats will be removed anyway)
        worker.on_cache_update_async(mod_with_stats.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        for ent in trash_dirs.iter().chain(trash_files.iter()) {
            assert!(!ent.exists());
        }
        for (f, valid, ..) in &mod_locks {
            assert_eq!(f.exists(), *valid);
        }
        for (idx, path) in mods_paths.iter().enumerate() {
            let should_exist = idx >= mods_paths.len() - *remaining_files;
            assert_eq!(path.exists(), should_exist);
            if should_exist {
                // cleanup before next iteration
                fs::remove_file(path).expect("Failed to remove a file");
            }
        }
        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

#[test]
fn test_on_update_cleanup_lru_policy() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         file-count-soft-limit = '5'\n\
         files-total-size-soft-limit = '30K'\n\
         file-count-limit-percent-if-deleting = '80%'\n\
         files-total-size-limit-percent-if-deleting = '70%'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);
    let content_5k = "a".repeat(5_000);
    let content_10k = "a".repeat(10_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    fs::create_dir_all(&mods_files_dir).expect("Failed to create directories");
    let nonexistent_mod_file = cache_dir.join("nonexistent-mod");
    let orphaned_stats_file = cache_dir.join("orphaned-mod.stats");
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // content, how long ago created, how long ago stats created (if created), should be alive
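    // the expectations below are consistent with cleanup treating a module's "last used"
    // time as the newer of the module's and its .stats file's mtime, so an old module
    // with a recent .stats entry can outlive newer modules that have none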
    let scenarios = [
        &[
            (&content_10k, 29, None, false),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, false),
            (&content_1k, 26, None, true),
            (&content_10k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, None, false),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, true),
            (&content_1k, 26, None, true),
            (&content_5k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, false),
            (&content_1k, 26, Some(18), true),
            (&content_5k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, Some(18), true),
            (&content_10k, 27, None, false),
            (&content_1k, 26, Some(17), true),
            (&content_5k, 25, None, false),
            (&content_1k, 24, None, false),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, None, false),
            (&content_1k, 27, None, false),
            (&content_5k, 26, Some(18), true),
            (&content_1k, 25, None, false),
            (&content_10k, 24, None, false),
        ],
    ];

    for mods in &scenarios {
        let filenames = (0..mods.len())
            .map(|i| {
                (
                    mods_files_dir.join(format!("mod-{i}")),
                    mods_files_dir.join(format!("mod-{i}.stats")),
                )
            })
            .collect::<Vec<_>>();

        for ((content, mod_secs_ago, create_stats, _), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            create_file_with_mtime(
                mod_filename,
                content,
                "past",
                &Duration::from_secs(*mod_secs_ago),
            );
            if let Some(stats_secs_ago) = create_stats {
                create_file_with_mtime(
                    stats_filename,
                    "cleanup doesn't care",
                    "past",
                    &Duration::from_secs(*stats_secs_ago),
                );
            }
        }
        create_file_with_mtime(
            &orphaned_stats_file,
            "cleanup doesn't care",
            "past",
            &Duration::from_secs(0),
        );

        worker.on_cache_update_async(nonexistent_mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        assert!(!orphaned_stats_file.exists());
        for ((_, _, create_stats, alive), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            assert_eq!(mod_filename.exists(), *alive);
            assert_eq!(stats_filename.exists(), *alive && create_stats.is_some());

            // cleanup for next iteration
            if *alive {
                fs::remove_file(&mod_filename).expect("Failed to remove a file");
                if create_stats.is_some() {
                    fs::remove_file(&stats_filename).expect("Failed to remove a file");
                }
            }
        }

        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

// clock drift should be applied to the mod cache & stats files, too;
// however, deleting such files is postponed for as long as possible
#[test]
fn test_on_update_cleanup_future_files() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         allowed-clock-drift-for-files-from-future = '1d'\n\
         file-count-soft-limit = '3'\n\
         files-total-size-soft-limit = '1M'\n\
         file-count-limit-percent-if-deleting = '70%'\n\
         files-total-size-limit-percent-if-deleting = '70%'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    fs::create_dir_all(&mods_files_dir).expect("Failed to create directories");
    let nonexistent_mod_file = cache_dir.join("nonexistent-mod");
    // the file below is created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    let scenarios: [&[_]; 5] = [
        // NOT cleaning up, everything is ok
        &[
            (Duration::from_secs(0), None, true),
            (Duration::from_secs(24 * 60 * 60), None, true),
        ],
        // NOT cleaning up, everything is ok
        &[
            (Duration::from_secs(0), None, true),
            (Duration::from_secs(24 * 60 * 60 + 1), None, true),
        ],
        // cleaning up, removing files from oldest
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(24 * 60 * 60), None, true),
            (Duration::from_secs(1), None, false),
            (Duration::from_secs(2), None, true),
        ],
        // cleaning up, removing files from oldest; deleting file from far future
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(1), None, true),
            (Duration::from_secs(24 * 60 * 60 + 1), None, false),
            (Duration::from_secs(2), None, true),
        ],
        // cleaning up, removing files from oldest; file from far future should have .stats from +-now => it's a legitimate file
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(1), None, false),
            (
                Duration::from_secs(24 * 60 * 60 + 1),
                Some(Duration::from_secs(3)),
                true,
            ),
            (Duration::from_secs(2), None, true),
        ],
    ];

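    // each tuple above is (module mtime offset into the future, optional offset for a
    // matching .stats file, expected to survive); files dated further into the future
    // than the 1d allowed clock drift look bogus, but a .stats file with a plausible
    // mtime should vouch for the module, and nothing is removed until the soft limits
    // actually force a cleanup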
    for mods in &scenarios {
        let filenames = (0..mods.len())
            .map(|i| {
                (
                    mods_files_dir.join(format!("mod-{i}")),
                    mods_files_dir.join(format!("mod-{i}.stats")),
                )
            })
            .collect::<Vec<_>>();

        for ((duration, opt_stats_duration, _), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            create_file_with_mtime(mod_filename, &content_1k, "future", duration);
            if let Some(stats_duration) = opt_stats_duration {
                create_file_with_mtime(stats_filename, "", "future", stats_duration);
            }
        }

        worker.on_cache_update_async(nonexistent_mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        for ((_, opt_stats_duration, alive), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            assert_eq!(mod_filename.exists(), *alive);
            assert_eq!(
                stats_filename.exists(),
                *alive && opt_stats_duration.is_some()
            );
            if *alive {
                fs::remove_file(mod_filename).expect("Failed to remove a file");
                if opt_stats_duration.is_some() {
                    fs::remove_file(stats_filename).expect("Failed to remove a file");
                }
            }
        }

        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

// this tests whether the worker triggers its own cleanup when a cleanup lock/certificate is already present
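// the trash file is the observable signal: while the foreign `.cleanup.wip-lock` is
// still valid the worker should skip cleanup, so both the trash file and the lock
// survive; once the lock has expired, cleanup runs and removes them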
#[test]
fn test_on_update_cleanup_self_lock() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         cleanup-interval = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let trash_file = cache_dir.join("trash-file.txt");

    let lock_file = cache_dir.join(".cleanup.wip-lock");
    // the file below is created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    let scenarios = [
        // valid lock
        (true, "past", Duration::from_secs(30 * 60 - 1)),
        // valid future lock
        (true, "future", Duration::from_secs(24 * 60 * 60)),
        // expired lock
        (false, "past", Duration::from_secs(30 * 60)),
        // expired future lock
        (false, "future", Duration::from_secs(24 * 60 * 60 + 1)),
    ];

    for (lock_valid, duration_sign, duration) in &scenarios {
        create_file_with_mtime(
            &trash_file,
            "with trash content",
            "future",
            &Duration::from_secs(0),
        );
        create_file_with_mtime(&lock_file, "", duration_sign, &duration);

        worker.on_cache_update_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        assert_eq!(trash_file.exists(), *lock_valid);
        assert_eq!(lock_file.exists(), *lock_valid);
        if *lock_valid {
            assert!(!worker_lock_file.exists());
        } else {
            fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
        }
    }
}

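// helper: writes `contents` to `filename`, then back-dates ("past") or forward-dates
// ("future") its mtime by `offset` relative to the stubbed NOW, so tests can fabricate
// files that look arbitrarily old or far in the future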
fn create_file_with_mtime(filename: &Path, contents: &str, offset_sign: &str, offset: &Duration) {
    fs::write(filename, contents).expect("Failed to create a file");
    let mtime = match offset_sign {
        "past" => system_time_stub::NOW
            .checked_sub(*offset)
            .expect("Failed to calculate new mtime"),
        "future" => system_time_stub::NOW
            .checked_add(*offset)
            .expect("Failed to calculate new mtime"),
        _ => unreachable!(),
    };
    filetime::set_file_mtime(filename, mtime.into()).expect("Failed to set mtime");
}