// Path: blob/main/crates/cache/src/worker/tests.rs
use super::*;
use crate::config::tests::test_prolog;
use std::iter::repeat;
use std::process;
// load_config! comes from crate::cache(::config::tests);

// when doing anything with the tests, make sure they are DETERMINISTIC
// -- the result shouldn't rely on system time!
pub mod system_time_stub;

// A cache-get on a module with no .stats file should create one holding a
// single usage and the configured baseline compression level.
#[test]
fn test_on_get_create_stats_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    worker.on_cache_get_async(mod_file);
    worker.wait_for_all_events_handled();
    assert_eq!(worker.events_dropped(), 0);

    let stats_file = cache_dir.join("some-mod.stats");
    let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
    assert_eq!(stats.usages, 1);
    assert_eq!(
        stats.compression_level,
        cache_config.baseline_compression_level()
    );
}

// Repeated cache-gets on the same module should accumulate in the .stats
// usage counter exactly once per event, with no events dropped.
#[test]
fn test_on_get_update_usage_counter() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let default_stats = ModuleCacheStatistics::default(&cache_config);
    assert!(write_stats_file(&stats_file, &default_stats));

    let mut usages = 0;
    for times_used in &[4, 7, 2] {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
    }
}

// When the usage counter crosses the recompression threshold but the module
// file itself is missing, the recorded compression level must stay at the
// baseline (nothing to recompress).
#[test]
fn test_on_get_recompress_no_mod_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    // start close to the threshold (256) so the loop below crosses it
    start_stats.usages = 250;
    assert!(write_stats_file(&stats_file, &start_stats));

    let mut usages = start_stats.usages;
    for times_used in &[4, 7, 2] {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
        assert_eq!(
            stats.compression_level,
            cache_config.baseline_compression_level()
        );
    }
}

// With a real (zstd-compressed) module file present, crossing the usage
// threshold should recompress it at the optimized level, and a manually
// lowered compression level should trigger recompression again.
#[test]
fn test_on_get_recompress_with_mod_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let mod_data = "some test data to be compressed";
    let data = zstd::encode_all(
        mod_data.as_bytes(),
        cache_config.baseline_compression_level(),
    )
    .expect("Failed to compress sample mod file");
    fs::write(&mod_file, &data).expect("Failed to write sample mod file");

    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    start_stats.usages = 250;
    assert!(write_stats_file(&stats_file, &start_stats));

    // scenarios:
    // 1. Shouldn't be recompressed
    // 2. Should be recompressed
    // 3. After lowering compression level, should be recompressed
    let scenarios = [(4, false), (7, true), (2, false)];

    let mut usages = start_stats.usages;
    assert!(usages < cache_config.optimized_compression_usage_counter_threshold());
    let mut tested_higher_opt_compr_lvl = false;
    for (times_used, lower_compr_lvl) in &scenarios {
        for _ in 0..*times_used {
            worker.on_cache_get_async(mod_file.clone());
            usages += 1;
        }

        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let mut stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, usages);
        assert_eq!(
            stats.compression_level,
            if usages < cache_config.optimized_compression_usage_counter_threshold() {
                cache_config.baseline_compression_level()
            } else {
                cache_config.optimized_compression_level()
            }
        );
        // regardless of the level used, the payload must round-trip intact
        let compressed_data = fs::read(&mod_file).expect("Failed to read mod file");
        let decoded_data =
            zstd::decode_all(&compressed_data[..]).expect("Failed to decompress mod file");
        assert_eq!(decoded_data, mod_data.as_bytes());

        if *lower_compr_lvl {
            assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
            tested_higher_opt_compr_lvl = true;
            // force a lower recorded level so the next iteration recompresses
            stats.compression_level -= 1;
            assert!(write_stats_file(&stats_file, &stats));
        }
    }
    assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
    assert!(tested_higher_opt_compr_lvl);
}

// A .wip-lock next to the module should block recompression while the lock's
// mtime is within the task timeout (or allowed clock drift for future
// mtimes), and be ignored once expired.
#[test]
fn test_on_get_recompress_lock() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         optimized-compression-usage-counter-threshold = '256'\n\
         optimizing-compression-task-timeout = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let mod_data = "some test data to be compressed";
    let data = zstd::encode_all(
        mod_data.as_bytes(),
        cache_config.baseline_compression_level(),
    )
    .expect("Failed to compress sample mod file");
    fs::write(&mod_file, &data).expect("Failed to write sample mod file");

    let stats_file = cache_dir.join("some-mod.stats");
    let mut start_stats = ModuleCacheStatistics::default(&cache_config);
    // one usage below the threshold (256): the single get below crosses it
    start_stats.usages = 255;

    let lock_file = cache_dir.join("some-mod.wip-lock");

    let scenarios = [
        // valid lock
        (true, "past", Duration::from_secs(30 * 60 - 1)),
        // valid future lock
        (true, "future", Duration::from_secs(24 * 60 * 60)),
        // expired lock
        (false, "past", Duration::from_secs(30 * 60)),
        // expired future lock
        (false, "future", Duration::from_secs(24 * 60 * 60 + 1)),
    ];

    for (lock_valid, duration_sign, duration) in &scenarios {
        assert!(write_stats_file(&stats_file, &start_stats)); // restore usage & compression level
        create_file_with_mtime(&lock_file, "", duration_sign, &duration);

        worker.on_cache_get_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, start_stats.usages + 1);
        assert_eq!(
            stats.compression_level,
            if *lock_valid {
                cache_config.baseline_compression_level()
            } else {
                cache_config.optimized_compression_level()
            }
        );
        let compressed_data = fs::read(&mod_file).expect("Failed to read mod file");
        let decoded_data =
            zstd::decode_all(&compressed_data[..]).expect("Failed to decompress mod file");
        assert_eq!(decoded_data, mod_data.as_bytes());
    }
}

// A cache-update should (re)write a fresh .stats file with one usage at the
// baseline level; a fresh cleanup certificate must keep the worker from
// starting its own cleanup (no worker lock file appears).
#[test]
fn test_on_update_fresh_stats_file() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         baseline-compression-level = 3\n\
         optimized-compression-level = 7\n\
         cleanup-interval = '1h'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let stats_file = cache_dir.join("some-mod.stats");
    let cleanup_certificate = cache_dir.join(".cleanup.wip-done");
    create_file_with_mtime(&cleanup_certificate, "", "future", &Duration::from_secs(0));
    // the below created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // scenarios:
    // 1. Create new stats file
    // 2. Overwrite existing file
    for update_file in &[true, false] {
        worker.on_cache_update_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        let mut stats = read_stats_file(&stats_file).expect("Failed to read stats file");
        assert_eq!(stats.usages, 1);
        assert_eq!(
            stats.compression_level,
            cache_config.baseline_compression_level()
        );

        if *update_file {
            // scribble on the stats so the next update must overwrite them
            stats.usages += 42;
            stats.compression_level += 1;
            assert!(write_stats_file(&stats_file, &stats));
        }

        assert!(!worker_lock_file.exists());
    }
}

// Cleanup triggered by a cache-update should delete unrecognized trash
// files/dirs and expired .wip-lock files, and enforce the file-count and
// total-size soft limits by evicting the oldest module files first.
#[test]
fn test_on_update_cleanup_limits_trash_locks() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         cleanup-interval = '30m'\n\
         optimizing-compression-task-timeout = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'\n\
         file-count-soft-limit = '5'\n\
         files-total-size-soft-limit = '30K'\n\
         file-count-limit-percent-if-deleting = '70%'\n\
         files-total-size-limit-percent-if-deleting = '70%'
         ",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);
    let content_10k = "a".repeat(10_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    let mod_with_stats = mods_files_dir.join("mod-with-stats");
    let trash_dirs = [
        mods_files_dir.join("trash"),
        mods_files_dir.join("trash").join("trash"),
    ];
    let trash_files = [
        cache_dir.join("trash-file"),
        cache_dir.join("trash-file.wip-lock"),
        cache_dir.join("target-triple").join("trash.txt"),
        cache_dir.join("target-triple").join("trash.txt.wip-lock"),
        mods_files_dir.join("trash.ogg"),
        mods_files_dir.join("trash").join("trash.doc"),
        mods_files_dir.join("trash").join("trash.doc.wip-lock"),
        mods_files_dir.join("trash").join("trash").join("trash.xls"),
        mods_files_dir
            .join("trash")
            .join("trash")
            .join("trash.xls.wip-lock"),
    ];
    let mod_locks = [
        // valid lock
        (
            mods_files_dir.join("mod0.wip-lock"),
            true,
            "past",
            Duration::from_secs(30 * 60 - 1),
        ),
        // valid future lock
        (
            mods_files_dir.join("mod1.wip-lock"),
            true,
            "future",
            Duration::from_secs(24 * 60 * 60),
        ),
        // expired lock
        (
            mods_files_dir.join("mod2.wip-lock"),
            false,
            "past",
            Duration::from_secs(30 * 60),
        ),
        // expired future lock
        (
            mods_files_dir.join("mod3.wip-lock"),
            false,
            "future",
            Duration::from_secs(24 * 60 * 60 + 1),
        ),
    ];
    // the below created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // (number of 10K files, number of 1K files, files expected to survive)
    let scenarios = [
        // Close to limits, but not reached, only trash deleted
        (2, 2, 4),
        // File count limit exceeded
        (1, 10, 3),
        // Total size limit exceeded
        (4, 0, 2),
        // Both limits exceeded
        (3, 5, 3),
    ];

    for (files_10k, files_1k, remaining_files) in &scenarios {
        let mut secs_ago = 100;

        for d in &trash_dirs {
            fs::create_dir_all(d).expect("Failed to create directories");
        }
        for f in &trash_files {
            create_file_with_mtime(f, "", "past", &Duration::from_secs(0));
        }
        for (f, _, sign, duration) in &mod_locks {
            create_file_with_mtime(f, "", sign, &duration);
        }

        // module files are created oldest-first (decreasing secs_ago), so
        // eviction by age removes them from the front of mods_paths
        let mut mods_paths = vec![];
        for content in repeat(&content_10k)
            .take(*files_10k)
            .chain(repeat(&content_1k).take(*files_1k))
        {
            mods_paths.push(mods_files_dir.join(format!("test-mod-{}", mods_paths.len())));
            create_file_with_mtime(
                mods_paths.last().unwrap(),
                content,
                "past",
                &Duration::from_secs(secs_ago),
            );
            assert!(secs_ago > 0);
            secs_ago -= 1;
        }

        // creating .stats file updates mtime what affects test results
        // so we use a separate nonexistent module here (orphaned .stats will be removed anyway)
        worker.on_cache_update_async(mod_with_stats.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        for ent in trash_dirs.iter().chain(trash_files.iter()) {
            assert!(!ent.exists());
        }
        for (f, valid, ..) in &mod_locks {
            assert_eq!(f.exists(), *valid);
        }
        for (idx, path) in mods_paths.iter().enumerate() {
            let should_exist = idx >= mods_paths.len() - *remaining_files;
            assert_eq!(path.exists(), should_exist);
            if should_exist {
                // cleanup before next iteration
                fs::remove_file(path).expect("Failed to remove a file");
            }
        }
        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

// LRU eviction order: a module's "recency" is the newer of its own mtime and
// its .stats mtime, so modules with fresh .stats outlive older-looking ones;
// orphaned .stats files are always removed.
#[test]
fn test_on_update_cleanup_lru_policy() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         file-count-soft-limit = '5'\n\
         files-total-size-soft-limit = '30K'\n\
         file-count-limit-percent-if-deleting = '80%'\n\
         files-total-size-limit-percent-if-deleting = '70%'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);
    let content_5k = "a".repeat(5_000);
    let content_10k = "a".repeat(10_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    fs::create_dir_all(&mods_files_dir).expect("Failed to create directories");
    let nonexistent_mod_file = cache_dir.join("nonexistent-mod");
    let orphaned_stats_file = cache_dir.join("orphaned-mod.stats");
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // content, how long ago created, how long ago stats created (if created), should be alive
    let scenarios = [
        &[
            (&content_10k, 29, None, false),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, false),
            (&content_1k, 26, None, true),
            (&content_10k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, None, false),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, true),
            (&content_1k, 26, None, true),
            (&content_5k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, None, false),
            (&content_10k, 27, None, false),
            (&content_1k, 26, Some(18), true),
            (&content_5k, 25, None, true),
            (&content_1k, 24, None, true),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, Some(18), true),
            (&content_10k, 27, None, false),
            (&content_1k, 26, Some(17), true),
            (&content_5k, 25, None, false),
            (&content_1k, 24, None, false),
        ],
        &[
            (&content_10k, 29, Some(19), true),
            (&content_10k, 28, None, false),
            (&content_1k, 27, None, false),
            (&content_5k, 26, Some(18), true),
            (&content_1k, 25, None, false),
            (&content_10k, 24, None, false),
        ],
    ];

    for mods in &scenarios {
        let filenames = (0..mods.len())
            .map(|i| {
                (
                    mods_files_dir.join(format!("mod-{i}")),
                    mods_files_dir.join(format!("mod-{i}.stats")),
                )
            })
            .collect::<Vec<_>>();

        for ((content, mod_secs_ago, create_stats, _), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            create_file_with_mtime(
                mod_filename,
                content,
                "past",
                &Duration::from_secs(*mod_secs_ago),
            );
            if let Some(stats_secs_ago) = create_stats {
                create_file_with_mtime(
                    stats_filename,
                    "cleanup doesn't care",
                    "past",
                    &Duration::from_secs(*stats_secs_ago),
                );
            }
        }
        create_file_with_mtime(
            &orphaned_stats_file,
            "cleanup doesn't care",
            "past",
            &Duration::from_secs(0),
        );

        worker.on_cache_update_async(nonexistent_mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        assert!(!orphaned_stats_file.exists());
        for ((_, _, create_stats, alive), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            assert_eq!(mod_filename.exists(), *alive);
            assert_eq!(stats_filename.exists(), *alive && create_stats.is_some());

            // cleanup for next iteration
            if *alive {
                fs::remove_file(&mod_filename).expect("Failed to remove a file");
                if create_stats.is_some() {
                    fs::remove_file(&stats_filename).expect("Failed to remove a file");
                }
            }
        }

        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

// clock drift should be applied to mod cache & stats, too
// however, postpone deleting files to as late as possible
#[test]
fn test_on_update_cleanup_future_files() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         allowed-clock-drift-for-files-from-future = '1d'\n\
         file-count-soft-limit = '3'\n\
         files-total-size-soft-limit = '1M'\n\
         file-count-limit-percent-if-deleting = '70%'\n\
         files-total-size-limit-percent-if-deleting = '70%'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);
    let content_1k = "a".repeat(1_000);

    let mods_files_dir = cache_dir.join("target-triple").join("compiler-version");
    fs::create_dir_all(&mods_files_dir).expect("Failed to create directories");
    let nonexistent_mod_file = cache_dir.join("nonexistent-mod");
    // the below created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    // (future mtime offset, optional .stats future mtime offset, should be alive)
    let scenarios: [&[_]; 5] = [
        // NOT cleaning up, everything is ok
        &[
            (Duration::from_secs(0), None, true),
            (Duration::from_secs(24 * 60 * 60), None, true),
        ],
        // NOT cleaning up, everything is ok
        &[
            (Duration::from_secs(0), None, true),
            (Duration::from_secs(24 * 60 * 60 + 1), None, true),
        ],
        // cleaning up, removing files from oldest
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(24 * 60 * 60), None, true),
            (Duration::from_secs(1), None, false),
            (Duration::from_secs(2), None, true),
        ],
        // cleaning up, removing files from oldest; deleting file from far future
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(1), None, true),
            (Duration::from_secs(24 * 60 * 60 + 1), None, false),
            (Duration::from_secs(2), None, true),
        ],
        // cleaning up, removing files from oldest; file from far future should have .stats from +-now => it's a legitimate file
        &[
            (Duration::from_secs(0), None, false),
            (Duration::from_secs(1), None, false),
            (
                Duration::from_secs(24 * 60 * 60 + 1),
                Some(Duration::from_secs(3)),
                true,
            ),
            (Duration::from_secs(2), None, true),
        ],
    ];

    for mods in &scenarios {
        let filenames = (0..mods.len())
            .map(|i| {
                (
                    mods_files_dir.join(format!("mod-{i}")),
                    mods_files_dir.join(format!("mod-{i}.stats")),
                )
            })
            .collect::<Vec<_>>();

        for ((duration, opt_stats_duration, _), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            create_file_with_mtime(mod_filename, &content_1k, "future", duration);
            if let Some(stats_duration) = opt_stats_duration {
                create_file_with_mtime(stats_filename, "", "future", stats_duration);
            }
        }

        worker.on_cache_update_async(nonexistent_mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        for ((_, opt_stats_duration, alive), (mod_filename, stats_filename)) in
            mods.iter().zip(filenames.iter())
        {
            assert_eq!(mod_filename.exists(), *alive);
            assert_eq!(
                stats_filename.exists(),
                *alive && opt_stats_duration.is_some()
            );
            if *alive {
                fs::remove_file(mod_filename).expect("Failed to remove a file");
                if opt_stats_duration.is_some() {
                    fs::remove_file(stats_filename).expect("Failed to remove a file");
                }
            }
        }

        fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
    }
}

// this tests if worker triggered cleanup or not when some cleanup lock/certificate was out there
#[test]
fn test_on_update_cleanup_self_lock() {
    let (_tempdir, cache_dir, config_path) = test_prolog();
    let cache_config = load_config!(
        config_path,
        "[cache]\n\
         directory = '{cache_dir}'\n\
         worker-event-queue-size = '16'\n\
         cleanup-interval = '30m'\n\
         allowed-clock-drift-for-files-from-future = '1d'",
        cache_dir
    );
    let worker = Worker::start_new(&cache_config);

    let mod_file = cache_dir.join("some-mod");
    let trash_file = cache_dir.join("trash-file.txt");

    let lock_file = cache_dir.join(".cleanup.wip-lock");
    // the below created by the worker if it cleans up
    let worker_lock_file = cache_dir.join(format!(".cleanup.wip-{}", process::id()));

    let scenarios = [
        // valid lock
        (true, "past", Duration::from_secs(30 * 60 - 1)),
        // valid future lock
        (true, "future", Duration::from_secs(24 * 60 * 60)),
        // expired lock
        (false, "past", Duration::from_secs(30 * 60)),
        // expired future lock
        (false, "future", Duration::from_secs(24 * 60 * 60 + 1)),
    ];

    for (lock_valid, duration_sign, duration) in &scenarios {
        create_file_with_mtime(
            &trash_file,
            "with trash content",
            "future",
            &Duration::from_secs(0),
        );
        create_file_with_mtime(&lock_file, "", duration_sign, &duration);

        worker.on_cache_update_async(mod_file.clone());
        worker.wait_for_all_events_handled();
        assert_eq!(worker.events_dropped(), 0);

        // a valid foreign lock suppresses cleanup: trash survives, the lock
        // stays, and no worker-owned lock file is created
        assert_eq!(trash_file.exists(), *lock_valid);
        assert_eq!(lock_file.exists(), *lock_valid);
        if *lock_valid {
            assert!(!worker_lock_file.exists());
        } else {
            fs::remove_file(&worker_lock_file).expect("Failed to remove lock file");
        }
    }
}

// Creates `filename` with the given contents and an mtime shifted from the
// stubbed clock (system_time_stub::NOW) by `offset`, either into the "past"
// or the "future" — this keeps the tests deterministic w.r.t. system time.
fn create_file_with_mtime(filename: &Path, contents: &str, offset_sign: &str, offset: &Duration) {
    fs::write(filename, contents).expect("Failed to create a file");
    let mtime = match offset_sign {
        "past" => system_time_stub::NOW
            .checked_sub(*offset)
            .expect("Failed to calculate new mtime"),
        "future" => system_time_stub::NOW
            .checked_add(*offset)
            .expect("Failed to calculate new mtime"),
        _ => unreachable!(),
    };
    filetime::set_file_mtime(filename, mtime.into()).expect("Failed to set mtime");
}