use anyhow::{Context, Result, anyhow, bail};
use directories_next::ProjectDirs;
use log::{trace, warn};
use serde::{
Deserialize,
de::{self, Deserializer},
};
use std::fmt::Debug;
use std::fs;
use std::path::{Path, PathBuf};
use std::time::Duration;
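/// Top-level, on-disk configuration. Only the `[cache]` section is
/// recognized; unknown fields are rejected.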
#[derive(serde_derive::Deserialize, Debug)]
#[serde(deny_unknown_fields)]
struct Config {
cache: CacheConfig,
}
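/// Settings from the `[cache]` section of the configuration file.
///
/// All fields are optional; missing ones fall back to the defaults defined
/// below. Counts, sizes, durations, and percentages are written as strings
/// with a unit suffix. The values below are illustrative, not defaults:
///
/// ```toml
/// [cache]
/// directory = "/absolute/path/to/cache"
/// cleanup-interval = "1h"
/// files-total-size-soft-limit = "512Mi"
/// file-count-soft-limit = "64K"
/// file-count-limit-percent-if-deleting = "70%"
/// ```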
#[derive(serde_derive::Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub struct CacheConfig {
directory: Option<PathBuf>,
#[serde(
default = "default_worker_event_queue_size",
rename = "worker-event-queue-size",
deserialize_with = "deserialize_si_prefix"
)]
worker_event_queue_size: u64,
#[serde(
default = "default_baseline_compression_level",
rename = "baseline-compression-level"
)]
baseline_compression_level: i32,
#[serde(
default = "default_optimized_compression_level",
rename = "optimized-compression-level"
)]
optimized_compression_level: i32,
#[serde(
default = "default_optimized_compression_usage_counter_threshold",
rename = "optimized-compression-usage-counter-threshold",
deserialize_with = "deserialize_si_prefix"
)]
optimized_compression_usage_counter_threshold: u64,
#[serde(
default = "default_cleanup_interval",
rename = "cleanup-interval",
deserialize_with = "deserialize_duration"
)]
cleanup_interval: Duration,
#[serde(
default = "default_optimizing_compression_task_timeout",
rename = "optimizing-compression-task-timeout",
deserialize_with = "deserialize_duration"
)]
optimizing_compression_task_timeout: Duration,
#[serde(
default = "default_allowed_clock_drift_for_files_from_future",
rename = "allowed-clock-drift-for-files-from-future",
deserialize_with = "deserialize_duration"
)]
allowed_clock_drift_for_files_from_future: Duration,
#[serde(
default = "default_file_count_soft_limit",
rename = "file-count-soft-limit",
deserialize_with = "deserialize_si_prefix"
)]
file_count_soft_limit: u64,
#[serde(
default = "default_files_total_size_soft_limit",
rename = "files-total-size-soft-limit",
deserialize_with = "deserialize_disk_space"
)]
files_total_size_soft_limit: u64,
#[serde(
default = "default_file_count_limit_percent_if_deleting",
rename = "file-count-limit-percent-if-deleting",
deserialize_with = "deserialize_percent"
)]
file_count_limit_percent_if_deleting: u8,
#[serde(
default = "default_files_total_size_limit_percent_if_deleting",
rename = "files-total-size-limit-percent-if-deleting",
deserialize_with = "deserialize_percent"
)]
files_total_size_limit_percent_if_deleting: u8,
}
impl Default for CacheConfig {
fn default() -> Self {
Self {
directory: None,
worker_event_queue_size: default_worker_event_queue_size(),
baseline_compression_level: default_baseline_compression_level(),
optimized_compression_level: default_optimized_compression_level(),
optimized_compression_usage_counter_threshold:
default_optimized_compression_usage_counter_threshold(),
cleanup_interval: default_cleanup_interval(),
optimizing_compression_task_timeout: default_optimizing_compression_task_timeout(),
allowed_clock_drift_for_files_from_future:
default_allowed_clock_drift_for_files_from_future(),
file_count_soft_limit: default_file_count_soft_limit(),
files_total_size_soft_limit: default_files_total_size_soft_limit(),
file_count_limit_percent_if_deleting: default_file_count_limit_percent_if_deleting(),
files_total_size_limit_percent_if_deleting:
default_files_total_size_limit_percent_if_deleting(),
}
}
}
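/// Creates a new configuration file at the given path, or at the default
/// project path when `None` is passed.
///
/// Parent directories are created as needed; the call fails if the file
/// already exists. Returns the path of the newly written file.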
pub fn create_new_config<P: AsRef<Path> + Debug>(config_file: Option<P>) -> Result<PathBuf> {
trace!("Creating new config file, path: {config_file:?}");
let config_file = match config_file {
Some(path) => path.as_ref().to_path_buf(),
None => default_config_path()?,
};
if config_file.exists() {
bail!(
"Configuration file '{}' already exists.",
config_file.display()
);
}
let parent_dir = config_file
.parent()
.ok_or_else(|| anyhow!("Invalid cache config path: {}", config_file.display()))?;
fs::create_dir_all(parent_dir).with_context(|| {
format!(
"Failed to create config directory, config path: {}",
config_file.display(),
)
})?;
let content = "\
# Comment out certain settings to use default values.
# For more settings, please refer to the documentation:
# https://bytecodealliance.github.io/wasmtime/cli-cache.html
[cache]
";
fs::write(&config_file, content).with_context(|| {
format!(
"Failed to flush config to the disk, path: {}",
config_file.display(),
)
})?;
Ok(config_file)
}
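// Compression levels accepted for both the baseline and optimized settings.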
const ZSTD_COMPRESSION_LEVELS: std::ops::RangeInclusive<i32> = 0..=21;
const fn default_worker_event_queue_size() -> u64 {
0x10
}
const fn worker_event_queue_size_warning_threshold() -> u64 {
3
}
const fn default_baseline_compression_level() -> i32 {
zstd::DEFAULT_COMPRESSION_LEVEL
}
const fn default_optimized_compression_level() -> i32 {
20
}
const fn default_optimized_compression_usage_counter_threshold() -> u64 {
0x100
}
const fn default_cleanup_interval() -> Duration {
Duration::from_secs(60 * 60)
}
const fn default_optimizing_compression_task_timeout() -> Duration {
Duration::from_secs(30 * 60)
}
const fn default_allowed_clock_drift_for_files_from_future() -> Duration {
Duration::from_secs(60 * 60 * 24)
}
const fn default_file_count_soft_limit() -> u64 {
0x10_000
}
const fn default_files_total_size_soft_limit() -> u64 {
1024 * 1024 * 512
}
const fn default_file_count_limit_percent_if_deleting() -> u8 {
70
}
const fn default_files_total_size_limit_percent_if_deleting() -> u8 {
70
}
fn project_dirs() -> Option<ProjectDirs> {
ProjectDirs::from("", "BytecodeAlliance", "wasmtime")
}
fn default_config_path() -> Result<PathBuf> {
match project_dirs() {
Some(dirs) => Ok(dirs.config_dir().join("config.toml")),
None => bail!("config file not specified and failed to get the default"),
}
}
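// Generates a serde deserializer that splits a string value into a leading
// integer and a trailing unit suffix (e.g. "30m" -> 30 and "m"), then maps
// the pair to the target type with the provided body.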
macro_rules! generate_deserializer {
($name:ident($numname:ident: $numty:ty, $unitname:ident: &str) -> $retty:ty {$body:expr}) => {
fn $name<'de, D>(deserializer: D) -> Result<$retty, D::Error>
where
D: Deserializer<'de>,
{
let text = String::deserialize(deserializer)?;
let text = text.trim();
let split_point = text.find(|c: char| !c.is_numeric());
let (num, unit) = split_point.map_or_else(|| (text, ""), |p| text.split_at(p));
let deserialized = (|| {
let $numname = num.parse::<$numty>().ok()?;
let $unitname = unit.trim();
$body
})();
if let Some(deserialized) = deserialized {
Ok(deserialized)
} else {
Err(de::Error::custom(
"Invalid value, please refer to the documentation",
))
}
}
};
}
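// Durations: an integer with an "s", "m", "h", or "d" suffix, e.g. "30m".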
generate_deserializer!(deserialize_duration(num: u64, unit: &str) -> Duration {
match unit {
"s" => Some(Duration::from_secs(num)),
"m" => Some(Duration::from_secs(num * 60)),
"h" => Some(Duration::from_secs(num * 60 * 60)),
"d" => Some(Duration::from_secs(num * 60 * 60 * 24)),
_ => None,
}
});
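// Plain counts: an optional decimal SI prefix ("K", "M", "G", "T", "P").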
generate_deserializer!(deserialize_si_prefix(num: u64, unit: &str) -> u64 {
match unit {
"" => Some(num),
"K" => num.checked_mul(1_000),
"M" => num.checked_mul(1_000_000),
"G" => num.checked_mul(1_000_000_000),
"T" => num.checked_mul(1_000_000_000_000),
"P" => num.checked_mul(1_000_000_000_000_000),
_ => None,
}
});
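// Disk sizes: decimal SI prefixes as above, plus binary prefixes
// ("Ki", "Mi", "Gi", "Ti", "Pi") for powers of 1024.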
generate_deserializer!(deserialize_disk_space(num: u64, unit: &str) -> u64 {
match unit {
"" => Some(num),
"K" => num.checked_mul(1_000),
"Ki" => num.checked_mul(1u64 << 10),
"M" => num.checked_mul(1_000_000),
"Mi" => num.checked_mul(1u64 << 20),
"G" => num.checked_mul(1_000_000_000),
"Gi" => num.checked_mul(1u64 << 30),
"T" => num.checked_mul(1_000_000_000_000),
"Ti" => num.checked_mul(1u64 << 40),
"P" => num.checked_mul(1_000_000_000_000_000),
"Pi" => num.checked_mul(1u64 << 50),
_ => None,
}
});
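// Percentages: an integer followed by "%", e.g. "70%".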
generate_deserializer!(deserialize_percent(num: u8, unit: &str) -> u8 {
match unit {
"%" => Some(num),
_ => None,
}
});
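// Generates a public getter that returns a copy of the named setting.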
macro_rules! generate_setting_getter {
($setting:ident: $setting_type:ty) => {
#[doc = concat!("Returns ", "`", stringify!($setting), "`.")]
pub fn $setting(&self) -> $setting_type {
self.$setting
}
};
}
impl CacheConfig {
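/// Creates a new configuration with default settings.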
pub fn new() -> Self {
Self::default()
}
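/// Loads and validates cache configuration from the given TOML file, or from
/// the default location when `None` is passed. A missing default file yields
/// the default settings; a missing user-specified file is an error.
///
/// A minimal sketch of typical usage (the `wasmtime_cache` crate path is
/// assumed here, hence the ignored doctest):
///
/// ```ignore
/// use std::path::Path;
///
/// // Load and validate the cache settings from an explicit file.
/// let config = wasmtime_cache::CacheConfig::from_file(Some(Path::new("/etc/wasmtime/config.toml")))?;
/// // Validation guarantees the optimized level is at least the baseline level.
/// assert!(config.baseline_compression_level() <= config.optimized_compression_level());
/// ```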
pub fn from_file(config_file: Option<&Path>) -> Result<Self> {
let mut config = Self::load_and_parse_file(config_file)?;
config.validate()?;
Ok(config)
}
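// When no file was specified and the default file does not exist, fall back
// to the default settings instead of erroring out.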
fn load_and_parse_file(config_file: Option<&Path>) -> Result<Self> {
let (config_file, user_custom_file) = match config_file {
Some(path) => (path.to_path_buf(), true),
None => (default_config_path()?, false),
};
let entity_exists = config_file.exists();
match (entity_exists, user_custom_file) {
(false, false) => Ok(Self::new()),
_ => {
let contents = fs::read_to_string(&config_file).context(format!(
"failed to read config file: {}",
config_file.display()
))?;
let config = toml::from_str::<Config>(&contents).context(format!(
"failed to parse config file: {}",
config_file.display()
))?;
Ok(config.cache)
}
}
}
generate_setting_getter!(worker_event_queue_size: u64);
generate_setting_getter!(baseline_compression_level: i32);
generate_setting_getter!(optimized_compression_level: i32);
generate_setting_getter!(optimized_compression_usage_counter_threshold: u64);
generate_setting_getter!(cleanup_interval: Duration);
generate_setting_getter!(optimizing_compression_task_timeout: Duration);
generate_setting_getter!(allowed_clock_drift_for_files_from_future: Duration);
generate_setting_getter!(file_count_soft_limit: u64);
generate_setting_getter!(files_total_size_soft_limit: u64);
generate_setting_getter!(file_count_limit_percent_if_deleting: u8);
generate_setting_getter!(files_total_size_limit_percent_if_deleting: u8);
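/// Returns the configured cache directory, if any.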
pub fn directory(&self) -> Option<&PathBuf> {
self.directory.as_ref()
}
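// Builder-style setters; each returns `&mut Self` so calls can be chained.
/// Sets the cache directory. The path must be absolute; this is enforced
/// during validation.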
pub fn with_directory(&mut self, directory: impl Into<PathBuf>) -> &mut Self {
self.directory = Some(directory.into());
self
}
pub fn with_worker_event_queue_size(&mut self, size: u64) -> &mut Self {
self.worker_event_queue_size = size;
self
}
pub fn with_baseline_compression_level(&mut self, level: i32) -> &mut Self {
self.baseline_compression_level = level;
self
}
pub fn with_optimized_compression_level(&mut self, level: i32) -> &mut Self {
self.optimized_compression_level = level;
self
}
pub fn with_optimized_compression_usage_counter_threshold(
&mut self,
threshold: u64,
) -> &mut Self {
self.optimized_compression_usage_counter_threshold = threshold;
self
}
pub fn with_cleanup_interval(&mut self, interval: Duration) -> &mut Self {
self.cleanup_interval = interval;
self
}
pub fn with_optimizing_compression_task_timeout(&mut self, timeout: Duration) -> &mut Self {
self.optimizing_compression_task_timeout = timeout;
self
}
pub fn with_allowed_clock_drift_for_files_from_future(&mut self, drift: Duration) -> &mut Self {
self.allowed_clock_drift_for_files_from_future = drift;
self
}
pub fn with_file_count_soft_limit(&mut self, limit: u64) -> &mut Self {
self.file_count_soft_limit = limit;
self
}
pub fn with_files_total_size_soft_limit(&mut self, limit: u64) -> &mut Self {
self.files_total_size_soft_limit = limit;
self
}
pub fn with_file_count_limit_percent_if_deleting(&mut self, percent: u8) -> &mut Self {
self.file_count_limit_percent_if_deleting = percent;
self
}
pub fn with_files_total_size_limit_percent_if_deleting(&mut self, percent: u8) -> &mut Self {
self.files_total_size_limit_percent_if_deleting = percent;
self
}
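/// Validates the settings, resolving the cache directory to a canonical
/// absolute path (creating it if needed) and rejecting out-of-range
/// compression levels and percentages.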
pub(crate) fn validate(&mut self) -> Result<()> {
self.validate_directory_or_default()?;
self.validate_worker_event_queue_size();
self.validate_baseline_compression_level()?;
self.validate_optimized_compression_level()?;
self.validate_file_count_limit_percent_if_deleting()?;
self.validate_files_total_size_limit_percent_if_deleting()?;
Ok(())
}
fn validate_directory_or_default(&mut self) -> Result<()> {
if self.directory.is_none() {
match project_dirs() {
Some(proj_dirs) => self.directory = Some(proj_dirs.cache_dir().to_path_buf()),
None => {
bail!("Cache directory not specified and failed to get the default");
}
}
}
let cache_dir = self.directory.as_ref().unwrap();
if !cache_dir.is_absolute() {
bail!(
"Cache directory path has to be absolute, path: {}",
cache_dir.display(),
);
}
fs::create_dir_all(cache_dir).context(format!(
"failed to create cache directory: {}",
cache_dir.display()
))?;
let canonical = fs::canonicalize(cache_dir).context(format!(
"failed to canonicalize cache directory: {}",
cache_dir.display()
))?;
self.directory = Some(canonical);
Ok(())
}
fn validate_worker_event_queue_size(&self) {
if self.worker_event_queue_size < worker_event_queue_size_warning_threshold() {
warn!("Detected small worker event queue size. Some messages might be lost.");
}
}
fn validate_baseline_compression_level(&self) -> Result<()> {
if !ZSTD_COMPRESSION_LEVELS.contains(&self.baseline_compression_level) {
bail!(
"Invalid baseline compression level: {} not in {:#?}",
self.baseline_compression_level,
ZSTD_COMPRESSION_LEVELS
);
}
Ok(())
}
fn validate_optimized_compression_level(&self) -> Result<()> {
if !ZSTD_COMPRESSION_LEVELS.contains(&self.optimized_compression_level) {
bail!(
"Invalid optimized compression level: {} not in {:#?}",
self.optimized_compression_level,
ZSTD_COMPRESSION_LEVELS
);
}
if self.optimized_compression_level < self.baseline_compression_level {
bail!(
"Invalid optimized compression level is lower than baseline: {} < {}",
self.optimized_compression_level,
self.baseline_compression_level
);
}
Ok(())
}
fn validate_file_count_limit_percent_if_deleting(&self) -> Result<()> {
if self.file_count_limit_percent_if_deleting > 100 {
bail!(
"Invalid files count limit percent if deleting: {} not in range 0-100%",
self.file_count_limit_percent_if_deleting
);
}
Ok(())
}
fn validate_files_total_size_limit_percent_if_deleting(&self) -> Result<()> {
if self.files_total_size_limit_percent_if_deleting > 100 {
bail!(
"Invalid files total size limit percent if deleting: {} not in range 0-100%",
self.files_total_size_limit_percent_if_deleting
);
}
Ok(())
}
}
#[cfg(test)]
#[macro_use]
pub mod tests;