#include "store/migrate.hpp"

#include <exception>
#include <stdexcept>

#include "store/dbtypes.hpp"
#include "store/exceptions.hpp"
#include "store/layout.hpp"
#include "store/metadata.hpp"
#include "store/read_backend.hpp"
#include "store/write_backend.hpp"
#include "utils/datetime.hpp"
#include "utils/env.hpp"
#include "utils/format/macros.hpp"
#include "utils/fs/exceptions.hpp"
#include "utils/fs/operations.hpp"
#include "utils/fs/path.hpp"
#include "utils/logging/macros.hpp"
#include "utils/optional.ipp"
#include "utils/sanity.hpp"
#include "utils/stream.hpp"
#include "utils/sqlite/database.hpp"
#include "utils/sqlite/exceptions.hpp"
#include "utils/sqlite/statement.ipp"
#include "utils/text/operations.hpp"
namespace datetime = utils::datetime;
namespace fs = utils::fs;
namespace sqlite = utils::sqlite;
namespace text = utils::text;
using utils::none;
using utils::optional;
namespace {
/// First schema version in which the results are stored in one database file
/// per action instead of a single monolithic database.  Databases older than
/// this must be split apart (chunked) as the last step of a migration.
const int first_chunked_schema_version = 3;
/// Queries the schema version of the given database file.
///
/// \param file Path to the database to inspect.
///
/// \return The schema version number recorded in the database's latest
///     metadata entry.
static int
get_schema_version(const fs::path& file)
{
    // Read-only access is enough: we only look at the metadata table.
    sqlite::database handle = store::detail::open_and_setup(
        file, sqlite::open_readonly);
    const store::metadata latest = store::metadata::fetch_latest(handle);
    return latest.schema_version();
}
/// Applies a single one-version migration step to a database.
///
/// \param file Path to the database to upgrade in place.
/// \param version_from Schema version the database currently has.
/// \param version_to Schema version to upgrade to; must be exactly
///     version_from + 1.
/// \param action_id If set, value substituted for the @ACTION_ID@ placeholder
///     in the migration script.
/// \param old_database If set, path substituted for the @OLD_DATABASE@
///     placeholder in the migration script.
///
/// \throw store::error If the migration script cannot be read or if its
///     execution against the database fails.
static void
migrate_schema_step(const fs::path& file,
                    const int version_from,
                    const int version_to,
                    const optional< int64_t > action_id = none,
                    const optional< fs::path > old_database = none)
{
    LI(F("Migrating schema of %s from version %s to %s") % file % version_from
       % version_to);

    PRE(version_to == version_from + 1);

    sqlite::database database = store::detail::open_and_setup(
        file, sqlite::open_readwrite);

    const fs::path migration_path = store::detail::migration_file(
        version_from, version_to);

    // Load the SQL that implements this particular step.
    std::string migration_sql;
    try {
        migration_sql = utils::read_file(migration_path);
    } catch (const std::runtime_error& unused_e) {
        throw store::error(F("Cannot read migration file '%s'") %
                           migration_path);
    }

    // Some migration scripts are templates: fill in the placeholders that the
    // caller provided values for.
    if (action_id) {
        migration_sql = text::replace_all(migration_sql, "@ACTION_ID@",
                                          F("%s") % action_id.get());
    }
    if (old_database) {
        migration_sql = text::replace_all(migration_sql, "@OLD_DATABASE@",
                                          old_database.get().str());
    }

    try {
        database.exec(migration_sql);
    } catch (const sqlite::error& e) {
        throw store::error(F("Schema migration failed: %s") % e.what());
    }
}
/// Splits a monolithic (pre-chunking) database into one file per action.
///
/// For every action recorded in the old database, a new per-action database
/// is created and populated via the migration script for
/// first_chunked_schema_version.  Extraction is best-effort: a failure to
/// extract one action is logged and the remaining actions are still
/// processed.  The old database is removed once the loop finishes.
///
/// \param old_file Path to the database to split; must be at schema version
///     first_chunked_schema_version - 1.
static void
chunk_database(const fs::path& old_file)
{
    PRE(get_schema_version(old_file) == first_chunked_schema_version - 1);

    LI(F("Need to split %s into per-action files") % old_file);

    sqlite::database old_db = store::detail::open_and_setup(
        old_file, sqlite::open_readonly);

    sqlite::statement actions_stmt = old_db.create_statement(
        "SELECT action_id, cwd FROM actions NATURAL JOIN contexts");

    // Finds the timestamp of the earliest test result of a given action; this
    // timestamp is what names the new per-action database file.
    sqlite::statement start_time_stmt = old_db.create_statement(
        "SELECT test_results.start_time AS start_time "
        "FROM test_programs "
        " JOIN test_cases "
        " ON test_programs.test_program_id == test_cases.test_program_id"
        " JOIN test_results "
        " ON test_cases.test_case_id == test_results.test_case_id "
        "WHERE test_programs.action_id == :action_id "
        "ORDER BY start_time LIMIT 1");

    while (actions_stmt.step()) {
        const int64_t action_id = actions_stmt.safe_column_int64("action_id");
        const fs::path cwd(actions_stmt.safe_column_text("cwd"));
        LI(F("Extracting action %s") % action_id);

        start_time_stmt.reset();
        start_time_stmt.bind(":action_id", action_id);
        if (!start_time_stmt.step()) {
            // Actions with no test results have nothing worth extracting.
            LI(F("Skipping empty action %s") % action_id);
            continue;
        }
        const datetime::timestamp start_time = store::column_timestamp(
            start_time_stmt, "start_time");
        start_time_stmt.step_without_results();

        const fs::path new_file = store::layout::new_db_for_migration(
            cwd, start_time);
        if (fs::exists(new_file)) {
            LI(F("Skipping action because %s already exists") % new_file);
            continue;
        }

        LI(F("Creating %s for previous action %s") % new_file % action_id);
        try {
            fs::mkdir_p(new_file.branch_path(), 0755);
            sqlite::database db = store::detail::open_and_setup(
                new_file, sqlite::open_readwrite | sqlite::open_create);
            store::detail::initialize(db);
            db.close();
            migrate_schema_step(new_file,
                                first_chunked_schema_version - 1,
                                first_chunked_schema_version,
                                utils::make_optional(action_id),
                                utils::make_optional(old_file));
        } catch (const std::exception& e) {
            // Best-effort: keep extracting the remaining actions, but do not
            // silently discard the failure as the old code did -- the old
            // database gets deleted below, so this is the user's only trace
            // of why an action went missing.
            LW(F("Failed to extract action %s into %s: %s; continuing")
               % action_id % new_file % e.what());
            fs::unlink(new_file);
        } catch (...) {
            LW(F("Failed to extract action %s into %s; continuing")
               % action_id % new_file);
            fs::unlink(new_file);
        }
    }

    // NOTE(review): the old database is removed even if some actions above
    // failed to extract; at least the failures are now logged.
    fs::unlink(old_file);
}
}
/// Computes the path to a schema migration script.
///
/// The scripts live in the builtin store directory, which can be overridden
/// at run time through the KYUA_STOREDIR environment variable.
///
/// \param version_from Version from which the script migrates.
/// \param version_to Version to which the script migrates.
///
/// \return Path to the SQL file implementing the requested migration step.
fs::path
store::detail::migration_file(const int version_from, const int version_to)
{
    const fs::path storedir(utils::getenv_with_default("KYUA_STOREDIR",
                                                       KYUA_STOREDIR));
    const std::string basename = F("migrate_v%s_v%s.sql") % version_from
        % version_to;
    return storedir / basename;
}
/// Creates a backup copy of a database before a migration.
///
/// The backup lives next to the source and is named after it with a
/// ".v<old_version>.backup" suffix appended.
///
/// \param source Path to the database to back up.
/// \param old_version Schema version of the database, embedded in the name
///     of the backup file.
///
/// \throw store::error If copying the file fails.
void
store::detail::backup_database(const fs::path& source, const int old_version)
{
    const std::string backup_name = F("%s.v%s.backup") % source.str()
        % old_version;
    const fs::path target(backup_name);

    LI(F("Backing up database %s to %s") % source % target);
    try {
        fs::copy(source, target);
    } catch (const fs::error& e) {
        throw store::error(e.what());
    }
}
/// Migrates a database to the current schema version.
///
/// The migration happens in two phases: the in-place single-version steps up
/// to the last monolithic schema, followed by the chunking step that splits
/// the database into one file per action.  A backup of the original database
/// is taken before anything is modified.
///
/// \param file Path to the database whose schema to upgrade.
///
/// \throw store::error If the database is already at the current version, if
///     its version is newer than the supported one, or if any migration step
///     fails.
void
store::migrate_schema(const utils::fs::path& file)
{
    const int version_from = get_schema_version(file);
    const int version_to = detail::current_schema_version;
    if (version_from == version_to) {
        throw error(F("Database already at schema version %s; migration not "
                      "needed") % version_from);
    } else if (version_from > version_to) {
        throw error(F("Database at schema version %s, which is newer than the "
                      "supported version %s") % version_from % version_to);
    }

    detail::backup_database(file, version_from);

    // Phase 1: apply the in-place steps, stopping one short of the chunked
    // schema version.  (Loop variable scoped to the loop; the old code leaked
    // it into the function scope for no reason.)
    for (int version = version_from;
         version < first_chunked_schema_version - 1; ++version) {
        migrate_schema_step(file, version, version + 1);
    }

    // Phase 2: the final step is special because it replaces the monolithic
    // database with one file per action.
    chunk_database(file);
    INV(version_to == first_chunked_schema_version);
}