Aggregate flakes into their own table

clerie 2025-02-10 18:05:41 +01:00
parent d2957d1799
commit 796d2bf8c1
3 changed files with 61 additions and 16 deletions

View File

@@ -1,5 +1,7 @@
-CREATE TABLE tracked_flakes (
-    flake_uri TEXT PRIMARY KEY NOT NULL
+CREATE TABLE flakes (
+    flake_uri TEXT PRIMARY KEY NOT NULL,
+    tracker_track BOOLEAN NOT NULL,
+    tracker_last_scanned INT
 );
 
 CREATE TABLE revisions (
@@ -7,8 +9,7 @@ CREATE TABLE revisions (
     flake_uri TEXT,
     nix_store_path TEXT,
     nar_hash TEXT,
-    last_modified INT,
-    tracker_last_scanned INT
+    last_modified INT
 );
 
 CREATE TABLE inputs (

View File

@@ -13,6 +13,7 @@ use crate::{
     },
     storage::{
         InputRow,
+        FlakeRow,
         RevisionRow,
         Storage,
     },
@@ -39,12 +40,20 @@ pub async fn scan_flake(storage: Storage, flake_uri: &str) -> Result<()> {
         nix_store_path: Some(flake_metadata.path.clone()),
         nar_hash: Some(flake_metadata.locked.nar_hash.clone()),
         last_modified: Some(flake_metadata.locked.last_modified.clone()),
-        tracker_last_scanned: Some(scan_time.clone()),
     };
 
     storage.set_revision(revision_row)
         .await?;
 
+    let flake_row = FlakeRow {
+        flake_uri: flake_metadata.resolved.flake_uri()?.clone(),
+        tracker_track: false,
+        tracker_last_scanned: Some(scan_time.clone()),
+    };
+
+    storage.set_flake(flake_row)
+        .await?;
+
     let locks_root_name = &flake_metadata.locks.root;
     let locks_root_node = flake_metadata.locks.nodes.get(locks_root_name)
         .context("Failed to get locks root node")?;
@@ -72,10 +81,19 @@ pub async fn scan_flake(storage: Storage, flake_uri: &str) -> Result<()> {
             nix_store_path: None,
             nar_hash: None,
             last_modified: None,
-            tracker_last_scanned: None,
         };
 
         storage.set_revision_exist(revision_row)
             .await?;
+
+        let flake_row = FlakeRow {
+            flake_uri: locks_input_node.original.clone().context("Unexpected missing lock")?.flake_uri()?.clone(),
+            tracker_track: false,
+            tracker_last_scanned: None,
+        };
+
+        storage.set_flake_exist(flake_row)
+            .await?;
+
     }
 }

View File

@@ -35,11 +35,39 @@ impl Storage {
         })
     }
 
+    pub async fn set_flake(&self, flake_row: FlakeRow) -> Result<SqliteQueryResult> {
+        sqlx::query("INSERT INTO flakes (flake_uri, tracker_track, tracker_last_scanned)
+            VALUES (?, ?, ?)
+            ON CONFLICT(flake_uri) DO UPDATE SET
+                tracker_track=excluded.tracker_track,
+                tracker_last_scanned=excluded.tracker_last_scanned
+            ")
+            .bind(&flake_row.flake_uri)
+            .bind(&flake_row.tracker_track)
+            .bind(&flake_row.tracker_last_scanned)
+            .execute(&self.db)
+            .await
+            .context("Failed to execute database query")
+    }
+
+    pub async fn set_flake_exist(&self, flake_row: FlakeRow) -> Result<SqliteQueryResult> {
+        sqlx::query("INSERT INTO flakes (flake_uri, tracker_track)
+            VALUES (?, FALSE)
+            ON CONFLICT(flake_uri) DO NOTHING
+            ")
+            .bind(&flake_row.flake_uri)
+            .execute(&self.db)
+            .await
+            .context("Failed to execute database query")
+    }
+
     pub async fn flakes(&self) -> Result<Vec<FlakeRow>> {
         sqlx::query_as("
             SELECT
-                flake_uri
-            FROM revisions
+                flake_uri,
+                tracker_track,
+                tracker_last_scanned
+            FROM flakes
             ORDER BY flake_uri
             ")
             .fetch_all(&self.db)
@@ -48,21 +76,19 @@ impl Storage {
     }
 
     pub async fn set_revision(&self, revision_row: RevisionRow) -> Result<SqliteQueryResult> {
-        sqlx::query("INSERT INTO revisions (revision_uri, flake_uri, nix_store_path, nar_hash, last_modified, tracker_last_scanned)
-            VALUES (?, ?, ?, ?, ?, ?)
+        sqlx::query("INSERT INTO revisions (revision_uri, flake_uri, nix_store_path, nar_hash, last_modified)
+            VALUES (?, ?, ?, ?, ?)
             ON CONFLICT(revision_uri) DO UPDATE SET
                 flake_uri=excluded.flake_uri,
                 nix_store_path=excluded.nix_store_path,
                 nar_hash=excluded.nar_hash,
-                last_modified=excluded.last_modified,
-                tracker_last_scanned=excluded.tracker_last_scanned
+                last_modified=excluded.last_modified
             ")
            .bind(&revision_row.revision_uri)
            .bind(&revision_row.flake_uri)
            .bind(&revision_row.nix_store_path)
            .bind(&revision_row.nar_hash)
            .bind(&revision_row.last_modified)
-           .bind(&revision_row.tracker_last_scanned)
            .execute(&self.db)
            .await
            .context("Failed to execute database query")
@@ -87,8 +113,7 @@ impl Storage {
                 flake_uri,
                 nix_store_path,
                 nar_hash,
-                last_modified,
-                tracker_last_scanned
+                last_modified
             FROM revisions
             WHERE flake_uri = ?
             ORDER BY last_modified DESC
@@ -194,7 +219,6 @@ pub struct RevisionRow {
     pub nix_store_path: Option<String>,
     pub nar_hash: Option<String>,
     pub last_modified: Option<i64>,
-    pub tracker_last_scanned: Option<i64>,
 }
 
 impl RevisionRow {
@@ -250,6 +274,8 @@ impl InputRow {
 #[derive(FromRow)]
 pub struct FlakeRow {
     pub flake_uri: String,
+    pub tracker_track: bool,
+    pub tracker_last_scanned: Option<i64>,
 }
 
 impl FlakeRow {
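
Editor's note, for illustration only (not part of the commit): a minimal sketch of how the new flakes API introduced above might be exercised from calling code. It assumes the Storage handle, FlakeRow struct, and the set_flake/flakes methods exactly as defined in this diff; the flake URI, the example function itself, and the anyhow::Result return type are hypothetical.

// Hypothetical usage sketch; assumes `Storage` and `FlakeRow` from the crate's storage module.
async fn example(storage: &Storage) -> anyhow::Result<()> {
    // Upsert a flake; ON CONFLICT(flake_uri) keeps one row per flake URI.
    let flake_row = FlakeRow {
        flake_uri: "github:example/flake".to_string(), // hypothetical URI
        tracker_track: true,
        tracker_last_scanned: None,
    };
    storage.set_flake(flake_row).await?;

    // Read the aggregated flakes back, ordered by flake_uri.
    for flake in storage.flakes().await? {
        println!(
            "{} track={} last_scanned={:?}",
            flake.flake_uri, flake.tracker_track, flake.tracker_last_scanned
        );
    }
    Ok(())
}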