Move flake scanning to module
commit f6733f86a7
parent 7a4fbaf74a
@@ -2,25 +2,17 @@ use anyhow::{
     Context,
     Result,
 };
-use chrono::{
-    Utc,
-};
 use clap::{
     Parser,
 };
 use flake_tracker::{
-    flake::{
-        FlakeLocksNodeInputs,
-        FlakeMetadata,
-        FlakeUri,
+    scan::{
+        scan_flake,
     },
     storage::{
-        InputRow,
-        RevisionRow,
         Storage,
     },
 };
-use std::process::Command;
 
 #[derive(Parser)]
 #[command(version, about, long_about = None)]
@@ -32,69 +24,11 @@ struct Cli {
 async fn main() -> Result<()> {
     let cli = Cli::parse();
 
-    let scan_time = Utc::now().timestamp();
-
     let storage = Storage::connect("sqlite://flake-tracker.db")
         .await
         .context("Failed to connect to database")?;
 
+    scan_flake(storage, &cli.flake_uri).await?;
-    let flake_metadata_raw = Command::new("nix")
-        .arg("flake")
-        .arg("metadata")
-        .arg("--json")
-        .arg(cli.flake_uri)
-        .output()
-        .context("Failed to fetch flake metadata")?;
-
-    let flake_metadata: FlakeMetadata = serde_json::from_slice(&flake_metadata_raw.stdout)
-        .context("Failed to parse flake metadata")?;
-
-    let revision_row = RevisionRow {
-        revision_uri: flake_metadata.locked.flake_uri()?.clone(),
-        flake_uri: Some(flake_metadata.resolved.flake_uri()?.clone()),
-        nix_store_path: Some(flake_metadata.path.clone()),
-        nar_hash: Some(flake_metadata.locked.narHash.clone()),
-        last_modified: Some(flake_metadata.locked.lastModified.clone()),
-        tracker_last_scanned: Some(scan_time.clone()),
-    };
-
-    storage.set_revision(revision_row)
-        .await?;
-
-    let locks_root_name = &flake_metadata.locks.root;
-    let locks_root_node = flake_metadata.locks.nodes.get(locks_root_name)
-        .context("Failed to get locks root node")?;
-
-    for (input_name, locks_input_name) in locks_root_node.inputs.clone().context("No inputs found for flake")? {
-
-        if let FlakeLocksNodeInputs::String(locks_input_name) = locks_input_name {
-            let locks_input_node = flake_metadata.locks.nodes.get(&locks_input_name)
-                .context("Failed to find lock of input")?;
-
-            let input_row = InputRow {
-                revision_uri: flake_metadata.locked.flake_uri()?.clone(),
-                input_name: input_name.clone(),
-                locked_revision_uri: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.flake_uri()?),
-                locked_flake_uri: Some(locks_input_node.original.clone().context("Unexpected missing lock")?.flake_uri()?),
-                locked_nar_hash: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.narHash),
-                last_modified: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.lastModified),
-            };
-            storage.set_input(input_row)
-                .await?;
-
-            let revision_row = RevisionRow {
-                revision_uri: locks_input_node.locked.clone().context("Unexpected missing lock")?.flake_uri()?.clone(),
-                flake_uri: Some(locks_input_node.original.clone().context("Unexpected missing lock")?.flake_uri()?.clone()),
-                nix_store_path: None,
-                nar_hash: None,
-                last_modified: None,
-                tracker_last_scanned: None,
-            };
-            storage.set_revision_exist(revision_row)
-                .await?;
-        }
-    }
 
     Ok(())
 }
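Taken together, the two hunks above leave the binary's entry point with only CLI parsing, database setup, and a single call into the new module. Reconstructed from the diff (the comments are added here for orientation and are not part of the commit), the resulting main reads:

async fn main() -> Result<()> {
    let cli = Cli::parse();

    // Database setup stays in the binary.
    let storage = Storage::connect("sqlite://flake-tracker.db")
        .await
        .context("Failed to connect to database")?;

    // All scanning logic now lives in flake_tracker::scan::scan_flake.
    scan_flake(storage, &cli.flake_uri).await?;

    Ok(())
}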
@@ -1,4 +1,5 @@
 pub mod flake;
+pub mod scan;
 pub mod storage;
 pub mod templates;
 pub mod utils;
src/scan.rs (new file)
@@ -0,0 +1,83 @@
+use anyhow::{
+    Context,
+    Result,
+};
+use chrono::{
+    Utc,
+};
+use crate::{
+    flake::{
+        FlakeLocksNodeInputs,
+        FlakeMetadata,
+        FlakeUri,
+    },
+    storage::{
+        InputRow,
+        RevisionRow,
+        Storage,
+    },
+};
+use std::process::Command;
+
+pub async fn scan_flake(storage: Storage, flake_uri: &str) -> Result<()> {
+    let scan_time = Utc::now().timestamp();
+
+    let flake_metadata_raw = Command::new("nix")
+        .arg("flake")
+        .arg("metadata")
+        .arg("--json")
+        .arg(flake_uri)
+        .output()
+        .context("Failed to fetch flake metadata")?;
+
+    let flake_metadata: FlakeMetadata = serde_json::from_slice(&flake_metadata_raw.stdout)
+        .context("Failed to parse flake metadata")?;
+
+    let revision_row = RevisionRow {
+        revision_uri: flake_metadata.locked.flake_uri()?.clone(),
+        flake_uri: Some(flake_metadata.resolved.flake_uri()?.clone()),
+        nix_store_path: Some(flake_metadata.path.clone()),
+        nar_hash: Some(flake_metadata.locked.narHash.clone()),
+        last_modified: Some(flake_metadata.locked.lastModified.clone()),
+        tracker_last_scanned: Some(scan_time.clone()),
+    };
+
+    storage.set_revision(revision_row)
+        .await?;
+
+    let locks_root_name = &flake_metadata.locks.root;
+    let locks_root_node = flake_metadata.locks.nodes.get(locks_root_name)
+        .context("Failed to get locks root node")?;
+
+    for (input_name, locks_input_name) in locks_root_node.inputs.clone().context("No inputs found for flake")? {
+
+        if let FlakeLocksNodeInputs::String(locks_input_name) = locks_input_name {
+            let locks_input_node = flake_metadata.locks.nodes.get(&locks_input_name)
+                .context("Failed to find lock of input")?;
+
+            let input_row = InputRow {
+                revision_uri: flake_metadata.locked.flake_uri()?.clone(),
+                input_name: input_name.clone(),
+                locked_revision_uri: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.flake_uri()?),
+                locked_flake_uri: Some(locks_input_node.original.clone().context("Unexpected missing lock")?.flake_uri()?),
+                locked_nar_hash: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.narHash),
+                last_modified: Some(locks_input_node.locked.clone().context("Unexpected missing lock")?.lastModified),
+            };
+            storage.set_input(input_row)
+                .await?;
+
+            let revision_row = RevisionRow {
+                revision_uri: locks_input_node.locked.clone().context("Unexpected missing lock")?.flake_uri()?.clone(),
+                flake_uri: Some(locks_input_node.original.clone().context("Unexpected missing lock")?.flake_uri()?.clone()),
+                nix_store_path: None,
+                nar_hash: None,
+                last_modified: None,
+                tracker_last_scanned: None,
+            };
+            storage.set_revision_exist(revision_row)
+                .await?;
+        }
+    }
+
+    Ok(())
+}
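For reference, scan_flake persists two row types whose field names all appear in the code above. A hypothetical sketch of their shape follows; the field names are taken from this diff, but the concrete types and the Option wrapping are assumptions, since the real definitions live in the crate's storage module and are not part of this commit:

// Hypothetical sketch only: field names match the diff, types are guesses.
pub struct RevisionRow {
    pub revision_uri: String,
    pub flake_uri: Option<String>,
    pub nix_store_path: Option<String>,
    pub nar_hash: Option<String>,
    pub last_modified: Option<i64>,
    pub tracker_last_scanned: Option<i64>,
}

// Hypothetical sketch only: one row per locked input of the scanned revision.
pub struct InputRow {
    pub revision_uri: String,
    pub input_name: String,
    pub locked_revision_uri: Option<String>,
    pub locked_flake_uri: Option<String>,
    pub locked_nar_hash: Option<String>,
    pub last_modified: Option<i64>,
}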