refactor code => dropping docker cli

DataHearth 2022-03-16 12:17:15 +01:00
parent 60d0c8a17e
commit c38afccd63
No known key found for this signature in database
GPG Key ID: E88FD356ACC5F3C4
2 changed files with 102 additions and 162 deletions

View File

@@ -1,141 +1,99 @@
-use chrono::NaiveDateTime;
-use log::{error, warn};
-use serde::{self, Deserialize, Deserializer};
-use std::process::{exit, Command};
+use bollard::image::ListImagesOptions;
+use bollard::models::ImageSummary;
+use bollard::Docker;
+use bollard::API_DEFAULT_VERSION;
+use log::info;
+use std::collections::HashMap;
 
 use crate::DateArgs;
-use crate::DOCKER_BIN;
 
 const GHCR_REPO: &str = "ghcr.io/datahearth/clear-docker-images";
 const DOCKER_REPO: &str = "datahearth/clear-docker-images";
 
-#[derive(Deserialize, Debug)]
-struct Image {
-    // image ID
-    #[serde(rename = "ID")]
-    id: String,
-    // image repository
-    #[serde(rename = "Repository")]
-    repository: String,
-    // image tag
-    #[serde(rename = "Tag")]
-    tag: String,
-    // image creation date as UNIX timestamp
-    #[serde(deserialize_with = "deserialize_creation_date", rename = "CreatedAt")]
-    created_at: i64,
-    // image size in MB
-    #[serde(deserialize_with = "deserialize_size", rename = "Size")]
-    size: f32,
-}
-
-pub fn deserialize_creation_date<'de, D>(deserializer: D) -> Result<i64, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    let date = String::deserialize(deserializer)?;
-
-    // format => 2021-01-01 00:00:00 +0100 CET
-    NaiveDateTime::parse_from_str(&date, "%Y-%m-%d %H:%M:%S %z %Z")
-        .map(|d| d.timestamp())
-        .map_err(serde::de::Error::custom)
-}
-
-pub fn deserialize_size<'de, D>(deserializer: D) -> Result<f32, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    let size = String::deserialize(deserializer)?;
-
-    if size.contains("KB") {
-        size.replace("KB", "")
-            .parse::<f32>()
-            .map(|s| s / 1000.0)
-            .map_err(serde::de::Error::custom)
-    } else if size.contains("MB") {
-        size.replace("MB", "")
-            .parse::<f32>()
-            .map_err(serde::de::Error::custom)
-    } else if size.contains("GB") {
-        size.replace("GB", "")
-            .parse::<f32>()
-            .map(|s| s * 1000.0)
-            .map_err(serde::de::Error::custom)
-    } else {
-        Err(serde::de::Error::custom(format!(
-            "Unknown size identification: {}",
-            size,
-        )))
-    }
-}
-
-pub fn process_imgs(
-    repository: Option<String>,
-    tags: Vec<String>,
-    timestamps: DateArgs,
-) -> (Vec<String>, f32) {
-    let mut ids = vec![];
-    let mut saved_size = 0.0;
-
-    for img in parse_imgs(repository) {
-        let image: Image = serde_json::from_str(&img).unwrap();
-        let del = timestamps
-            .stop
-            .map_or(timestamps.start > image.created_at, |stop| {
-                timestamps.start > image.created_at && image.created_at > stop
-            });
-
-        if del && (image.repository != GHCR_REPO && image.repository != DOCKER_REPO) {
-            if !tags.contains(&image.tag) {
-                ids.push(image.id);
-
-                saved_size += image.size
-            }
-        }
-    }
-
-    return (ids, saved_size);
-}
-
-fn get_images(repo: Option<String>) -> Vec<u8> {
-    let mut cmd = Command::new(DOCKER_BIN);
-    cmd.arg("images");
-
-    repo.map(|repo| cmd.arg(repo));
-
-    cmd.args(["--format", "{{json .}}"]);
-
-    match cmd.output() {
-        Ok(o) => {
-            if !o.status.success() {
-                error!(
-                    "{}",
-                    std::str::from_utf8(&o.stderr).expect("failed to parse STDERR to UTF-8")
-                );
-                error!("failed to retrieve docker images. Please checkout STDERR");
-                exit(1);
-            }
-
-            o.stdout
-        }
-        Err(e) => {
-            error!("docker command failed: {}", e);
-            exit(1);
-        }
-    }
-}
-
-fn parse_imgs(repository: Option<String>) -> Vec<String> {
-    let stdout = get_images(repository);
-    let output = String::from_utf8(stdout).unwrap_or_else(|e| {
-        error!("failed to parse docker output: {}", e);
-        exit(1);
-    });
-
-    let images: Vec<String> = output.lines().map(|s| s.to_string()).collect();
-    if images.len() == 0 {
-        warn!("No images found for current timestamp and/or repository");
-    }
-
-    return images;
-}
+pub struct DockerActions {
+    docker: Docker,
+    repository: Option<String>,
+    tags: Vec<String>,
+    date: DateArgs,
+}
+
+impl DockerActions {
+    pub fn new(
+        socket: String,
+        repository: Option<String>,
+        tags: Vec<String>,
+        date: DateArgs,
+    ) -> Self {
+        Self {
+            docker: Docker::connect_with_socket(&socket, 120, API_DEFAULT_VERSION).unwrap(),
+            repository,
+            tags,
+            date,
+        }
+    }
+
+    pub async fn get(&self) -> Result<Vec<ImageSummary>, bollard::errors::Error> {
+        let mut image_filters = HashMap::new();
+
+        // why using &self.repository instead of selft.repository ?
+        if let Some(r) = &self.repository {
+            image_filters.insert("reference", vec![r.as_str()]);
+        }
+
+        self.docker
+            .list_images(Some(ListImagesOptions {
+                all: true,
+                filters: image_filters,
+                ..Default::default()
+            }))
+            .await
+    }
+
+    pub async fn delete(
+        &self,
+        images: Vec<ImageSummary>,
+        dry_run: bool,
+    ) -> Result<i64, bollard::errors::Error> {
+        let mut removed_size = 0;
+        for image in images {
+            info!("deleting: {}", image.id);
+            if !dry_run {
+                if let Err(e) = self.docker.delete_service(&image.id).await {
+                    return Err(e);
+                }
+            }
+            removed_size += image.size;
+        }
+
+        Ok(removed_size)
+    }
+
+    pub fn filter(&self, images: Vec<ImageSummary>) -> Vec<ImageSummary> {
+        let mut to_be_deleted: Vec<ImageSummary> = vec![];
+        for image in images {
+            if self
+                .date
+                .stop
+                .map_or(self.date.start > image.created, |stop| {
+                    self.date.start > image.created && image.created > stop
+                })
+                && image.repo_tags.iter().any(|tag| {
+                    !tag.contains(GHCR_REPO)
+                        && !tag.contains(DOCKER_REPO)
+                        && self
+                            .tags
+                            .iter()
+                            .any(|excluded_tag| !tag.contains(excluded_tag))
+                })
+            {
+                println!("{:?}", self.tags);
+                to_be_deleted.push(image);
+            }
+        }
+
+        return to_be_deleted;
+    }
+}

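For context, the bollard calls above replace the old `docker images --format '{{json .}}'` / `docker rmi` pipeline. The following is a minimal standalone sketch of that API surface, not part of this commit; the socket path, the filter value, and the use of `remove_image` for deletion are illustrative assumptions:

```rust
use std::collections::HashMap;

use bollard::image::{ListImagesOptions, RemoveImageOptions};
use bollard::{Docker, API_DEFAULT_VERSION};

#[tokio::main]
async fn main() -> Result<(), bollard::errors::Error> {
    // Talk to the daemon over its UNIX socket instead of shelling out to the docker CLI.
    let docker = Docker::connect_with_socket("/var/run/docker.sock", 120, API_DEFAULT_VERSION)?;

    // Rough equivalent of `docker images <repository>`: list images matching a reference filter.
    let mut filters = HashMap::new();
    filters.insert("reference", vec!["datahearth/clear-docker-images"]);
    let images = docker
        .list_images(Some(ListImagesOptions {
            all: true,
            filters,
            ..Default::default()
        }))
        .await?;

    // Rough equivalent of `docker rmi <id>`: remove each image through the API
    // (assumes bollard's image-removal endpoint, `remove_image`).
    for image in images {
        docker
            .remove_image(&image.id, Some(RemoveImageOptions::default()), None)
            .await?;
    }

    Ok(())
}
```
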
View File

@@ -4,11 +4,10 @@ use chrono::{NaiveDateTime, Utc};
 use clap::Parser;
 use log::{error, info};
 use simple_logger::SimpleLogger;
-use std::process::{exit, Command, Stdio};
+use std::process::exit;
 
-use crate::images::process_imgs;
+use crate::images::DockerActions;
 
-const DOCKER_BIN: &str = "docker";
 const TWO_DAYS_TIMESTAMP: i64 = 172_800;
 
 /// Clear docker images from
@@ -32,13 +31,13 @@ struct Args {
     #[clap(long, takes_value = false)]
     dry_run: bool,
 
-    /// force image removal [default: false]
-    #[clap(long, takes_value = false)]
-    force: bool,
-
     /// add more logs [default: false]
     #[clap(short, long, takes_value = false)]
     verbose: bool,
+
+    /// where is located the docker socket (can be a UNIX socket or TCP protocol)
+    #[clap(short, long, default_value = "/var/run/docker.sock")]
+    socket: String,
 }
 
 #[derive(Debug)]
@@ -47,7 +46,8 @@ pub struct DateArgs {
     stop: Option<i64>,
 }
 
-fn main() {
+#[tokio::main]
+async fn main() {
     let args = Args::parse();
     let logger = SimpleLogger::new()
         .without_timestamps()
@@ -61,9 +61,10 @@ fn main() {
         exit(1);
     }
 
-    let (ids, saved_size) = process_imgs(
+    let actions = DockerActions::new(
+        args.socket,
         args.repository,
-        args.tags.map_or(vec![], |tags| tags),
+        args.tags.map_or(vec![], |t| t),
         args.date.map_or(
             DateArgs {
                 start: Utc::now().timestamp() - TWO_DAYS_TIMESTAMP,
@@ -73,47 +74,28 @@
         ),
     );
 
-    if args.dry_run {
-        info!("dry run activated");
-    } else {
-        let mut cmd = Command::new(DOCKER_BIN);
-        cmd.arg("rmi");
-
-        if args.force {
-            info!("\"--force\" flag set");
-            cmd.arg("--force");
-        }
-
-        if ids.len() == 0 {
-            info!("nothing to do...");
-            return;
-        }
-
-        if args.verbose {
-            info!("trigger \"docker rmi\" command");
-        }
-
-        match cmd.args(&ids).stdout(Stdio::null()).status() {
-            Ok(s) => {
-                if !s.success() {
-                    error!("failed to delete images. Please checkout STDERR")
-                }
-                info!("images deleted!")
-            }
-            Err(e) => error!("docker command failed: {}", e),
-        };
-    }
-
-    if args.dry_run {
-        info!("deleted images: {:#?}", ids);
-    }
+    let images = match actions.get().await {
+        Ok(i) => i,
+        Err(e) => {
+            error!("failed to retrieve docker images: {}", e);
+            exit(1);
+        }
+    };
+
+    let saved = match actions.delete(actions.filter(images), args.dry_run).await {
+        Ok(s) => s,
+        Err(e) => {
+            error!("failed to retrieve docker images: {}", e);
+            exit(1);
+        }
+    };
 
     info!(
         "Total disk space saved: {}",
-        if saved_size / 1000.0 > 1.0 {
-            format!("{:.2}GB", saved_size / 1000.0)
+        if saved / 1000_000 >= 1000 {
+            format!("{:.2}GB", saved as f64 / 1000_000_000.0)
         } else {
-            format!("{:.2}MB", saved_size)
+            format!("{:.2}MB", saved as f32 / 1000_000.0)
         }
     );
 }
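Since removal now goes through the socket API, the docker CLI no longer needs to be present at runtime. Assuming the binary keeps its published name, a dry run against a non-default socket would look like `clear-docker-images --socket /run/docker.sock --dry-run --verbose` (the socket path here is only an example).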