// Work-period time-tracking HTTP API: axum routes over a Diesel/SQLite store.
use chrono::{NaiveDateTime, TimeDelta, TimeZone, Datelike};
|
|
use diesel::{dsl::sql, prelude::*, sql_types};
|
|
use self::models::*;
|
|
use std::net::SocketAddr;
|
|
use std::env;
|
|
use serde::{Deserialize, Serialize};
|
|
use axum::{
|
|
extract::{State, Path, Query},
|
|
routing::{get, post, delete, put},
|
|
http::StatusCode,
|
|
Json, Router,
|
|
};
|
|
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
|
|
use crate::schema::work_periods;
|
|
|
|
/// Data-model types (presumably `WorkPeriod`, `NewPeriod`, imported via glob above) — see models.rs.
pub mod models;
/// Diesel-generated table/column DSL for the database schema.
pub mod schema;
/// SQL migrations compiled into the binary from the `migrations/` directory.
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
|
|
|
|
#[tokio::main]
|
|
async fn main() {
|
|
// initialize tracing
|
|
tracing_subscriber::fmt::init();
|
|
let db_url = env::var("DATABASE_URL").unwrap();
|
|
let manager = deadpool_diesel::sqlite::Manager::new(db_url, deadpool_diesel::Runtime::Tokio1);
|
|
let pool = deadpool_diesel::sqlite::Pool::builder(manager)
|
|
.build()
|
|
.unwrap();
|
|
|
|
// Run migrations
|
|
{
|
|
let conn = pool.get().await.unwrap();
|
|
conn.interact(|conn| conn.run_pending_migrations(MIGRATIONS).map(|_| ()))
|
|
.await
|
|
.unwrap()
|
|
.unwrap();
|
|
}
|
|
|
|
// build our application with a route
|
|
let app = Router::new()
|
|
// `GET /` goes to `root`
|
|
.route("/", get(|state| get_metrics(state, None)))
|
|
|
|
.route("/api/metrics", get(|state| get_metrics(state, Some(true))))
|
|
.route("/api/history", post(add_period))
|
|
.route("/api/history", get(get_history))
|
|
.route("/api/history/:id", put(update_period))
|
|
.route("/api/history/:id", delete(delete_period))
|
|
.route("/api/history/:id", get(get_period))
|
|
|
|
.route("/api/tracking", get(get_tracking))
|
|
.route("/api/tracking", post(start_tracking))
|
|
.route("/api/tracking", delete(stop_tracking))
|
|
.with_state(pool);
|
|
|
|
let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
|
|
tracing::debug!("listening on {addr}");
|
|
let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
|
|
axum::serve(listener, app).await.unwrap();
|
|
}
|
|
|
|
/// Query-string parameters accepted by `GET /api/history`.
#[derive(Deserialize)]
struct HistoryQuery {
    /// Maximum number of rows to return (defaults to 10 in the handler).
    count: Option<i64>,
    /// Lower bound on `start_time`; defaults to the Unix epoch when `until` is set.
    since: Option<chrono::NaiveDateTime>,
    /// Upper bound on `start_time`; defaults to "now" when `since` is set.
    until: Option<chrono::NaiveDateTime>,
    /// Exact-match filter on the period's project name.
    project: Option<String>,
}
|
|
|
|
async fn get_history(
|
|
State(pool): State<deadpool_diesel::sqlite::Pool>,
|
|
query: Query<HistoryQuery>,
|
|
) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
|
|
let count = query.count.unwrap_or(10);
|
|
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let res = conn.interact(move |conn|
|
|
{
|
|
let mut selection = work_periods::table.select(WorkPeriod::as_select()).into_boxed().order(work_periods::start_time.desc());
|
|
if query.since.is_some() || query.until.is_some() {
|
|
let start = query.since.unwrap_or_else(|| chrono::Utc.with_ymd_and_hms(1970, 1, 1, 0, 0, 0).unwrap().naive_local());
|
|
let end = query.until.unwrap_or_else(|| chrono::offset::Local::now().naive_local());
|
|
selection = selection.filter(work_periods::start_time.between(start, end));
|
|
}
|
|
if query.project.is_some() {
|
|
selection = selection.filter(work_periods::project.eq(query.project.as_ref().unwrap()));
|
|
}
|
|
selection.limit(count).load(conn)
|
|
}
|
|
).await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
Ok(Json(res))
|
|
}
|
|
|
|
async fn get_tracking(
|
|
State(pool): State<deadpool_diesel::sqlite::Pool>,
|
|
) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let res = conn
|
|
.interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select(WorkPeriod::as_select()).load(conn))
|
|
.await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
Ok(Json(res))
|
|
}
|
|
|
|
/// JSON payload for the metrics endpoints. Every field is a pre-formatted
/// duration string: "Xh Ym" on `GET /`, raw seconds on `GET /api/metrics`.
#[derive(Serialize)]
struct Status {
    /// Time tracked since the start of (the effective) today.
    today: String,
    /// Time tracked since the start of the current week.
    week: String,
    /// Time tracked since the first of the current month.
    month: String,
    /// Time tracked since January 1st of the current year.
    year: String,
    /// Duration of the currently running session(s).
    active: String,
}
|
|
|
|
fn get_duration(periods: Vec<(NaiveDateTime, NaiveDateTime)>) -> i64 {
|
|
periods.iter()
|
|
.map(|(start, end)| end.signed_duration_since(*start))
|
|
.sum::<TimeDelta>().num_seconds()
|
|
}
|
|
|
|
async fn get_since(pool: &deadpool_diesel::sqlite::Pool, start: Option<NaiveDateTime>) -> Result<i64, (StatusCode, Json<Error>)> {
|
|
if let Some(start) = start {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
Ok(get_duration(conn.interact(move |conn| work_periods::table.filter(work_periods::start_time.ge(start)).select((
|
|
work_periods::start_time,
|
|
sql::<sql_types::Timestamp>("COALESCE(end_time, datetime('now', 'localtime'))")
|
|
)).load::<(NaiveDateTime, NaiveDateTime)>(conn)).await.map_err(internal_error)?.map_err(internal_error)?))
|
|
} else {
|
|
Ok(0)
|
|
}
|
|
}
|
|
|
|
/// Render a second count as a compact "Xh Ym" string.
///
/// Seconds are truncated (59s -> "0m"). Previously a duration under one
/// minute produced an empty string; it now renders as "0m" so the metrics
/// endpoints never emit a blank field.
fn format_duration(total_seconds: i64) -> String {
    let hours = total_seconds / 3600;
    let minutes = (total_seconds % 3600) / 60;

    let mut parts = Vec::new();

    if hours > 0 { parts.push(format!("{}h", hours)); }
    if minutes > 0 { parts.push(format!("{}m", minutes)); }
    // Fix: show an explicit zero instead of an empty string.
    if parts.is_empty() {
        return "0m".to_string();
    }
    parts.join(" ")
}
|
|
|
|
// basic handler that responds with a static string
|
|
async fn get_metrics(State(pool): State<deadpool_diesel::sqlite::Pool>, seconds: Option<bool>) -> Result<Json<Status>, (StatusCode, Json<Error>)> {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let current_time = chrono::offset::Local::now();
|
|
|
|
let active_sessions = conn.interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select((
|
|
work_periods::start_time,
|
|
sql::<sql_types::Timestamp>("COALESCE(end_time, datetime('now', 'localtime'))")
|
|
)).load::<(NaiveDateTime, NaiveDateTime)>(conn)).await.map_err(internal_error)?.map_err(internal_error)?;
|
|
let mut today = current_time.date_naive();
|
|
|
|
let started_yesterday = active_sessions.iter().any(|(start, _)| start.date() != today);
|
|
let active_duration = get_duration(active_sessions);
|
|
|
|
if active_duration > 0 && started_yesterday {
|
|
today = today - chrono::Duration::days(1); // If we are currently tracking, we haven't started today yet
|
|
}
|
|
let today_start = today.and_hms_opt(0, 0, 0);
|
|
// We start our week on Saturday, the end of the previous week
|
|
let week_start = (today - chrono::Duration::days((current_time.weekday().num_days_from_monday() - 2) as i64)).and_hms_opt(0, 0, 0);
|
|
let month_start = today.with_day(1).unwrap().and_hms_opt(0, 0, 0);
|
|
let year_start = today.with_day(1).unwrap().with_month(1).unwrap().and_hms_opt(0, 0, 0);
|
|
|
|
let transform = if let Some(true) = seconds { |x| format!("{}", x) } else { format_duration };
|
|
Ok(Json(Status {
|
|
today: transform(get_since(&pool, today_start).await?),
|
|
week: transform(get_since(&pool, week_start).await?),
|
|
month: transform(get_since(&pool, month_start).await?),
|
|
year: transform(get_since(&pool, year_start).await?),
|
|
active: transform(active_duration),
|
|
}))
|
|
}
|
|
|
|
async fn stop_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, payload: Option<Json<WorkPeriod>>) -> Result<StatusCode, (StatusCode, Json<Error>)> {
|
|
pool.get().await.map_err(internal_error)?.interact(|conn|
|
|
match payload {
|
|
Some(Json(payload)) => {
|
|
diesel::update(work_periods::table.filter(work_periods::end_time.is_null()).find(payload.id))
|
|
.set(work_periods::end_time.eq(Some(chrono::offset::Local::now().naive_local())))
|
|
.execute(conn)
|
|
}
|
|
None => {
|
|
diesel::update(work_periods::table.filter(work_periods::end_time.is_null()))
|
|
.set(work_periods::end_time.eq(Some(chrono::offset::Local::now().naive_local())))
|
|
.execute(conn)
|
|
}
|
|
}
|
|
).await.map_err(internal_error)?.map_err(internal_error)?;
|
|
Ok(StatusCode::OK)
|
|
}
|
|
|
|
|
|
async fn start_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
|
|
if payload.end_time.is_some() || payload.start_time.is_some() {
|
|
return Err((StatusCode::BAD_REQUEST, Json(Error {
|
|
success: false,
|
|
value: "Timestamps (start or end) cannot be specified when starting tracking.".to_string()
|
|
})));
|
|
}
|
|
let mut payload = payload.clone();
|
|
payload.start_time = Some(chrono::offset::Local::now().naive_local());
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
// insert your application logic here
|
|
let res = conn
|
|
.interact(move |conn| {
|
|
diesel::insert_into(work_periods::table)
|
|
.values(&payload)
|
|
.returning(WorkPeriod::as_returning())
|
|
.get_result(conn)
|
|
})
|
|
.await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
|
|
// this will be converted into a JSON response
|
|
// with a status code of `201 Created`
|
|
Ok((StatusCode::CREATED, Json(res)))
|
|
}
|
|
|
|
async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let res = conn
|
|
.interact(move |conn| {
|
|
diesel::insert_into(work_periods::table)
|
|
.values(&payload)
|
|
.returning(WorkPeriod::as_returning())
|
|
.get_result(conn)
|
|
})
|
|
.await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
|
|
Ok((StatusCode::CREATED, Json(res)))
|
|
}
|
|
|
|
async fn update_period(State(pool): State<deadpool_diesel::sqlite::Pool>,
|
|
Path(period_id): Path<i32>,
|
|
Json(payload): Json<WorkPeriod>
|
|
) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
|
|
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let res = conn.interact(move |conn| {
|
|
diesel::update(work_periods::table.find(period_id))
|
|
.set(payload)
|
|
.get_result(conn)
|
|
})
|
|
.await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
|
|
Ok((StatusCode::OK, Json(res)))
|
|
}
|
|
|
|
async fn get_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<Json<WorkPeriod>, (StatusCode, Json<Error>)> {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
let res = conn.interact(move |conn|
|
|
work_periods::table.select(WorkPeriod::as_select()).find(period_id).first(conn)
|
|
).await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
|
|
Ok(Json(res))
|
|
}
|
|
|
|
|
|
async fn delete_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
|
|
let conn = pool.get().await.map_err(internal_error)?;
|
|
// insert your application logic here
|
|
let res = conn
|
|
.interact(move |conn| {
|
|
diesel::delete(work_periods::table.find(period_id))
|
|
.get_result(conn)
|
|
})
|
|
.await
|
|
.map_err(internal_error)?
|
|
.map_err(internal_error)?;
|
|
|
|
Ok((StatusCode::OK, Json(res)))
|
|
}
|
|
|
|
|
|
/// JSON error body returned by every handler's failure path.
#[derive(Serialize)]
struct Error {
    /// Always `false` in error responses; kept for client-side uniformity.
    success: bool,
    /// Human-readable description of what went wrong.
    value: String,
}
|
|
|
|
fn internal_error<E>(err: E) -> (StatusCode, Json<Error>)
|
|
where
|
|
E: std::error::Error,
|
|
{
|
|
(StatusCode::INTERNAL_SERVER_ERROR, Json(Error{success: false, value: err.to_string()}))
|
|
}
|