diff --git a/migrations/2024-11-02-143917_work_periods/up.sql b/migrations/2024-11-02-143917_work_periods/up.sql
index 7d18974..81ff5ec 100644
--- a/migrations/2024-11-02-143917_work_periods/up.sql
+++ b/migrations/2024-11-02-143917_work_periods/up.sql
@@ -2,6 +2,6 @@ CREATE TABLE work_periods (
   id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
   project VARCHAR NOT NULL,
   start_time TIMESTAMP NOT NULL,
-  end_time TIMESTAMP NOT NULL,
+  end_time TIMESTAMP,
   description VARCHAR
 )
diff --git a/src/main.rs b/src/main.rs
index 8171e8a..0478d43 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,19 +1,22 @@
+use chrono::TimeZone;
 use diesel::prelude::*;
 use dotenvy::dotenv;
 use self::models::*;
 use std::net::SocketAddr;
 use std::env;
+use serde::{Deserialize, Serialize};
 use axum::{
-    extract::State,
-    routing::{get, post},
+    extract::{State, Path, Query},
+    routing::{get, post, delete, put},
     http::StatusCode,
     Json, Router,
 };
 use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
-pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
+use crate::schema::work_periods;
 
 pub mod models;
 pub mod schema;
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
 
 #[tokio::main]
 async fn main() {
@@ -39,7 +42,16 @@ async fn main() {
     let app = Router::new()
         // `GET /` goes to `root`
         .route("/", get(root))
+        .route("/history", post(add_period))
+        .route("/history", get(get_history))
+        .route("/history/:id", put(update_period))
+        .route("/history/:id", delete(delete_period))
+        .route("/history/:id", get(get_period))
+
+        .route("/tracking", get(get_tracking))
+        .route("/tracking", post(start_tracking))
+        .route("/tracking", delete(stop_tracking))
         .with_state(pool);
 
     // run our app with hyper, listening globally on port 3000
@@ -49,14 +61,79 @@ async fn main() {
     axum::serve(listener, app).await.unwrap();
 }
 
+#[derive(Deserialize)]
+struct HistoryQuery {
+    count: Option<i64>,
+    since: Option<chrono::NaiveDateTime>,
+    until: Option<chrono::NaiveDateTime>,
+}
+
+async fn get_history(
+    State(pool): State<deadpool_diesel::sqlite::Pool>,
+    query: Query<HistoryQuery>,
+) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
+    let count = query.count.unwrap_or(10);
+    let start = query.since.unwrap_or_else(|| chrono::Utc.with_ymd_and_hms(1970, 1, 1, 0, 0, 0).unwrap().naive_utc());
+    let end = query.until.unwrap_or_else(|| chrono::Utc::now().naive_utc());
+
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn|
+        work_periods::table.select(WorkPeriod::as_select())
+            .filter(work_periods::start_time.between(start, end))
+            .order(work_periods::start_time.desc())
+            .limit(count)
+            .load(conn)
+    ).await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+    Ok(Json(res))
+}
+
+async fn get_tracking(
+    State(pool): State<deadpool_diesel::sqlite::Pool>,
+) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn
+        .interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select(WorkPeriod::as_select()).load(conn))
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+    Ok(Json(res))
+}
+
 // basic handler that responds with a static string
 async fn root() -> &'static str {
     "Hello, World!"
 }
 
-async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, String)> {
-    use crate::schema::work_periods;
+async fn stop_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, payload: Option<Json<WorkPeriod>>) -> Result<StatusCode, (StatusCode, Json<Error>)> {
+    pool.get().await.map_err(internal_error)?.interact(|conn|
+        match payload {
+            Some(Json(payload)) => {
+                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()).find(payload.id))
+                    .set(work_periods::end_time.eq(Some(chrono::Utc::now().naive_utc())))
+                    .execute(conn)
+            }
+            None => {
+                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()))
+                    .set(work_periods::end_time.eq(Some(chrono::Utc::now().naive_utc())))
+                    .execute(conn)
+            }
+        }
+    ).await.map_err(internal_error)?.map_err(internal_error)?;
+    Ok(StatusCode::OK)
+}
+
+async fn start_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    if payload.end_time.is_some() || payload.start_time.is_some() {
+        return Err((StatusCode::BAD_REQUEST, Json(Error {
+            success: false,
+            value: "Timestamps (start or end) cannot be specified when starting tracking.".to_string()
+        })));
+    }
+    let mut payload = payload.clone();
+    payload.start_time = Some(chrono::Utc::now().naive_utc());
     let conn = pool.get().await.map_err(internal_error)?;
     // insert your application logic here
     let res = conn
         .interact(move |conn| {
             diesel::insert_into(work_periods::table)
                 .values(&payload)
                 .returning(WorkPeriod::as_returning())
                 .get_result(conn)
         })
         .await
         .map_err(internal_error)?
@@ -75,9 +152,80 @@ async fn add_period(State(pool): State, Json(payl
     Ok((StatusCode::CREATED, Json(res)))
 }
 
-fn internal_error<E>(err: E) -> (StatusCode, String)
+async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    // insert your application logic here
+    let res = conn
+        .interact(move |conn| {
+            diesel::insert_into(work_periods::table)
+                .values(&payload)
+                .returning(WorkPeriod::as_returning())
+                .get_result(conn)
+        })
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+
+    // this will be converted into a JSON response
+    // with a status code of `201 Created`
+    Ok((StatusCode::CREATED, Json(res)))
+}
+
+async fn update_period(State(pool): State<deadpool_diesel::sqlite::Pool>,
+    Path(period_id): Path<i32>,
+    Json(payload): Json<WorkPeriod>
+    ) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn| {
+        diesel::update(work_periods::table.find(period_id))
+            .set(payload)
+            .get_result(conn)
+    })
+    .await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+
+    Ok((StatusCode::OK, Json(res)))
+}
+
+async fn get_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<Json<WorkPeriod>, (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn|
+        work_periods::table.select(WorkPeriod::as_select()).find(period_id).first(conn)
+    ).await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+
+    Ok(Json(res))
+}
+
+
+async fn delete_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    // insert your application logic here
+    let res = conn
+        .interact(move |conn| {
+            diesel::delete(work_periods::table.find(period_id))
+                .get_result(conn)
+        })
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+
+    Ok((StatusCode::OK, Json(res)))
+}
+
+
+#[derive(Serialize)]
+struct Error {
+    success: bool,
+    value: String,
+}
+
+fn internal_error<E>(err: E) -> (StatusCode, Json<Error>)
 where
     E: std::error::Error,
 {
-    (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
+    (StatusCode::INTERNAL_SERVER_ERROR, Json(Error{success: false, value: err.to_string()}))
 }
diff --git a/src/models.rs b/src/models.rs
index ca9237e..31219d3 100644
--- a/src/models.rs
+++ b/src/models.rs
@@ -3,23 +3,23 @@ use serde::{Deserialize, Serialize};
 use super::schema::work_periods;
 use chrono::NaiveDateTime;
 
-#[derive(Queryable, Debug, Serialize, Deserialize, Selectable)]
+#[derive(Queryable, Debug, Serialize, Deserialize, Selectable, AsChangeset)]
 #[diesel(table_name = crate::schema::work_periods)]
 #[diesel(check_for_backend(diesel::sqlite::Sqlite))]
 pub struct WorkPeriod {
     pub id: i32,
     pub project: String,
     pub start_time: NaiveDateTime,
-    pub end_time: NaiveDateTime,
+    pub end_time: Option<NaiveDateTime>,
     pub description: Option<String>,
 }
 
-#[derive(Insertable, Serialize, Deserialize, Debug)]
+#[derive(Insertable, Serialize, Deserialize, Debug, Clone)]
 #[diesel(table_name = work_periods)]
 pub struct NewPeriod {
     pub project: String,
-    pub start_time: NaiveDateTime,
-    pub end_time: NaiveDateTime,
+    pub start_time: Option<NaiveDateTime>,
+    pub end_time: Option<NaiveDateTime>,
     pub description: Option<String>,
 }
diff --git a/src/schema.rs b/src/schema.rs
index 73eabfd..45230ac 100644
--- a/src/schema.rs
+++ b/src/schema.rs
@@ -5,7 +5,7 @@ diesel::table! {
         id -> Integer,
         project -> Text,
         start_time -> Timestamp,
-        end_time -> Timestamp,
+        end_time -> Nullable<Timestamp>,
         description -> Nullable<Text>,
     }
 }