Start implementing actual methods

Thomas Avé 2024-11-03 15:11:53 +01:00
parent 202ce82c9d
commit 68cc867d1d
4 changed files with 162 additions and 14 deletions

Changed file: work_periods table migration (SQL)

@@ -2,6 +2,6 @@ CREATE TABLE work_periods (
     id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
     project VARCHAR NOT NULL,
     start_time TIMESTAMP NOT NULL,
-    end_time TIMESTAMP NOT NULL,
+    end_time TIMESTAMP,
     description VARCHAR
 )
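Note: dropping NOT NULL here is the schema-level change behind live tracking. A row whose end_time is still NULL stands for a period that is currently being tracked; the /tracking handlers below select exactly those rows (end_time.is_null()) and later close them by filling in the timestamp.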

Changed file: server entry point with the Axum routes and handlers (Rust)

@@ -1,19 +1,22 @@
+use chrono::TimeZone;
 use diesel::prelude::*;
 use dotenvy::dotenv;
 use self::models::*;
 use std::net::SocketAddr;
 use std::env;
+use serde::{Deserialize, Serialize};
 use axum::{
-    extract::State,
-    routing::{get, post},
+    extract::{State, Path, Query},
+    routing::{get, post, delete, put},
     http::StatusCode,
     Json, Router,
 };
 
 use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
-pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
+use crate::schema::work_periods;
 pub mod models;
 pub mod schema;
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
 
 #[tokio::main]
 async fn main() {
@@ -39,7 +42,16 @@ async fn main() {
     let app = Router::new()
         // `GET /` goes to `root`
         .route("/", get(root))
         .route("/history", post(add_period))
+        .route("/history", get(get_history))
+        .route("/history/:id", put(update_period))
+        .route("/history/:id", delete(delete_period))
+        .route("/history/:id", get(get_period))
+        .route("/tracking", get(get_tracking))
+        .route("/tracking", post(start_tracking))
+        .route("/tracking", delete(stop_tracking))
         .with_state(pool);
 
     // run our app with hyper, listening globally on port 3000
@@ -49,14 +61,79 @@ async fn main() {
     axum::serve(listener, app).await.unwrap();
 }
 
+#[derive(Deserialize)]
+struct HistoryQuery {
+    count: Option<i64>,
+    since: Option<chrono::NaiveDateTime>,
+    until: Option<chrono::NaiveDateTime>,
+}
+
+async fn get_history(
+    State(pool): State<deadpool_diesel::sqlite::Pool>,
+    query: Query<HistoryQuery>,
+) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
+    let count = query.count.unwrap_or(10);
+    let start = query.since.unwrap_or_else(|| chrono::Utc.with_ymd_and_hms(1970, 1, 1, 0, 0, 0).unwrap().naive_utc());
+    let end = query.until.unwrap_or_else(|| chrono::Utc::now().naive_utc());
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn|
+        work_periods::table.select(WorkPeriod::as_select())
+            .filter(work_periods::start_time.between(start, end))
+            .order(work_periods::start_time.desc())
+            .limit(count)
+            .load(conn)
+    ).await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+    Ok(Json(res))
+}
+
+async fn get_tracking(
+    State(pool): State<deadpool_diesel::sqlite::Pool>,
+) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn
+        .interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select(WorkPeriod::as_select()).load(conn))
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+    Ok(Json(res))
+}
+
 // basic handler that responds with a static string
 async fn root() -> &'static str {
     "Hello, World!"
 }
 
-async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, String)> {
-    use crate::schema::work_periods;
+async fn stop_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, payload: Option<Json<WorkPeriod>>) -> Result<StatusCode, (StatusCode, Json<Error>)> {
+    pool.get().await.map_err(internal_error)?.interact(|conn|
+        match payload {
+            Some(Json(payload)) => {
+                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()).find(payload.id))
+                    .set(work_periods::end_time.eq(Some(chrono::Utc::now().naive_utc())))
+                    .execute(conn)
+            }
+            None => {
+                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()))
+                    .set(work_periods::end_time.eq(Some(chrono::Utc::now().naive_utc())))
+                    .execute(conn)
+            }
+        }
+    ).await.map_err(internal_error)?.map_err(internal_error)?;
+    Ok(StatusCode::OK)
+}
+
+async fn start_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    if payload.end_time.is_some() || payload.start_time.is_some() {
+        return Err((StatusCode::BAD_REQUEST, Json(Error {
+            success: false,
+            value: "Timestamps (start or end) cannot be specified when starting tracking.".to_string()
+        })));
+    }
+    let mut payload = payload.clone();
+    payload.start_time = Some(chrono::Utc::now().naive_utc());
     let conn = pool.get().await.map_err(internal_error)?;
     // insert your application logic here
     let res = conn
@@ -75,9 +152,80 @@ async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payl
     Ok((StatusCode::CREATED, Json(res)))
 }
 
-fn internal_error<E>(err: E) -> (StatusCode, String)
+async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    // insert your application logic here
+    let res = conn
+        .interact(move |conn| {
+            diesel::insert_into(work_periods::table)
+                .values(&payload)
+                .returning(WorkPeriod::as_returning())
+                .get_result(conn)
+        })
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+    // this will be converted into a JSON response
+    // with a status code of `201 Created`
+    Ok((StatusCode::CREATED, Json(res)))
+}
+
+async fn update_period(State(pool): State<deadpool_diesel::sqlite::Pool>,
+    Path(period_id): Path<i32>,
+    Json(payload): Json<WorkPeriod>
+) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn| {
+        diesel::update(work_periods::table.find(period_id))
+            .set(payload)
+            .get_result(conn)
+    })
+    .await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+    Ok((StatusCode::OK, Json(res)))
+}
+
+async fn get_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<Json<WorkPeriod>, (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    let res = conn.interact(move |conn|
+        work_periods::table.select(WorkPeriod::as_select()).find(period_id).first(conn)
+    ).await
+    .map_err(internal_error)?
+    .map_err(internal_error)?;
+    Ok(Json(res))
+}
+
+async fn delete_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
+    let conn = pool.get().await.map_err(internal_error)?;
+    // insert your application logic here
+    let res = conn
+        .interact(move |conn| {
+            diesel::delete(work_periods::table.find(period_id))
+                .get_result(conn)
+        })
+        .await
+        .map_err(internal_error)?
+        .map_err(internal_error)?;
+    Ok((StatusCode::OK, Json(res)))
+}
+
+#[derive(Serialize)]
+struct Error {
+    success: bool,
+    value: String,
+}
+
+fn internal_error<E>(err: E) -> (StatusCode, Json<Error>)
 where
     E: std::error::Error,
 {
-    (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
+    (StatusCode::INTERNAL_SERVER_ERROR, Json(Error{success: false, value: err.to_string()}))
 }
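As a rough usage sketch (not part of the commit): the payloads below show what the new endpoints expect, assuming the default serde field names on NewPeriod/WorkPeriod and chrono's ISO-8601 form for NaiveDateTime; serde_json is used here purely for illustration.

use serde_json::json;

fn example_requests() {
    // POST /tracking: start tracking now; start_time and end_time must be omitted,
    // otherwise start_tracking answers 400 with the Error body.
    let start = json!({ "project": "demo", "description": "illustrative payload" });

    // POST /history: record a finished period with explicit timestamps.
    let add = json!({
        "project": "demo",
        "start_time": "2024-11-03T14:00:00",
        "end_time": "2024-11-03T15:00:00",
        "description": "illustrative payload"
    });

    // GET /history?count=5&since=2024-11-01T00:00:00 is deserialized into HistoryQuery;
    // omitted fields fall back to count = 10 and the 1970..now window.
    println!("{start}\n{add}");
}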

Changed file: Diesel models (Rust)

@@ -3,23 +3,23 @@ use serde::{Deserialize, Serialize};
 use super::schema::work_periods;
 use chrono::NaiveDateTime;
 
-#[derive(Queryable, Debug, Serialize, Deserialize, Selectable)]
+#[derive(Queryable, Debug, Serialize, Deserialize, Selectable, AsChangeset)]
 #[diesel(table_name = crate::schema::work_periods)]
 #[diesel(check_for_backend(diesel::sqlite::Sqlite))]
 pub struct WorkPeriod {
     pub id: i32,
     pub project: String,
     pub start_time: NaiveDateTime,
-    pub end_time: NaiveDateTime,
+    pub end_time: Option<NaiveDateTime>,
     pub description: Option<String>,
 }
 
-#[derive(Insertable, Serialize, Deserialize, Debug)]
+#[derive(Insertable, Serialize, Deserialize, Debug, Clone)]
 #[diesel(table_name = work_periods)]
 pub struct NewPeriod {
     pub project: String,
-    pub start_time: NaiveDateTime,
-    pub end_time: NaiveDateTime,
+    pub start_time: Option<NaiveDateTime>,
+    pub end_time: Option<NaiveDateTime>,
     pub description: Option<String>,
 }

Changed file: Diesel schema (Rust)

@@ -5,7 +5,7 @@ diesel::table! {
         id -> Integer,
         project -> Text,
         start_time -> Timestamp,
-        end_time -> Timestamp,
+        end_time -> Nullable<Timestamp>,
         description -> Nullable<Text>,
     }
 }