Compare commits

...

No commits in common. "server" and "cli" have entirely different histories.
server ... cli

16 changed files with 947 additions and 926 deletions

1
.env
View File

@ -1 +0,0 @@
DATABASE_URL=file:database.db

14
.envrc
View File

@ -1,14 +0,0 @@
[ -e ".devenv" ] || rm -rf .devenv
[ -L ".devenv" ] || (mkdir -p "$HOME/.cache/devenv$PWD" && ln -s "$HOME/.cache/devenv$PWD" .devenv)
if ! has nix_direnv_version || ! nix_direnv_version 2.2.1; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.1/direnvrc" "sha256-zelF0vLbEl5uaqrfIzbgNzJWGmLzCmYAkInj/LNxvKs="
fi
watch_file flake.nix
watch_file flake.lock
if ! use flake . --impure
then
echo "devenv could not be built. The devenv environment was not loaded. Make the necessary changes to devenv.nix and hit enter to try again." >&2
fi

1
.gitignore vendored
View File

@ -16,3 +16,4 @@ target/
# End of https://www.toptal.com/developers/gitignore/api/rust
.devenv
.envrc

1171
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +1,16 @@
[package]
name = "work-timer"
name = "work-timer-cli"
version = "0.1.0"
edition = "2021"
[dependencies]
axum = "0.7.7"
chrono = { version = "0.4.38", features = ["serde"] }
deadpool-diesel = {version = "0.6.1", features = ["sqlite"]}
diesel = { version = "2.2.4", features = ["sqlite", "returning_clauses_for_sqlite_3_35", "chrono"] }
dotenvy = "0.15.7"
libsqlite3-sys = {version="0.30.1", features = ["bundled"]}
clap = "4.5.20"
http = "1.1.0"
reqwest = {version="0.12.9", features = ["json"]}
serde = {version="1.0.214", features = ["derive"]}
serde_json = "1.0.132"
tokio = {version="1.41.0", features = ["full"]}
tracing = "0.1.40"
diesel_migrations = "2.2.0"
tracing-subscriber = {version="0.3.18", features = ["env-filter"]}
[[bin]]
name = "timer"

View File

@ -6,4 +6,4 @@ ADD ./ /app
WORKDIR /app
RUN cargo build --release
CMD [ "/app/target/release/work-timer" ]
CMD [ "/app/target/release/timer" ]

View File

@ -1,10 +0,0 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
filter = { only_tables = ["work_periods"] }
[migrations_directory]
dir = "/home/server/Workspace/Projects/WorkTimer/Server/migrations"

View File

@ -21,6 +21,9 @@
lockFile = ./Cargo.lock;
};
src = ./.;
buildInputs = [ pkgs.openssl ];
nativeBuildInputs = [ pkgs.pkg-config ];
PKG_CONFIG_PATH="${pkgs.openssl.dev}/lib/pkgconfig";
};
in {
packages = {

View File

@ -1 +0,0 @@
DROP TABLE work_periods;

View File

@ -1,7 +0,0 @@
CREATE TABLE work_periods (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
project VARCHAR NOT NULL,
start_time TIMESTAMP NOT NULL,
end_time TIMESTAMP,
description VARCHAR
)

94
src/bin/timer.rs Normal file
View File

@ -0,0 +1,94 @@
use clap::{arg, Command};
use work_timer_cli::commands::{start, stop, edit, status, Settings};
/// Build the `timer` command-line interface.
///
/// The global flags (`--project`, `--server`, `--json`) apply to every
/// subcommand; the subcommands map onto the async handlers in
/// `work_timer_cli::commands` (`start`, `stop`, `edit`, `status`).
fn cli() -> Command {
    Command::new("timer")
        .about("A tracker for time spent working")
        .subcommand_required(true)
        .arg_required_else_help(true)
        .allow_external_subcommands(true)
        .arg(
            arg!(--project <PROJECT> "The project associated with this session")
                .short('p')
                .default_value("trackbox")
        )
        .arg(
            arg!(--server <URL> "The base URL of the tracking server")
                .short('s')
                .default_value("https://timer.thomasave.be")
        )
        .arg(
            // NOTE(review): `--json` takes no value, so clap treats it as a
            // set-true flag; with default_value("true") it evaluates to true
            // whether or not the flag is passed, and default_missing_value
            // has no effect on a value-less flag. Confirm intended polarity.
            arg!(--json "Use JSON output")
                .short('j')
                .default_value("true")
                .default_missing_value("false")
        )
        .subcommand(
            Command::new("start")
                .about("Start tracking a working session")
                .arg(
                    arg!(<DESCRIPTION> "A description to add for this session")
                        .required(false)
                )
        )
        .subcommand(
            Command::new("stop")
                .about("Finish the working session currently being tracked")
                .arg(
                    arg!(<ID> "The ID of the session to stop")
                        .required(false)
                )
        )
        .subcommand(
            Command::new("edit")
                .about("Edit a list of sessions")
                .arg(
                    arg!(-n <NUM> "The maximum number of sessions to edit")
                        .default_value("10")
                        .required(false)
                )
                .arg(
                    arg!(--since <TIMESTAMP> "A timestamp to start from. Can be ISO8601 or 'today' or a weekday")
                        .required(false)
                )
                .arg(
                    arg!(--until <TIMESTAMP> "A timestamp to end at. Can be ISO8601 or 'today' or a weekday")
                        .required(false)
                )
        )
        .subcommand(
            Command::new("status")
                .about("Get an overview of recent sessions")
        )
}
/// CLI entry point: parse arguments, assemble `Settings`, and dispatch to
/// the matching subcommand handler. Handler failures abort via `unwrap`.
#[tokio::main]
async fn main() {
    let matches = cli().get_matches();
    // All three global options have defaults, so unwrap() cannot fail here.
    let project = matches.get_one::<String>("project").unwrap();
    let url = matches.get_one::<String>("server").unwrap();
    let json = matches.get_one::<bool>("json").unwrap();
    let settings = Settings { project: project.to_string(), url: url.to_string(), json: *json };
    match matches.subcommand() {
        Some(("start", sub_matches)) => {
            let description = sub_matches.get_one::<String>("DESCRIPTION");
            start(settings, description).await.unwrap();
        }
        Some(("stop", sub_matches)) => {
            // BUG FIX: `<ID>` is declared without a typed value parser, so
            // clap stores it as a String; `get_one::<i32>("ID")` panicked at
            // runtime with a definition/access mismatch. Parse it explicitly.
            let id = sub_matches
                .get_one::<String>("ID")
                .map(|s| s.parse::<i32>().expect("ID must be an integer"));
            stop(settings, id.as_ref()).await.unwrap();
        }
        Some(("edit", sub_matches)) => {
            let since = sub_matches.get_one::<String>("since");
            let until = sub_matches.get_one::<String>("until");
            let num = sub_matches.get_one::<String>("NUM");
            edit(settings, since, until, num).await.unwrap();
        }
        Some(("status", _)) => {
            status(settings).await.unwrap();
        }
        // Unreachable in practice (subcommand_required), kept as a safety net.
        _ => cli().print_help().unwrap(),
    }
}

218
src/commands.rs Normal file
View File

@ -0,0 +1,218 @@
use std::{collections::HashMap, io::Write};
use chrono::Datelike;
use chrono::NaiveDateTime;
use reqwest::Client;
use serde_json::json;
use serde::{Deserialize, Serialize};
use std::{
env::{temp_dir, var},
fs::File,
io::Read,
process::Command,
};
/// Runtime configuration shared by every subcommand handler.
pub struct Settings {
    // Base URL of the tracking server, without a trailing slash.
    pub url: String,
    // Project name sent with tracking/history requests.
    pub project: String,
    // Whether output should be machine-readable JSON.
    // NOTE(review): not read by any handler in this file — confirm intent.
    pub json: bool,
}
/// Client-side mirror of a tracked work session as served by the API.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WorkPeriod {
    // None for entries created locally (e.g. added in the editor) that the
    // server has not assigned an id to yet.
    pub id: Option<i32>,
    pub project: String,
    pub start_time: NaiveDateTime,
    // None while the session is still being tracked.
    pub end_time: Option<NaiveDateTime>,
    pub description: Option<String>,
}
/// Start tracking a new working session by POSTing to `/api/tracking`.
///
/// On a non-2xx response the body is printed for diagnosis; transport
/// errors are propagated to the caller.
pub async fn start(settings: Settings, description: Option<&String>) -> Result<(), reqwest::Error> {
    let mut body = HashMap::new();
    body.insert("project", settings.project);
    if let Some(text) = description {
        body.insert("description", text.clone());
    }
    let response = Client::new()
        .post(format!("{}/api/tracking", settings.url))
        .json(&body)
        .send()
        .await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
    }
    Ok(())
}
/// Stop a working session via DELETE `/api/tracking`.
///
/// When `id` is omitted the server decides which open session(s) to close.
pub async fn stop(settings: Settings, id: Option<&i32>) -> Result<(), reqwest::Error> {
    let mut body = HashMap::new();
    body.insert("project", settings.project);
    if let Some(session) = id {
        body.insert("id", session.to_string());
    }
    let response = Client::new()
        .delete(format!("{}/api/tracking", settings.url))
        .json(&body)
        .send()
        .await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
    }
    Ok(())
}
/// Resolve a human-friendly timestamp — "today", "yesterday", or a lowercase
/// weekday name — to midnight (local time) of that day as an ISO-8601 string.
/// Anything else is passed through verbatim, assumed to already be ISO 8601.
fn parse_timestamp(timestamp: &str) -> String {
    let now = chrono::Local::now();
    let weekdays = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"];
    if timestamp == "today" {
        return now.format("%Y-%m-%dT00:00:00").to_string();
    }
    if timestamp == "yesterday" {
        return (now - chrono::Duration::days(1)).format("%Y-%m-%dT00:00:00").to_string();
    }
    if let Some(i) = weekdays.iter().position(|w| *w == timestamp) {
        // BUG FIX: Rust's `%` keeps the dividend's sign, so asking for a
        // weekday later in the week than today yielded a negative day count,
        // i.e. a date in the FUTURE. rem_euclid always gives 0..=6 days back.
        let days = (now.weekday().num_days_from_monday() as i64 - i as i64).rem_euclid(7);
        return (now - chrono::Duration::days(days)).format("%Y-%m-%dT00:00:00").to_string();
    }
    timestamp.to_string()
}
/// Deserialize the JSON array of periods returned by `/api/history`.
fn parse_periods(body: String) -> Result<Vec<WorkPeriod>, serde_json::Error> {
    serde_json::from_str(&body)
}
/// Pretty-print a list of periods as JSON suitable for hand editing.
fn to_json(periods: Vec<WorkPeriod>) -> Result<String, serde_json::Error> {
    serde_json::to_string_pretty(&json!(periods))
}
/// Write `periods` to a temp file as pretty JSON, open it in `$EDITOR`
/// (falling back to `vi`), then read and parse the edited file back.
///
/// # Errors
/// Returns any I/O error from the temp file or the editor process. A JSON
/// failure (serializing in or parsing the edited file back) is surfaced as an
/// I/O error too, via serde_json's `From<Error> for std::io::Error`.
/// Previously most of these paths used `expect()` and aborted the process.
fn edit_periods(periods: Vec<WorkPeriod>) -> Result<Vec<WorkPeriod>, std::io::Error> {
    let content = to_json(periods)?;
    let editor = var("EDITOR").unwrap_or_else(|_| "vi".to_string());
    let mut file_path = temp_dir();
    file_path.push("Periods.json");
    File::create(&file_path)?.write_all(content.as_bytes())?;
    // Block until the user's editor exits.
    Command::new(editor).arg(&file_path).status()?;
    let mut edited = String::new();
    File::open(&file_path)?.read_to_string(&mut edited)?;
    Ok(serde_json::from_str(&edited)?)
}
/// PUT an edited period back to `/api/history/{id}`.
/// Panics if `period.id` is `None` (unchanged behavior; callers ensure it).
pub async fn update_period(settings: &Settings, period: WorkPeriod) -> Result<(), reqwest::Error> {
    let endpoint = format!("{}/api/history/{}", settings.url, period.id.unwrap());
    let response = Client::new()
        .put(endpoint)
        .json(&period)
        .send()
        .await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
    }
    Ok(())
}
/// POST a brand-new period to `/api/history`.
/// Clears `period.id` first so the server assigns the identifier.
pub async fn add_period(settings: &Settings, period: &mut WorkPeriod) -> Result<(), reqwest::Error> {
    period.id = None;
    let endpoint = format!("{}/api/history", settings.url);
    let response = Client::new()
        .post(endpoint)
        .json(&period)
        .send()
        .await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
    }
    Ok(())
}
/// DELETE the period with the given id via `/api/history/{id}`.
pub async fn delete_period(settings: &Settings, id: i32) -> Result<(), reqwest::Error> {
    let endpoint = format!("{}/api/history/{}", settings.url, id);
    let response = Client::new()
        .delete(endpoint)
        .send()
        .await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
    }
    Ok(())
}
/// Fetch recent periods matching the filters, open them in the user's
/// editor, and synchronise the edited list back to the server:
/// - entries whose id is still present are updated (PUT),
/// - entries with a missing or unknown id are created (POST),
/// - entries the user removed from the file are deleted (DELETE).
pub async fn edit(settings: Settings, since: Option<&String>, until: Option<&String>, num: Option<&String>) -> Result<(), reqwest::Error> {
    let mut params = vec![
        ("project", settings.project.to_owned()),
    ];
    if let Some(since) = since {
        params.push(("since", parse_timestamp(since)));
    }
    if let Some(until) = until {
        params.push(("until", parse_timestamp(until)));
    }
    if let Some(num) = num {
        params.push(("count", num.to_string()));
    }
    let client = Client::new();
    let url = reqwest::Url::parse_with_params((settings.url.to_owned() + "/api/history").as_str(), &params).unwrap();
    let response = client.get(url).send().await?;
    if !response.status().is_success() {
        println!("{:?}", response.text().await);
        return Ok(());
    }
    let body = response.text().await?;
    let periods = parse_periods(body).unwrap();
    // Ids we handed to the editor; whatever is left over afterwards was
    // deleted by the user.
    let mut ids = periods.iter().map(|p| p.id.unwrap()).collect::<Vec<i32>>();
    let edited = edit_periods(periods).unwrap();
    for period in edited {
        match period.id.and_then(|id| ids.iter().position(|&x| x == id)) {
            Some(pos) => {
                ids.remove(pos);
                update_period(&settings, period).await?;
            }
            None => {
                // BUG FIX: new entries were previously POSTed and then ALSO
                // PUT to `/api/history/{id}` (double write, panicking when
                // the id was absent). A new entry is now only created, and a
                // missing id no longer panics the id lookup above.
                let mut fresh = period;
                add_period(&settings, &mut fresh).await?;
            }
        }
    }
    for id in ids {
        delete_period(&settings, id).await?;
    }
    Ok(())
}
/// Aggregate metrics returned by the server's `/` endpoint: tracked time per
/// calendar window, as preformatted strings.
#[derive(Debug, Serialize, Deserialize)]
pub struct Status {
    pub today: String,
    pub week: String,
    pub month: String,
    pub year: String,
    // Length of the currently running session(s).
    pub active: String,
}
/// GET the server's `/` metrics endpoint and pretty-print the JSON result.
///
/// # Errors
/// Transport failures (send or body read) are propagated; previously the
/// body read `unwrap()`ed and aborted the process.
pub async fn status(settings: Settings) -> Result<(), reqwest::Error> {
    let response = Client::new()
        .get(settings.url.to_string() + "/")
        .send()
        .await?;
    let response_text = response.text().await?;
    let status: Status = serde_json::from_str(&response_text)
        .expect("server returned malformed status JSON");
    // Re-serializing a plain struct of Strings cannot fail.
    println!("{}", serde_json::to_string_pretty(&status).unwrap());
    Ok(())
}

1
src/lib.rs Normal file
View File

@ -0,0 +1 @@
//! Library crate for the work-timer CLI; all subcommand logic lives in
//! [`commands`].
pub mod commands;

View File

@ -1,297 +0,0 @@
use chrono::{NaiveDateTime, TimeDelta, TimeZone, Datelike};
use diesel::{dsl::sql, prelude::*, sql_types};
use self::models::*;
use std::net::SocketAddr;
use std::env;
use serde::{Deserialize, Serialize};
use axum::{
extract::{State, Path, Query},
routing::{get, post, delete, put},
http::StatusCode,
Json, Router,
};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use crate::schema::work_periods;
pub mod models;
pub mod schema;
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
/// Server entry point: initialise tracing, open the SQLite pool from
/// `DATABASE_URL`, run pending Diesel migrations, then serve the HTTP API on
/// 0.0.0.0:3000.
///
/// Panics (via `unwrap`) on any startup failure: missing `DATABASE_URL`,
/// pool construction, migration errors, or a busy port.
#[tokio::main]
async fn main() {
    // initialize tracing
    tracing_subscriber::fmt::init();
    let db_url = env::var("DATABASE_URL").unwrap();
    let manager = deadpool_diesel::sqlite::Manager::new(db_url, deadpool_diesel::Runtime::Tokio1);
    let pool = deadpool_diesel::sqlite::Pool::builder(manager)
        .build()
        .unwrap();
    // Run migrations before accepting traffic. The double unwrap covers the
    // interact() join error and the migration error separately.
    {
        let conn = pool.get().await.unwrap();
        conn.interact(|conn| conn.run_pending_migrations(MIGRATIONS).map(|_| ()))
            .await
            .unwrap()
            .unwrap();
    }
    // build our application with a route
    let app = Router::new()
        // `GET /` goes to `root`
        .route("/", get(|state| get_metrics(state, None)))
        // Same data as `/`, but raw second counts instead of "1h 2m" strings.
        .route("/api/metrics", get(|state| get_metrics(state, Some(true))))
        .route("/api/history", post(add_period))
        .route("/api/history", get(get_history))
        .route("/api/history/:id", put(update_period))
        .route("/api/history/:id", delete(delete_period))
        .route("/api/history/:id", get(get_period))
        .route("/api/tracking", get(get_tracking))
        .route("/api/tracking", post(start_tracking))
        .route("/api/tracking", delete(stop_tracking))
        .with_state(pool);
    let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
    tracing::debug!("listening on {addr}");
    let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
    axum::serve(listener, app).await.unwrap();
}
/// Query-string parameters accepted by `GET /api/history`.
#[derive(Deserialize)]
struct HistoryQuery {
    // Maximum number of periods returned; `get_history` defaults this to 10.
    count: Option<i64>,
    // Lower bound of the start_time filter window.
    since: Option<chrono::NaiveDateTime>,
    // Upper bound of the start_time filter window.
    until: Option<chrono::NaiveDateTime>,
    // Restrict results to one project when present.
    project: Option<String>,
}
/// `GET /api/history` — recent work periods, newest first.
///
/// Applies the optional `since`/`until` window (the missing bound defaults
/// to the Unix epoch or local "now" respectively), an optional project
/// filter, and a `count` limit (default 10).
async fn get_history(
    State(pool): State<deadpool_diesel::sqlite::Pool>,
    query: Query<HistoryQuery>,
) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
    let count = query.count.unwrap_or(10);
    let conn = pool.get().await.map_err(internal_error)?;
    let res = conn.interact(move |conn|
        {
            // into_boxed() lets the filters below be attached conditionally.
            let mut selection = work_periods::table.select(WorkPeriod::as_select()).into_boxed().order(work_periods::start_time.desc());
            if query.since.is_some() || query.until.is_some() {
                let start = query.since.unwrap_or_else(|| chrono::Utc.with_ymd_and_hms(1970, 1, 1, 0, 0, 0).unwrap().naive_local());
                let end = query.until.unwrap_or_else(|| chrono::offset::Local::now().naive_local());
                selection = selection.filter(work_periods::start_time.between(start, end));
            }
            if query.project.is_some() {
                selection = selection.filter(work_periods::project.eq(query.project.as_ref().unwrap()));
            }
            selection.limit(count).load(conn)
        }
    ).await
    // First map_err: interact() join error; second: the Diesel query error.
    .map_err(internal_error)?
    .map_err(internal_error)?;
    Ok(Json(res))
}
/// `GET /api/tracking` — all currently open sessions, i.e. rows whose
/// `end_time` is still NULL.
async fn get_tracking(
    State(pool): State<deadpool_diesel::sqlite::Pool>,
) -> Result<Json<Vec<WorkPeriod>>, (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    let res = conn
        .interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select(WorkPeriod::as_select()).load(conn))
        .await
        .map_err(internal_error)?
        .map_err(internal_error)?;
    Ok(Json(res))
}
/// Response body for `/` and `/api/metrics`: tracked time per calendar
/// window, formatted by the `transform` chosen in `get_metrics` (either
/// "1h 2m"-style strings or raw second counts rendered as strings).
#[derive(Serialize)]
struct Status {
    today: String,
    week: String,
    month: String,
    year: String,
    // Length of the currently running session(s); zero when idle.
    active: String,
}
/// Total length of all `(start, end)` intervals, in whole seconds.
fn get_duration(periods: Vec<(NaiveDateTime, NaiveDateTime)>) -> i64 {
    let total: TimeDelta = periods
        .into_iter()
        .map(|(start, end)| end - start)
        .sum();
    total.num_seconds()
}
/// Seconds tracked since `start`. Open sessions count up to "now": the SQL
/// COALESCE substitutes the current local time for a NULL end_time.
/// `None` short-circuits to 0 — `get_metrics` passes the `and_hms_opt`
/// results straight in, which are `Option`s.
async fn get_since(pool: &deadpool_diesel::sqlite::Pool, start: Option<NaiveDateTime>) -> Result<i64, (StatusCode, Json<Error>)> {
    if let Some(start) = start {
        let conn = pool.get().await.map_err(internal_error)?;
        Ok(get_duration(conn.interact(move |conn| work_periods::table.filter(work_periods::start_time.ge(start)).select((
            work_periods::start_time,
            // Treat still-open sessions as ending right now.
            sql::<sql_types::Timestamp>("COALESCE(end_time, datetime('now', 'localtime'))")
        )).load::<(NaiveDateTime, NaiveDateTime)>(conn)).await.map_err(internal_error)?.map_err(internal_error)?))
    } else {
        Ok(0)
    }
}
/// Format a second count as e.g. "3h 27m"; seconds are intentionally
/// dropped. Durations under one minute (including zero) used to format as
/// the empty string, leaving blank fields in the status output — they now
/// render as an explicit "0m".
fn format_duration(total_seconds: i64) -> String {
    let hours = total_seconds / 3600;
    let minutes = (total_seconds % 3600) / 60;
    let mut parts = Vec::new();
    if hours > 0 { parts.push(format!("{}h", hours)); }
    if minutes > 0 { parts.push(format!("{}m", minutes)); }
    if parts.is_empty() {
        // Edge case fix: zero / sub-minute totals previously produced "".
        return "0m".to_string();
    }
    parts.join(" ")
}
/// Aggregate metrics handler backing `/` (human-readable durations) and
/// `/api/metrics` (`seconds == Some(true)`: raw second counts as strings).
///
/// Computes tracked time for today / this week / this month / this year plus
/// the currently active session(s).
async fn get_metrics(State(pool): State<deadpool_diesel::sqlite::Pool>, seconds: Option<bool>) -> Result<Json<Status>, (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    let current_time = chrono::offset::Local::now();
    // Open sessions, NULL end_time coalesced to "now" inside SQL.
    let active_sessions = conn.interact(|conn| work_periods::table.filter(work_periods::end_time.is_null()).select((
        work_periods::start_time,
        sql::<sql_types::Timestamp>("COALESCE(end_time, datetime('now', 'localtime'))")
    )).load::<(NaiveDateTime, NaiveDateTime)>(conn)).await.map_err(internal_error)?.map_err(internal_error)?;
    let mut today = current_time.date_naive();
    let started_yesterday = active_sessions.iter().any(|(start, _)| start.date() != today);
    let active_duration = get_duration(active_sessions);
    if active_duration > 0 && started_yesterday {
        today = today - chrono::Duration::days(1); // If we are currently tracking, we haven't started today yet
    }
    // Window starts at local midnight; `and_hms_opt(0, 0, 0)` yields an
    // Option, and get_since treats None as a zero-length window.
    let today_start = today.and_hms_opt(0, 0, 0);
    let week_start = (today - chrono::Duration::days(current_time.weekday().num_days_from_monday() as i64)).and_hms_opt(0, 0, 0);
    let month_start = today.with_day(1).unwrap().and_hms_opt(0, 0, 0);
    let year_start = today.with_day(1).unwrap().with_month(1).unwrap().and_hms_opt(0, 0, 0);
    // Either raw seconds rendered as a string, or "1h 2m" formatting.
    let transform = if let Some(true) = seconds { |x| format!("{}", x) } else { format_duration };
    Ok(Json(Status {
        today: transform(get_since(&pool, today_start).await?),
        week: transform(get_since(&pool, week_start).await?),
        month: transform(get_since(&pool, month_start).await?),
        year: transform(get_since(&pool, year_start).await?),
        active: transform(active_duration),
    }))
}
/// `DELETE /api/tracking` — close open session(s) by stamping `end_time`
/// with the current local time.
///
/// With a JSON body, only the session matching `payload.id` is closed (and
/// only if it is still open); without a body, every open session is closed.
async fn stop_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, payload: Option<Json<WorkPeriod>>) -> Result<StatusCode, (StatusCode, Json<Error>)> {
    pool.get().await.map_err(internal_error)?.interact(|conn|
        match payload {
            Some(Json(payload)) => {
                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()).find(payload.id))
                    .set(work_periods::end_time.eq(Some(chrono::offset::Local::now().naive_local())))
                    .execute(conn)
            }
            None => {
                diesel::update(work_periods::table.filter(work_periods::end_time.is_null()))
                    .set(work_periods::end_time.eq(Some(chrono::offset::Local::now().naive_local())))
                    .execute(conn)
            }
        }
    ).await.map_err(internal_error)?.map_err(internal_error)?;
    Ok(StatusCode::OK)
}
/// `POST /api/tracking` — begin a new session for the payload's project.
///
/// Rejects payloads carrying explicit timestamps (the server owns the
/// clock); `start_time` is stamped with local "now". Responds 201 with the
/// stored row including its generated id.
async fn start_tracking(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
    if payload.end_time.is_some() || payload.start_time.is_some() {
        return Err((StatusCode::BAD_REQUEST, Json(Error {
            success: false,
            value: "Timestamps (start or end) cannot be specified when starting tracking.".to_string()
        })));
    }
    let mut payload = payload.clone();
    payload.start_time = Some(chrono::offset::Local::now().naive_local());
    let conn = pool.get().await.map_err(internal_error)?;
    // insert your application logic here
    let res = conn
        .interact(move |conn| {
            diesel::insert_into(work_periods::table)
                .values(&payload)
                // SQLite RETURNING gives back the inserted row with its id.
                .returning(WorkPeriod::as_returning())
                .get_result(conn)
        })
        .await
        .map_err(internal_error)?
        .map_err(internal_error)?;
    // this will be converted into a JSON response
    // with a status code of `201 Created`
    Ok((StatusCode::CREATED, Json(res)))
}
/// `POST /api/history` — insert a fully specified period as-is. Unlike
/// `start_tracking`, timestamps come from the client. Responds 201 with the
/// stored row including its generated id.
async fn add_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Json(payload): Json<NewPeriod>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    let res = conn
        .interact(move |conn| {
            diesel::insert_into(work_periods::table)
                .values(&payload)
                .returning(WorkPeriod::as_returning())
                .get_result(conn)
        })
        .await
        .map_err(internal_error)?
        .map_err(internal_error)?;
    Ok((StatusCode::CREATED, Json(res)))
}
/// `PUT /api/history/:id` — overwrite the period identified by the path id
/// with the JSON body, returning the updated row.
async fn update_period(State(pool): State<deadpool_diesel::sqlite::Pool>,
    Path(period_id): Path<i32>,
    Json(payload): Json<WorkPeriod>
) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    let res = conn.interact(move |conn| {
        diesel::update(work_periods::table.find(period_id))
            .set(payload)
            .get_result(conn)
    })
    .await
    .map_err(internal_error)?
    .map_err(internal_error)?;
    Ok((StatusCode::OK, Json(res)))
}
/// `GET /api/history/:id` — fetch a single period by id; a missing row
/// surfaces as a Diesel error mapped to 500 by `internal_error`.
async fn get_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<Json<WorkPeriod>, (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    let res = conn.interact(move |conn|
        work_periods::table.select(WorkPeriod::as_select()).find(period_id).first(conn)
    ).await
    .map_err(internal_error)?
    .map_err(internal_error)?;
    Ok(Json(res))
}
/// `DELETE /api/history/:id` — remove a period and echo the deleted row
/// back (SQLite RETURNING via `get_result`).
async fn delete_period(State(pool): State<deadpool_diesel::sqlite::Pool>, Path(period_id): Path<i32>) -> Result<(StatusCode, Json<WorkPeriod>), (StatusCode, Json<Error>)> {
    let conn = pool.get().await.map_err(internal_error)?;
    // insert your application logic here
    let res = conn
        .interact(move |conn| {
            diesel::delete(work_periods::table.find(period_id))
                .get_result(conn)
        })
        .await
        .map_err(internal_error)?
        .map_err(internal_error)?;
    Ok((StatusCode::OK, Json(res)))
}
/// JSON error body shared by every handler: `{"success": false, "value": msg}`.
#[derive(Serialize)]
struct Error {
    success: bool,
    value: String,
}
fn internal_error<E>(err: E) -> (StatusCode, Json<Error>)
where
E: std::error::Error,
{
(StatusCode::INTERNAL_SERVER_ERROR, Json(Error{success: false, value: err.to_string()}))
}

View File

@ -1,25 +0,0 @@
use diesel::prelude::*;
use serde::{Deserialize, Serialize};
use super::schema::work_periods;
use chrono::NaiveDateTime;
/// A stored work session row from the `work_periods` table.
#[derive(Queryable, Debug, Serialize, Deserialize, Selectable, AsChangeset)]
#[diesel(table_name = crate::schema::work_periods)]
#[diesel(check_for_backend(diesel::sqlite::Sqlite))]
pub struct WorkPeriod {
    pub id: i32,
    pub project: String,
    pub start_time: NaiveDateTime,
    // Still NULL while the session is being tracked.
    pub end_time: Option<NaiveDateTime>,
    pub description: Option<String>,
}
/// Insertable payload for creating a period; the id is assigned by SQLite.
#[derive(Insertable, Serialize, Deserialize, Debug, Clone)]
#[diesel(table_name = work_periods)]
pub struct NewPeriod {
    pub project: String,
    // None when starting live tracking — the server stamps it (see
    // `start_tracking`); set explicitly for `POST /api/history` inserts.
    pub start_time: Option<NaiveDateTime>,
    pub end_time: Option<NaiveDateTime>,
    pub description: Option<String>,
}

View File

@ -1,11 +0,0 @@
// @generated automatically by Diesel CLI.
// Diesel DSL for the `work_periods` table (mirrors the SQL migration):
// AUTOINCREMENT integer primary key, with nullable end_time/description.
diesel::table! {
    work_periods (id) {
        id -> Integer,
        project -> Text,
        start_time -> Timestamp,
        end_time -> Nullable<Timestamp>,
        description -> Nullable<Text>,
    }
}