jrnl: simplify (or complexify, if you don't like iterators and zero-copy) the parsing
parent 28bebd5aaa
commit 0bf5ed0c76

5 changed files with 52 additions and 56 deletions
Cargo.lock (generated): 7 changes

@@ -412,6 +412,7 @@ dependencies = [
  "owo-colors",
  "petgraph",
  "ratatui",
+ "temp-file",
  "termsize",
 ]
 
@@ -799,6 +800,12 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "temp-file"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f210bda61d003f311d95611d1b68361df8fe8e732c3609f945441bde881321d"
+
 [[package]]
 name = "termion"
 version = "1.5.6"
Cargo.toml

@@ -13,4 +13,5 @@ markdown = "0.3.0"
 owo-colors = "4.0.0"
 petgraph = "0.6.4"
 ratatui = "0.26.2"
+temp-file = "0.1.8"
 termsize = "0.1.6"
@@ -5,16 +5,20 @@ use crate::md::Doc;
 pub fn list_entries(path: PathBuf) {
     let file = fs::read_to_string(path).unwrap();
-    let doc = Doc::new(&file);
-
-    for (i, entry) in doc.entries.into_iter().enumerate() {
-        let n = format!("{:>2}", i + 1);
-        let r = format!(". {}", entry.title,);
-        let l = format!(" {} ", crate::utils::format_datetime(entry.timestamp));
-        let termsize::Size { cols, .. } = termsize::get().unwrap();
-
-        let padding = " ".repeat(cols as usize - (n.len() + r.len() + l.len()));
-
-        println!("{}{r}{padding}{}", n.cyan(), l.white())
+    if let Some(doc) = Doc::new(&file) {
+        for (i, entry) in doc.entries.into_iter().enumerate() {
+            let n = format!("{:>2}", i + 1);
+            let r = format!(". {}", entry.title,);
+            let l = format!(" {} ", crate::utils::format_datetime(entry.timestamp));
+            let termsize::Size { cols, .. } = termsize::get().unwrap();
+
+            let padding = " ".repeat(cols as usize - (n.len() + r.len() + l.len()));
+
+            println!("{}{r}{padding}{}", n.cyan(), l.white())
+        }
+    } else {
+        eprintln!("Parsing error...");
+        std::process::exit(1);
     }
 }
@@ -1,3 +1,4 @@
+#![feature(iter_collect_into)]
 use clap::{Parser, Subcommand};
 use std::{fs, path::PathBuf};
 
@@ -34,8 +35,7 @@ fn main() {
             // TODO: handle btter
             let file = fs::read_to_string(cli.s10e_jrnl_file_loc).unwrap();
 
-            let doc = Doc::new(&file);
-            dbg!(doc);
+            let doc = dbg!(Doc::new(&file));
         }
     }
 }
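Aside on the fn main() hunk above: std's dbg! macro prints its argument and then returns it, which is why the two old statements collapse into the single let doc = dbg!(Doc::new(&file));. A standalone illustration (toy values, not code from this repo):

fn main() {
    // dbg! writes the file, line, expression text, and value to stderr,
    // then hands the value back unchanged, so it can wrap an expression in place.
    let n = dbg!(21 * 2); // stderr: something like [src/main.rs:4:13] 21 * 2 = 42
    assert_eq!(n, 42);
}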
@@ -1,59 +1,43 @@
 use chrono::{DateTime, FixedOffset};
 use markdown::{Block, Span};
+use std::convert::identity;
 
 #[derive(Debug)]
-pub struct Doc {
-    pub title: Vec<Span>,
-    pub entries: Vec<Entry>,
+pub struct Doc<'src> {
+    pub entries: Vec<Entry<'src>>,
 }
 
-impl Doc {
-    pub fn new(f: &str) -> Self {
-        let mut entries = Vec::new();
-        let mut doc_title = vec![Span::Text("Journal".to_owned())];
-        let toks = markdown::tokenize(f);
-        let mut current = None;
-
-        for tok in toks {
-            match tok {
-                Block::Header(title, 1) => doc_title = title,
-                Block::Header(entry_title, 2) => {
-                    if let Some(cur) = current.take() {
-                        entries.push(cur);
-                    }
-
-                    let Some(Span::Text(title)) = entry_title.first() else {
-                        eprintln!("Error: Titles should be text.");
-                        std::process::exit(1);
-                    };
-
-                    let (ts, entry_title) = title.split_once(": ").unwrap();
-                    let ts = DateTime::parse_from_rfc3339(ts).unwrap();
-                    // let ts = PrimitiveDateTime::parse(ts, &DT_FORMAT).unwrap();
-
-                    current = Some(Entry {
-                        timestamp: ts,
-                        title: entry_title.to_owned(),
-                        content: Vec::new(),
-                    });
-                }
-                other => current.as_mut().unwrap().content.push(other),
-            }
-        }
-
-        if let Some(cur) = current {
-            entries.push(cur);
-        }
-
-        Self {
-            title: doc_title,
-            entries,
-        }
+impl<'src> Doc<'src> {
+    // TODO: better parsing errors?
+    pub fn new(f: &'src str) -> Option<Self> {
+        let entries = f
+            .split("\n## ")
+            .map(|s| s.split_once("\n"))
+            .skip(1)
+            .filter_map(identity)
+            .map(|(title, content)| (title.split_once(": "), content))
+            .map(|(title, content)| {
+                if let Some((ts, title)) = title {
+                    Some(Entry {
+                        timestamp: DateTime::parse_from_rfc3339(ts).unwrap(),
+                        title,
+                        content: content.trim_matches('\n'),
+                    })
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+
+        entries.iter().all(|it| it.is_some()).then_some(Self {
+            entries: entries.into_iter().filter_map(identity).collect(),
+        })
     }
 }
 
-#[derive(Debug)]
-pub struct Entry {
+#[derive(Debug, Clone)]
+pub struct Entry<'src> {
     pub timestamp: DateTime<FixedOffset>,
-    pub title: String,
-    pub content: Vec<Block>,
+    pub title: &'src str,
+    pub content: &'src str,
 }
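For reference, a test-style sketch of how the rewritten parser behaves end to end. The sample journal text, the test name, and the placement (a #[cfg(test)] module at the bottom of the file that defines Doc) are assumptions for illustration, not part of this commit:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_entries_without_copying() {
        // Hypothetical input in the shape the splitter expects:
        // a doc title, then one "## <rfc3339 timestamp>: <title>" heading per entry.
        let file = "# Journal\n\n\
                    ## 2024-05-01T09:00:00+00:00: first entry\ncontent one\n\n\
                    ## 2024-05-02T18:30:00+00:00: second entry\ncontent two\n";

        let doc = Doc::new(file).expect("well-formed input should parse");
        assert_eq!(doc.entries.len(), 2);
        // Titles and bodies are &str slices borrowed straight from `file` (zero-copy).
        assert_eq!(doc.entries[0].title, "first entry");
        assert_eq!(doc.entries[1].content, "content two");

        // A heading without the ": " separator makes the whole parse come back None.
        // (A heading with ": " but a non-RFC-3339 timestamp would still panic via
        // the unwrap on parse_from_rfc3339.)
        assert!(Doc::new("# Journal\n\n## not a timestamp\nbody\n").is_none());
    }
}

One design note: the all(is_some) check followed by filter_map walks the collected Vec twice; collecting the iterator straight into Option<Vec<_>> would give the same all-or-nothing result in a single pass. Whether that is what the newly enabled iter_collect_into feature in the main.rs hunk is building toward is not visible from these hunks.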