WIP: image processing library (or libraries?) #12

Draft
schrottkatze wants to merge 15 commits from schrottkatze/iowo:proc-libs into main
20 changed files with 655 additions and 58 deletions

Cargo.lock (generated, 136 lines changed)
View file

@ -66,12 +66,22 @@ dependencies = [
"eval",
"ir",
"owo-colors",
"prowocessing",
"ron",
"serde",
"serde_json",
"time",
]
[[package]]
name = "approx"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
dependencies = [
"num-traits",
]
[[package]]
name = "ariadne"
version = "0.4.0"
@ -293,6 +303,12 @@ dependencies = [
"zune-inflate",
]
[[package]]
name = "fast-srgb8"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd2e7510819d6fbf51a5545c8f922716ecfb14df168a3242f7d33e0239efe6a1"
[[package]]
name = "fdeflate"
version = "0.3.3"
@ -359,9 +375,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "image"
version = "0.24.7"
version = "0.24.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f3dfdbdd72063086ff443e297b61695500514b1e41095b6fb9a5ab48a70a711"
checksum = "034bbe799d1909622a74d1193aa50147769440040ff36cb2baa947609b0a4e23"
dependencies = [
"bytemuck",
"byteorder",
@ -369,7 +385,6 @@ dependencies = [
"exr",
"gif",
"jpeg-decoder",
"num-rational",
"num-traits",
"png",
"qoi",
@ -443,27 +458,6 @@ dependencies = [
"simd-adler32",
]
[[package]]
name = "num-integer"
version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-rational"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
dependencies = [
"autocfg",
"num-integer",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.17"
@ -494,6 +488,71 @@ version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "caff54706df99d2a78a5a4e3455ff45448d81ef1bb63c22cd14052ca0e993a3f"
[[package]]
name = "palette"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d38e6e5ca1612e2081cc31188f08c3cba630ce4ba44709a153f1a0f38d678f2"
dependencies = [
"approx",
"fast-srgb8",
"palette_derive",
"phf",
]
[[package]]
name = "palette_derive"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05d1c929301fee6830dafa764341118829b2535c216b0571e3821ecac5c885b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "phf"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
"phf_macros",
"phf_shared",
]
[[package]]
name = "phf_generator"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0"
dependencies = [
"phf_shared",
"rand",
]
[[package]]
name = "phf_macros"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b"
dependencies = [
"phf_generator",
"phf_shared",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "phf_shared"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b"
dependencies = [
"siphasher",
]
[[package]]
name = "png"
version = "0.17.10"
@ -522,6 +581,14 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "prowocessing"
version = "0.1.0"
dependencies = [
"image",
"palette",
]
[[package]]
name = "qoi"
version = "0.4.1"
@ -540,6 +607,21 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
[[package]]
name = "rayon"
version = "1.8.0"
@ -641,6 +723,12 @@ version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "siphasher"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
[[package]]
name = "smallvec"
version = "1.11.2"

View file

@ -2,7 +2,8 @@
members = [
"crates/app",
"crates/eval",
"crates/ir",
"crates/ir",
"crates/prowocessing",
]
resolver = "2"

View file

@ -11,6 +11,7 @@ clap = { workspace = true, features = [ "derive", "env" ] }
dirs = "5"
eval = { path = "../eval" }
ir = { path = "../ir" }
prowocessing = { path = "../prowocessing"}
owo-colors = "4"
ron = "0.8"
serde = { workspace = true, features = [ "derive" ] }

View file

@ -1,18 +1,11 @@
use std::path::PathBuf;
use clap::Parser;
use self::{
cli::Args,
config_file::{find_config_file, Configs},
};
use self::config_file::{find_config_file, Configs};
pub(crate) use cli::CliConfigs;
mod cli;
mod config_file;
/// this struct may hold all configuration
pub struct Config {
pub source: PathBuf,
pub evaluator: eval::Available,
pub startup_msg: bool,
@ -20,13 +13,17 @@ pub struct Config {
impl Config {
/// Get the configs from all possible places (args, file, env...)
pub fn read() -> Self {
let args = Args::parse();
let config = if let Some(config) = args.config_path {
Ok(config)
} else {
find_config_file()
};
pub fn read(args: &CliConfigs) -> Self {
// let config = if let Some(config) = &args.config_path {
// Ok(config.clone())
// } else {
// find_config_file()
// };
let config = args
.config_path
.clone()
.ok_or(())
.or_else(|()| find_config_file());
// try to read a maybe existing config file
let config = config.ok().and_then(|path| {
@ -42,7 +39,6 @@ impl Config {
if let Some(file) = config {
Self {
source: args.source,
evaluator: args.evaluator.and(file.evaluator).unwrap_or_default(),
// this is negated because to an outward api, the negative is more intuitive,
// while in the source the other way around is more intuitive
@ -50,7 +46,6 @@ impl Config {
}
} else {
Self {
source: args.source,
startup_msg: !args.no_startup_message,
evaluator: args.evaluator.unwrap_or_default(),
}

View file

@ -1,12 +1,9 @@
use std::path::PathBuf;
use clap::{builder::BoolishValueParser, ArgAction, Parser};
#[derive(Parser)]
pub(crate) struct Args {
/// What file contains the pipeline to evaluate.
pub source: PathBuf,
use clap::{builder::BoolishValueParser, ArgAction, Args};
#[derive(Args)]
pub(crate) struct CliConfigs {
/// How to actually run the pipeline.
/// Overrides the config file. Defaults to the debug evaluator.
#[arg(short, long)]

View file

@ -1,6 +1,8 @@
use std::fs;
use std::{fs, path::PathBuf};
use config::Config;
use clap::{Parser, Subcommand};
use config::{CliConfigs, Config};
use dev::DevCommands;
use welcome_msg::print_startup_msg;
mod config;
@ -9,19 +11,60 @@ mod config;
mod error_reporting;
mod welcome_msg;
#[derive(Parser)]
struct Args {
#[command(flatten)]
configs: CliConfigs,
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands {
Run {
/// What file contains the pipeline to evaluate.
source: PathBuf,
},
Dev {
#[command(subcommand)]
command: DevCommands,
},
}
fn main() {
// TODO: proper error handling across the whole function
// don't forget to also look inside `Config`
let cfg = Config::read();
let args = Args::parse();
let cfg = Config::read(&args.configs);
if cfg.startup_msg {
print_startup_msg();
}
let source = fs::read_to_string(cfg.source).expect("can't find source file");
let ir = ir::from_ron(&source).expect("failed to parse source to graph ir");
match args.command {
Commands::Run { source } => {
let source = fs::read_to_string(source).expect("can't find source file");
let ir = ir::from_ron(&source).expect("failed to parse source to graph ir");
let mut machine = cfg.evaluator.pick();
machine.feed(ir);
machine.eval_full();
let mut machine = cfg.evaluator.pick();
machine.feed(ir);
machine.eval_full();
}
Commands::Dev {
command: dev_command,
} => dev_command.run(),
}
}
mod dev {
use clap::Subcommand;
#[derive(Subcommand)]
pub(crate) enum DevCommands {}
impl DevCommands {
pub fn run(self) {
println!("There are currently no dev commands.");
}
}
}

View file

@ -37,7 +37,7 @@ impl Available {
#[must_use]
pub fn pick(&self) -> Box<dyn Evaluator> {
match self {
Self::Debug => Box::new(kind::debug::Evaluator::default()),
Self::Debug => Box::<kind::debug::Evaluator>::default(),
}
}
}

View file

@ -0,0 +1,13 @@
[package]
name = "prowocessing"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
image = "0.24.8"
palette = "0.7.4"
[lints]
workspace = true

View file

@ -0,0 +1,2 @@
pub mod enum_based;
pub mod trait_based;

View file

@ -0,0 +1,64 @@
pub enum Instruction {
Uppercase,
Lowercase,
}
pub struct Pipeline {
pipeline: Vec<fn(String) -> String>,
}
impl Pipeline {
pub fn run(&self, val: String) -> String {
let mut current = val;
for instr in &self.pipeline {
current = instr(current);
}
current
}
}
pub struct PipelineBuilder {
pipeline: Vec<Instruction>,
}
impl PipelineBuilder {
pub fn new() -> Self {
Self {
pipeline: Vec::new(),
}
}
#[must_use]
pub fn insert(mut self, instr: Instruction) -> Self {
self.pipeline.push(instr);
self
}
pub fn build(&self) -> Pipeline {
fn uppercase(v: String) -> String {
str::to_uppercase(&v)
}
fn lowercase(v: String) -> String {
str::to_lowercase(&v)
}
let mut res = Vec::new();
for item in &self.pipeline {
res.push(match item {
Instruction::Uppercase => uppercase,
Instruction::Lowercase => lowercase,
});
}
Pipeline { pipeline: res }
}
}
impl Default for PipelineBuilder {
fn default() -> Self {
Self::new()
}
}
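A minimal usage sketch of this enum-based builder (not part of the diff, but it mirrors the `test_enums` test in `lib.rs` further down): `build` only borrows the builder, so the same builder can be extended and built again.

```rust
use prowocessing::experimental::enum_based::{Instruction, PipelineBuilder};

fn main() {
    // Build a one-step pipeline, then extend the same builder with a second step.
    let builder = PipelineBuilder::new().insert(Instruction::Uppercase);
    let upper = builder.build();
    let upper_lower = builder.insert(Instruction::Lowercase).build();

    assert_eq!(upper.run(String::from("Test")), "TEST");
    assert_eq!(upper_lower.run(String::from("Test")), "test");
}
```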

View file

@ -0,0 +1,11 @@
//! An experiment for a hyper-modular trait-based architecture.
//!
//! Patterns defining this (or well, which I reference a lot while writing this):
//! - [Command pattern using trait objects](https://rust-unofficial.github.io/patterns/patterns/behavioural/command.html)
//! - [Builder pattern](https://rust-unofficial.github.io/patterns/patterns/creational/builder.html)
pub mod data;
#[macro_use]
pub mod element;
pub mod ops;
pub mod pipeline;

View file

@ -0,0 +1,5 @@
//! Definitions of the data transfer and storage types.
pub mod io;
pub mod raw;

View file

@ -0,0 +1,53 @@
//! Types for element and pipeline IO
use std::{borrow::ToOwned, convert::Into};
use super::raw::Data;
/// Newtype struct with borrowed types for pipeline/element inputs, so that it doesn't force a move or clone
schrottkatze marked this conversation as resolved

Quite ambiguous doc-comment also regarding the rather lengthy doc-comment on the type itself. How about removing this method altogether and making the content of `Inputs` directly public, given that one's free to convert from/to it already?
#[derive(PartialEq, Eq, Debug)]
pub struct Inputs<'a>(pub Vec<&'a Data>);
impl<'a> From<Vec<&'a Data>> for Inputs<'a> {
fn from(value: Vec<&'a Data>) -> Self {
Self(value)
}
}
impl<'a, T: Into<&'a Data>> From<T> for Inputs<'a> {
fn from(value: T) -> Self {
Self(vec![value.into()])
}
}
impl<'a> From<&'a Outputs> for Inputs<'a> {
fn from(value: &'a Outputs) -> Self {
Self(value.0.iter().map(Into::into).collect())
}
}
schrottkatze marked this conversation as resolved

Unnecessary full method path, consider just using `From::from` or `Into::into` instead.
Review

ah yes, rust-analyzer loves completing full paths lol
/// Used for pipeline/element outputs
#[derive(PartialEq, Eq, Debug)]
pub struct Outputs(pub Vec<Data>);
impl Outputs {
/// consume self and return inner value(s)
pub fn into_inner(self) -> Vec<Data> {
self.0
schrottkatze marked this conversation as resolved

Wait, why is `Outputs` allowed to be consumed for its inner content while `Inputs` isn't?
Review

`Inputs` only contains a `Vec` of `Data` which either contains a string slice or an integer, which are really cheap to clone. `OwnedData` would be much heavier to clone in this case.

(I'm currently not happy with how the IO of instructions works anyway, planning on reworking that to be more sensible, clear and flexible.)
}
}
impl From<Vec<Data>> for Outputs {
fn from(value: Vec<Data>) -> Self {
Self(value)
}
}
impl<T: Into<Data>> From<T> for Outputs {
fn from(value: T) -> Self {
Self(vec![value.into()])
}
}
impl From<Inputs<'_>> for Outputs {
fn from(value: Inputs) -> Self {
Self(value.0.into_iter().map(ToOwned::to_owned).collect())
}
}
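To ground the borrow-vs-own question in the thread above, here is a small sketch (not part of the diff, using only the `From` impls shown in this file) of how data can round-trip between `Outputs` and `Inputs` without moving the owned values:

```rust
use prowocessing::experimental::trait_based::data::{
    io::{Inputs, Outputs},
    raw::Data,
};

fn main() {
    // Owned results of one element...
    let out: Outputs = vec![Data::Int(5), Data::String("hi".to_owned())].into();
    // ...borrowed as inputs for the next element, without cloning the owned `Data`...
    let next_in: Inputs = (&out).into();
    // ...and only cloned back into owned values when an `Outputs` is needed again.
    let owned_again: Outputs = next_in.into();
    assert_eq!(out, owned_again);
}
```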

View file

@ -0,0 +1,20 @@
//! Dynamic data storage and transfer types for use in [`io`]
// Dynamic data type
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Data {
String(String),
Int(i32),
}
impl From<String> for Data {
fn from(value: String) -> Self {
Self::String(value)
}
}
impl From<i32> for Data {
fn from(value: i32) -> Self {
Self::Int(value)
}
}

View file

@ -0,0 +1,29 @@
//! The trait and type representations
use std::any::TypeId;
use crate::experimental::trait_based::data::io::Inputs;
use super::data::io::Outputs;
pub(crate) trait PipelineElement {
/// return a static runner function pointer to avoid dynamic dispatch during pipeline execution - Types MUST match the signature
fn runner(&self) -> fn(&Inputs) -> Outputs;
/// return the signature of the element
fn signature(&self) -> ElementSignature;
}
/// Type signature for an element used for static checking
pub(crate) struct ElementSignature {
pub inputs: Vec<TypeId>,
pub outputs: Vec<TypeId>,
}
macro_rules! signature {
($($inputs:ty),+ => $($outputs:ty),+) => (
ElementSignature {
inputs: vec![$(std::any::TypeId::of::<$inputs>(), )+],
outputs: vec![$(std::any::TypeId::of::<$outputs>(), )+]
}
)
}
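For readability, this is roughly what a call like `signature!(i32, i32 => i32)` (as used by the ops modules below) expands to; the snippet just spells out the macro's two `vec!`s and adds nothing new:

```rust
// Rough expansion of `signature!(i32, i32 => i32)`:
ElementSignature {
    inputs: vec![
        std::any::TypeId::of::<i32>(),
        std::any::TypeId::of::<i32>(),
    ],
    outputs: vec![std::any::TypeId::of::<i32>()],
}
```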

View file

@ -0,0 +1,7 @@
mod num;
mod str;
pub mod prelude {
pub(crate) use super::num::*;
pub(crate) use super::str::*;
}

View file

@ -0,0 +1,62 @@
//! Operations on numeric data
use core::panic;
use std::any::TypeId;
use crate::experimental::trait_based::{
data::{
io::{Inputs, Outputs},
raw::Data,
},
element::{ElementSignature, PipelineElement},
};
/// Addition
pub struct Add(pub i32);
impl PipelineElement for Add {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::Int(i0), Data::Int(i1), ..] = input.0[..] else {
panic!("Invalid data passed")
};
(i0 + i1).into()
}
}
fn signature(&self) -> ElementSignature {
signature!(i32, i32 => i32)
}
}
/// Subtraction
pub struct Subtract(pub i32);
impl PipelineElement for Subtract {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::Int(i0), Data::Int(i1), ..] = input.0[..] else {
panic!("Invalid data passed")
};
(i0 - i1).into()
}
}
fn signature(&self) -> ElementSignature {
signature!(i32, i32 => i32)
}
}
/// Turn input to string
pub struct Stringify;
impl PipelineElement for Stringify {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::Int(int), ..] = input.0[..] else {
panic!("Invalid data passed")
};
int.to_string().into()
}
}
fn signature(&self) -> ElementSignature {
signature!(i32 => String)
}
}

View file

@ -0,0 +1,59 @@
//! Operation on String/text data
use crate::experimental::trait_based::{
data::{
io::{Inputs, Outputs},
raw::Data,
},
element::{ElementSignature, PipelineElement},
};
/// Concatenate the inputs
pub struct Concatenate(pub String);
impl PipelineElement for Concatenate {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::String(s0), Data::String(s1), ..] = input.0[..] else {
panic!("Invalid data passed")
};
format!("{s0}{s1}").into()
}
}
fn signature(&self) -> ElementSignature {
signature!(String, String => String)
}
}
/// Turn input text to uppercase
pub struct Upper;
impl PipelineElement for Upper {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::String(s), ..] = input.0[..] else {
panic!("Invalid data passed")
};
s.to_uppercase().into()
}
}
fn signature(&self) -> ElementSignature {
signature!(String => String)
}
}
/// Turn input text to lowercase
pub struct Lower;
impl PipelineElement for Lower {
fn runner(&self) -> fn(&Inputs) -> Outputs {
|input| {
let [Data::String(s), ..] = input.0[..] else {
panic!("Invalid data passed")
};
s.to_lowercase().into()
}
}
fn signature(&self) -> ElementSignature {
signature!(String => String)
}
}

View file

@ -0,0 +1,107 @@
use super::data::io::{Inputs, Outputs};
use super::element::PipelineElement;
use super::ops::prelude::*;
/// Builder for the pipelines that are actually run
///
/// TODO:
/// - Bind additional inputs if an instruction has more than one and is passed without any additional
/// - allow binding to pointers to other pipelines?
/// - allow referencing earlier data
multisamplednight marked this conversation as resolved

Those `TODO:`s seem like they should belong in an issue, so one can

  • discuss on them
  • selectively mark them as done/undone
  • edit them easily
  • link them from a respective PR
Review

this is heavily WIP, remember, these are experiments. There will not be a separate PR until this is not only out of experimental state, but a functioning image processing library that has all that fixed.

So you'd rather do all the work of finding all `TODO:`s and converting them to issues (and then creating a PR removing them) after "finishing" `prowocessing`?

Also, that just addresses one point, while the other 3 are still standing.
Review

The intention is to perfect the API beforehand, and tbh, you'll be the only other person reviewing stuff anyway. And wdym "editing them easily", tbh I find editing text in my editor much, *much* easier than working with a forgejo web ui... and if they're done, I delete them.

Sounds fine to me.
pub struct PipelineBuilder {
elements: Vec<Box<dyn PipelineElement>>,
}
impl PipelineBuilder {
/// Create new, empty builder
pub fn new() -> Self {
Self {
elements: Vec::new(),
}
}
/// Insert element into pipeline
fn insert<T: PipelineElement + 'static>(mut self, el: T) -> Self {
if let Some(previous_item) = self.elements.last() {
assert_eq!(
previous_item.signature().outputs[0],
el.signature().inputs[0]
);
}
self.elements.push(Box::new(el));
self
}
/// insert string concatenation element
#[must_use]
pub fn concatenate(self, sec: String) -> Self {
self.insert(Concatenate(sec))
}
/// insert string uppercase element
#[must_use]
pub fn upper(self) -> Self {
self.insert(Upper)
}
/// insert string lowercase element
#[must_use]
pub fn lower(self) -> Self {
self.insert(Lower)
}
/// insert numeric addition element
#[must_use]
#[allow(
clippy::should_implement_trait,
reason = "is not equivalent to addition"
)]
pub fn add(self, sec: i32) -> Self {
self.insert(Add(sec))
}
/// insert numeric subtraction element
#[must_use]
pub fn subtract(self, sec: i32) -> Self {
self.insert(Subtract(sec))
}
/// insert stringify element
#[must_use]
pub fn stringify(self) -> Self {
self.insert(Stringify)
}
/// Build the pipeline. Doesn't check again - `insert` should verify correctness.
pub fn build(&self) -> Pipeline {
let mut r = Vec::new();
self.elements.iter().for_each(|el| r.push(el.runner()));
Pipeline { runners: r }
}
}
impl Default for PipelineBuilder {
fn default() -> Self {
Self::new()
}
}
/// Runnable pipeline - at the core of this library
pub struct Pipeline {
runners: Vec<fn(&Inputs) -> Outputs>,

Also regarding the enum-based arch: Why the indirection of `fn(&Inputs) -> Outputs`? Why does `Pipeline` not hold `Box<dyn Element>` as well?
Review

Unless I misunderstood rusts `dyn`, this avoids the overhead of dynamic dispatch by saving static function pointers, which can just be called on the fly

They do save 1 indirection, but that is 1 indirection which hasn't even been benchmarked yet against

  1. worse debuggability
    • when `Debug`ging `Pipeline`, if it were to implement `Debug`, all one sees at the moment is a bunch of addresses of a function in hexadecimal, e.g. `0x000056380fa85320`.
  2. when writing `fn runner()` as opposed to a direct `fn eval(&Inputs) -> Outputs` or the like
    1. an extra indent for the actual logic
    2. extra noise for defining the return type and the returned closure

Sidenote: If you care this much about indirection, `&Inputs` is actually `&Vec<&Data>`, which are 2 indirections already before the element can access any contained data.
Review

> 1. worse debuggability

i have an idea what one could do for that, I'll be implementing that soon-ish when i have the energy

> 2. when writing `fn runner()` as opposed to a direct `fn eval(&Inputs) -> Outputs` or the like
>   1. an extra indent for the actual logic
>   2. extra noise for defining the return type and the returned closure

the return types are defined as opaque types deliberately, since during actual execution the pipeline does not know those.
the only thing the pipeline is supposed to do, is to execute the runners in order and get data where it's supposed to go (which is, once again, an unsolved problem currently)

> Sidenote: If you care this much about indirection, `&Inputs` is actually `&Vec<&Data>`, which are 2 indirections already before the element can access any contained data.

yes, but I don't think that's avoidable.

> i have an idea what one could do for that, I'll be implementing that soon-ish when i have the energy

Does this idea imply a map of fn pointer to debug info? If so, I'm not sure if the size increase outweighs the 1 (still unbenchmarked) indirection.

> the return types are defined as opaque types deliberately, since during actual execution the pipeline does not know those.
> the only thing the pipeline is supposed to do, is to execute the runners in order and get data where it's supposed to go (which is, once again, an unsolved problem currently)

That does not address the points you quote at all. At no point did I question the fn pointers being opaque. You just re-stated what the pipeline should do, which I already know, without addressing what I listed.

> yes, but I don't think that's avoidable.

It is. One could clone the vec on every instruction call.

EDIT: Thinking about it, actually the vec will need to be created for every instruction call anyway, since in a graph, multiple instructions may be source for an instruction.
}
impl Pipeline {
/// run the pipeline
pub fn run(&self, inputs: Inputs) -> Outputs {
let mut out: Outputs = inputs.into();
for runner in &self.runners {
out = runner(&(&out).into());
}
out
}
}
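For comparison with the thread above, here is a rough sketch (not part of this PR) of the trait-object variant under discussion: the pipeline stores the boxed elements themselves and dispatches through them on every call, instead of extracting `fn` pointers at build time. It is assumed to live in the same module tree as `pipeline.rs`, so the crate-private `PipelineElement` trait is in scope; `DynPipeline` is a hypothetical name.

```rust
use super::data::io::{Inputs, Outputs};
use super::element::PipelineElement;

/// Hypothetical pipeline holding trait objects instead of extracted fn pointers.
pub struct DynPipeline {
    elements: Vec<Box<dyn PipelineElement>>,
}

impl DynPipeline {
    /// run the pipeline through the trait objects
    pub fn run(&self, inputs: Inputs) -> Outputs {
        let mut out: Outputs = inputs.into();
        for el in &self.elements {
            // One dynamic `runner()` call per element to fetch the fn pointer, then the
            // call itself; a direct `fn eval(&self, &Inputs) -> Outputs` on the trait
            // would fold these two steps into a single dynamic call.
            out = el.runner()(&(&out).into());
        }
        out
    }
}
```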

View file

@ -0,0 +1,40 @@
//! # This is the image processing library for iOwO
//!
//! One of the design goals for this library, however, is to also work as a simple, generic image processing library.
//! For now, it's just in development... let's see what comes of it!
#![feature(lint_reasons)]
/// Just some experiments to test whether the architecture I want is even possible (or how to do it). Probably temporary.
/// Gonna first try string processing...
pub mod experimental;
#[cfg(test)]
mod tests {
use crate::experimental::{
enum_based,
trait_based::{self, data::io::Outputs},
};
#[test]
fn test_enums() {
let builder = enum_based::PipelineBuilder::new().insert(enum_based::Instruction::Uppercase);
let upr = builder.build();
let upr_lowr = builder.insert(enum_based::Instruction::Lowercase).build();
assert_eq!(upr.run(String::from("Test")), String::from("TEST"));
assert_eq!(upr_lowr.run(String::from("Test")), String::from("test"));
}
#[test]
fn add() {
let pipe = trait_based::pipeline::PipelineBuilder::new()
.add(0)
.stringify()
.build();
assert_eq!(
pipe.run(vec![&2.into(), &3.into()].into()),
Outputs(vec![String::from("5").into()])
);
}
}