From a0f9a5bb2a294674a28fc8ad38deb11d9bf9ecb1 Mon Sep 17 00:00:00 2001 From: Katharina Fey Date: Sat, 15 Sep 2018 22:46:39 +0100 Subject: A large batch of refactoring in FileVault Okay what am I doing here? If you're reading this in the future, it probably means I made some bad decisions. First: I'm sorry. Secondly: let me explain why I did what I did... Until this point `FileVault` has been a very transparent structure. It didn't really interact with the data much, basically solving all fs operations with streamed iterators and making sure stuff was done correctly, but not adding any depth on top of that. This needed to change... This isn't quite done and most of the code just breaks here ;) But the idea is to have `FileVault` be an intelligent wrapper around the filesystem. That means that not only does it cache certain operations for speed, it also keeps a selective index of files that exist. So for example, `Headers` were added here, that are always kept in sync with the FS. But only certain Records are stored (not only for security but also size concerns). After this refactoring is done (I shall write another long commit) the FileVault will act more as its own type than just a simple, linear `Vault` implementation.
--- lockchain-files/src/create.rs | 2 +- lockchain-files/src/fs.rs | 137 ------------------------------------ lockchain-files/src/fs/mod.rs | 123 ++++++++++++++++++++++++++++++++ lockchain-files/src/fs/primitive.rs | 34 +++++++++ lockchain-files/src/lib.rs | 125 +++++++++++++++++--------------- lockchain-files/src/load.rs | 2 +- 6 files changed, 228 insertions(+), 195 deletions(-) delete mode 100644 lockchain-files/src/fs.rs create mode 100644 lockchain-files/src/fs/mod.rs create mode 100644 lockchain-files/src/fs/primitive.rs diff --git a/lockchain-files/src/create.rs b/lockchain-files/src/create.rs index 7d6875c..e35c449 100644 --- a/lockchain-files/src/create.rs +++ b/lockchain-files/src/create.rs @@ -11,7 +11,7 @@ use lcc::{ use std::collections::HashMap; use config::{ConfigError, VaultConfig}; -use fs::{FileType, Filesystem}; +use fs::Filesystem; use FileVault; impl FileVault { diff --git a/lockchain-files/src/fs.rs b/lockchain-files/src/fs.rs deleted file mode 100644 index af47e50..0000000 --- a/lockchain-files/src/fs.rs +++ /dev/null @@ -1,137 +0,0 @@ -//! Filesystem abstraction for various data types -//! -//! All operations return io::Result<()> to indicate errors -//! and functions that have multiple file endpoints will return -//! a folded error list to indicate which ops were successful -//! and which failed. -//! -//! There is also a `From> for Result` implementation -//! which will return either `Ok(())` or the first error in the list -//! of operations. 
- -use lcc::traits::AutoEncoder; - -use std::collections::HashMap; -use std::error::Error; -use std::io::{self, Write}; -use std::{ - fs::{self, File, OpenOptions as OO}, - path::PathBuf, -}; - -use utils::FileToString; - -#[derive(Debug)] -pub struct Filesystem { - pub name: String, - pub path: String, - pub root: PathBuf, -} - -/// A switching enum to determine what type of file to load -#[allow(dead_code)] -pub enum FileType { - /// A data record file - Record, - /// A MetaDomain file - Metadata, - /// A simple checksum file - Checksum, - #[doc(hidden)] - __NonExhaustive, -} - -/// Construct a file ending for a specific match result -macro_rules! file_ending { - ($type:expr) => { - match $type { - FileType::Record => "record", - FileType::Metadata => "meta", - FileType::Checksum => "sum", - _ => "dat", - } - }; -} - -impl Filesystem { - /// Create a new filesystem representation - /// - /// This function does _not_ touch the disk! - pub fn new(path: &str, name: &str) -> Self { - let mut buffer = PathBuf::new(); - buffer.push(path); - buffer.push(format!("{}.vault", name)); - - Self { - name: name.to_owned(), - path: path.to_owned(), - root: buffer, - } - } - - /// Create required directories - pub fn scaffold(&self) -> Result<(), io::Error> { - fs::create_dir_all(&self.root)?; - fs::create_dir(&self.root.join("records"))?; - fs::create_dir(&self.root.join("metadata"))?; - fs::create_dir(&self.root.join("checksums"))?; - Ok(()) - } - - /// Load all files of a certain type into a Vec - pub fn fetch(&self, types: FileType) -> Result, Box> { - Ok(fs::read_dir(match types { - FileType::Record => self.root.join("records"), - FileType::Metadata => self.root.join("metadata"), - _ => self.root.clone(), - })?.into_iter() - .filter_map(|r| r.ok()) - .filter(|f| match f.file_type() { - Ok(vf) => vf.is_file(), - _ => false, - }).map(|de| de.path()) - .filter_map(|p| p.into_os_string().into_string().ok()) - .filter_map(|s| File::open(s).ok()) - .filter_map(|mut f| 
f.get_string().ok()) - .filter_map(|s| T::decode(&s).ok()) - .collect()) - } - - /// Retrieve a single record from the cached vault - pub fn pull(&self, types: FileType, id: &str) -> Result> { - Ok(T::decode( - &File::open(self.root.join(&format!("{}.{}", id, file_ending!(types))))? - .get_string()?, - )?) - } - - /// Respond to a sync request - pub fn sync(&self, data: &HashMap, types: FileType) -> Result<(), Box> - where - T: AutoEncoder, - { - data.into_iter() - .map(|(k, v)| (k, v.encode().ok())) - .map(|(k, v)| { - ( - match types { - FileType::Record => self.root.join("records"), - FileType::Metadata => self.root.join("metadata"), - _ => self.root.join("."), - }.join(format!("{}.{}", k, file_ending!(types))), - v, - ) - }).filter(|(_, v)| v.is_some()) - .map(|(k, v)| (k, v.unwrap())) - .map(|(path, data): (PathBuf, String)| { - (OO::new().create(true).write(true).open(path), data) - }).filter(|(path, _)| path.is_ok()) - .map(|(file, data)| (file.unwrap(), data)) - .for_each(|(mut file, data)| { - file.write_all(data.as_bytes()) - .expect("Failed to write file!") - }); - - Ok(()) - } -} diff --git a/lockchain-files/src/fs/mod.rs b/lockchain-files/src/fs/mod.rs new file mode 100644 index 0000000..6a6d898 --- /dev/null +++ b/lockchain-files/src/fs/mod.rs @@ -0,0 +1,123 @@ +//! Filesystem abstraction for various data types +//! +//! All operations return io::Result<()> to indicate errors +//! and functions that have multiple file endpoints will return +//! a folded error list to indicate which ops were successful +//! and which failed. +//! +//! There is also a `From> for Result` implementation +//! which will return either `Ok(())` or the first error in the list +//! of operations. 
+ +use lcc::traits::{Body, AutoEncoder}; + +use std::collections::HashMap; +use std::error::Error; +use std::io::{self, Write}; +use std::{ + fs::{self, File, OpenOptions as OO}, + path::PathBuf, +}; + +use utils::FileToString; +use FileVault; + +mod primitive; +use self::primitive::*; + +#[derive(Debug)] +pub struct Filesystem { + pub name: String, + pub path: String, + pub root: PathBuf, +} + +impl Filesystem { + /// Create a new filesystem representation + /// + /// This function does _not_ touch the disk! + pub fn new(path: &str, name: &str) -> Self { + let mut buffer = PathBuf::new(); + buffer.push(path); + buffer.push(format!("{}.vault", name)); + + Self { + name: name.to_owned(), + path: path.to_owned(), + root: buffer, + } + } + + /// Create required directories + pub fn scaffold(&self) -> Result<(), io::Error> { + fs::create_dir_all(&self.root)?; + fs::create_dir(&self.root.join("records"))?; + fs::create_dir(&self.root.join("metadata"))?; + fs::create_dir(&self.root.join("checksums"))?; + Ok(()) + } + + /// Load all files of a certain type into a Vec + pub fn fetch(&self, types: FileType) -> Result, Box> { + Ok(fs::read_dir(match types { + FileType::Record => self.root.join("records"), + FileType::Metadata => self.root.join("metadata"), + _ => self.root.clone(), + })?.into_iter() + .filter_map(|r| r.ok()) + .filter(|f| match f.file_type() { + Ok(vf) => vf.is_file(), + _ => false, + }).map(|de| de.path()) + .filter_map(|p| p.into_os_string().into_string().ok()) + .filter_map(|s| File::open(s).ok()) + .filter_map(|mut f| f.get_string().ok()) + .filter_map(|s| T::decode(&s).ok()) + .collect()) + } + + /// Retrieve a single record from the cached vault + pub fn pull(&self, types: FileType, id: &str) -> Result> { + Ok(T::decode( + &File::open(self.root.join(&format!("{}.{}", id, file_ending!(types))))? + .get_string()?, + )?) 
+ } + + pub fn sync_vault(&self, vault: &FileVault) -> Result<(), io::Error> { + vault.config.save(&self.root)?; + + + unimplemented!() + } + + /// Respond to a sync request + pub fn sync(&self, data: &HashMap, types: FileType) -> Result<(), Box> + where + T: AutoEncoder, + { + data.into_iter() + .map(|(k, v)| (k, v.encode().ok())) + .map(|(k, v)| { + ( + match types { + FileType::Record => self.root.join("records"), + FileType::Metadata => self.root.join("metadata"), + _ => self.root.join("."), + }.join(format!("{}.{}", k, file_ending!(types))), + v, + ) + }).filter(|(_, v)| v.is_some()) + .map(|(k, v)| (k, v.unwrap())) + .map(|(path, data): (PathBuf, String)| { + (OO::new().create(true).write(true).open(path), data) + }).filter(|(path, _)| path.is_ok()) + .map(|(file, data)| (file.unwrap(), data)) + .for_each(|(mut file, data)| { + file.write_all(data.as_bytes()) + .expect("Failed to write file!") + }); + + Ok(()) + } +} diff --git a/lockchain-files/src/fs/primitive.rs b/lockchain-files/src/fs/primitive.rs new file mode 100644 index 0000000..3723232 --- /dev/null +++ b/lockchain-files/src/fs/primitive.rs @@ -0,0 +1,34 @@ +//! Very simple file system primitives + +#![allow(dead_code)] + +/// A set of files that exist inside a `FileVault` +pub enum FileType { + /// A data record file + Record, + /// A MetaDomain file + Metadata, + /// A simple checksum file + Checksum, + /// _The_ config file + Config, + #[doc(hidden)] + __NonExhaustive, +} + +/// Construct a file ending for a specific match result +macro_rules! file_ending { + ($type:expr) => { + match $type { + FileType::Record => "record", + FileType::Metadata => "meta", + FileType::Checksum => "sum", + FileType::Config => "cfg", + _ => "dat", + } + }; +} + +pub fn write_file(tt: FileType) {} + +pub fn read_file() {} diff --git a/lockchain-files/src/lib.rs b/lockchain-files/src/lib.rs index 318aef6..611d260 100644 --- a/lockchain-files/src/lib.rs +++ b/lockchain-files/src/lib.rs @@ -6,8 +6,6 @@ //! //!
All further documentation can be found in `FileVault` -#![feature(non_modrs_mods)] - extern crate lockchain_core as lcc; extern crate semver; extern crate toml; @@ -29,11 +27,11 @@ mod config; mod create; mod fs; mod load; -mod utils; mod userstore; +mod utils; pub use config::{ConfigError, VaultConfig}; -use fs::{FileType, Filesystem}; +use fs::Filesystem; /// Persistence mapper to a folder and file structure /// @@ -127,100 +125,115 @@ impl Vault for FileVault { /// Caches all files from disk to memory fn fetch(&mut self) { - self.records.clear(); - self.metadata.clear(); - - self.fs - .fetch::>(FileType::Record) - .unwrap() - .into_iter() - .map(|rec| (rec.header.name.clone(), rec)) - .for_each(|x| { - self.records.insert(x.0, x.1); - }); - - self.fs - .fetch::(FileType::Metadata) - .unwrap() - .into_iter() - .map(|rec| (rec.name().into(), rec)) - .for_each(|x| { - self.metadata.insert(x.0, x.1); - }); + // self.records.clear(); + // self.metadata.clear(); + + // self.fs + // .fetch::>(FileType::Record) + // .unwrap() + // .into_iter() + // .map(|rec| (rec.header.name.clone(), rec)) + // .for_each(|x| { + // self.records.insert(x.0, x.1); + // }); + + // self.fs + // .fetch::(FileType::Metadata) + // .unwrap() + // .into_iter() + // .map(|rec| (rec.name().into(), rec)) + // .for_each(|x| { + // self.metadata.insert(x.0, x.1); + // }); + unimplemented!() } /// Make sure a single record is loaded fn pull(&mut self, name: &str) { - self.records.remove(name); - self.records.insert( - name.to_owned(), - self.fs.pull::>(FileType::Record, name).unwrap(), - ); + // self.records.remove(name); + // self.records.insert( + // name.to_owned(), + // self.fs.pull::>(FileType::Record, name).unwrap(), + // ); + unimplemented!() } fn sync(&mut self) { - self.fs - .sync::>(&self.records, FileType::Record) - .unwrap(); - self.fs - .sync::(&self.metadata, FileType::Metadata) - .unwrap(); + // self.fs + // .sync::>(&self.records, FileType::Record) + // .unwrap(); + // self.fs + // 
.sync::(&self.metadata, FileType::Metadata) + // .unwrap(); + unimplemented!() } fn get_record(&self, name: &str) -> Option<&Record> { - self.records.get(name) + // self.records.get(name) + unimplemented!() } fn contains(&self, name: &str) -> bool { - self.records.contains_key(name) + // self.records.contains_key(name) + unimplemented!() } fn add_record(&mut self, key: &str, category: &str, tags: Vec<&str>) { - self.records - .insert(key.to_owned(), Record::new(key, category, tags)); + // self.records + // .insert(key.to_owned(), Record::new(key, category, tags)); + unimplemented!() } fn delete_record(&mut self, record: &str) -> Option> { - self.records.remove(record) + // self.records.remove(record) + unimplemented!() } fn add_data(&mut self, record: &str, key: &str, data: Payload) -> Option<()> { - self.records.get_mut(record)?.add_data(key, data) + // self.records.get_mut(record)?.add_data(key, data) + unimplemented!() } fn get_data(&self, record: &str, key: &str) -> Option<&Payload> { - self.records.get(record)?.get_data(key) + // self.records.get(record)?.get_data(key) + unimplemented!() } fn meta_add_domain(&mut self, domain: &str) -> Option<()> { - if self.metadata.contains_key(domain) { - None - } else { - self.metadata.insert(domain.into(), MetaDomain::new(domain)); - Some(()) - } + // if self.metadata.contains_key(domain) { + // None + // } else { + // self.metadata.insert(domain.into(), MetaDomain::new(domain)); + // Some(()) + // } + unimplemented!() } fn meta_pull_domain(&self, domain: &str) -> Option<&MetaDomain> { - self.metadata.get(domain) + // self.metadata.get(domain) + unimplemented!() } fn meta_push_domain(&mut self, domain: MetaDomain) -> Option<()> { - self.metadata - .insert(domain.name().into(), domain) - .map_or((), |_| ()) // We don't care about `None` - .into() + // self.metadata + // .insert(domain.name().into(), domain) + // .map_or((), |_| ()) // We don't care about `None` + // .into() + unimplemented!() } fn meta_set(&mut self, 
domain: &str, name: &str, data: Payload) -> Option<()> { - self.metadata.get_mut(domain)?.set_field(name, data) + // self.metadata.get_mut(domain)?.set_field(name, data) + unimplemented!() } fn meta_get(&mut self, domain: &str, name: &str) -> Option { - Some(self.metadata.get(domain)?.get_field(name)?.clone()) + // Some(self.metadata.get(domain)?.get_field(name)?.clone()) + unimplemented!() } fn meta_exists(&self, domain: &str) -> bool { - self.metadata.contains_key(domain) + // self.metadata.contains_key(domain) + unimplemented!() } } diff --git a/lockchain-files/src/load.rs b/lockchain-files/src/load.rs index fd4e66e..31da63c 100644 --- a/lockchain-files/src/load.rs +++ b/lockchain-files/src/load.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use ::FileVault; use ::config::{VaultConfig, ConfigError}; -use ::fs::{Filesystem, FileType}; +use ::fs::Filesystem; impl FileVault { -- cgit v1.2.3