author     Katharina Fey <kookie@spacekookie.de>  2018-09-15 22:46:39 +0100
committer  Katharina Fey <kookie@spacekookie.de>  2018-09-15 22:58:19 +0100
commit     a0f9a5bb2a294674a28fc8ad38deb11d9bf9ecb1
tree       56ba7f22c2b7678a91b6b1bd8fc4d1184b1da69e
parent     ffafddfcbd07be8951701c3299407999928baddc
A large batch of refactoring in FileVault
Okay, what am I doing here? If you're reading this in the future, it probably means I made some bad decisions. First: I'm sorry. Second: let me explain why I did what I did...

Until this point `FileVault` has been a very transparent structure. It didn't really interact with the data much, basically solving all fs operations with streamed iterators and making sure stuff was done correctly, but not adding any depth on top of that. This needed to change...

This isn't quite done and most of the code just breaks here ;) But the idea is to have `FileVault` be an intelligent wrapper around the filesystem. That means that not only does it cache certain operations for speed, it also keeps a selective index of the files that exist. For example, `Headers` were added here, which are always kept in sync with the FS. But only certain Records are stored (not only for security but also for size concerns).

After this refactoring is done (I shall write another long commit) the FileVault will act more as its own type than just a simple, linear `Vault<T>` implementation.
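To make that intent a bit more concrete, here is a minimal sketch of the direction this could take. The field names (`headers`, `records`) and the simplified stand-in types are assumptions for illustration, not the final layout from this refactoring:

```rust
// Minimal sketch only: stand-in types instead of the real
// lockchain-core traits, and guessed field names.
use std::collections::HashMap;

/// Simplified stand-ins for the real record types.
pub struct Header {
    pub name: String,
}

pub struct Record {
    pub header: Header,
    pub body: Vec<u8>,
}

/// A FileVault that indexes headers eagerly but caches full
/// records only selectively.
pub struct FileVault {
    /// Always kept in sync with the filesystem.
    headers: HashMap<String, Header>,
    /// Selective cache: only records that were explicitly pulled.
    records: HashMap<String, Record>,
}

impl FileVault {
    /// A record is only available in memory if it was pulled before.
    pub fn cached(&self, name: &str) -> Option<&Record> {
        self.records.get(name)
    }

    /// Existence checks can be answered from the header index alone.
    pub fn exists(&self, name: &str) -> bool {
        self.headers.contains_key(name)
    }
}
```

In a shape like this, `exists()` can be answered from the always-synced header index without ever deserialising a full record, while record bodies only enter memory when explicitly requested.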
-rw-r--r--  lockchain-files/src/create.rs  2
-rw-r--r--  lockchain-files/src/fs/mod.rs (renamed from lockchain-files/src/fs.rs)  38
-rw-r--r--  lockchain-files/src/fs/primitive.rs  34
-rw-r--r--  lockchain-files/src/lib.rs  125
-rw-r--r--  lockchain-files/src/load.rs  2
5 files changed, 117 insertions, 84 deletions
diff --git a/lockchain-files/src/create.rs b/lockchain-files/src/create.rs
index 7d6875c..e35c449 100644
--- a/lockchain-files/src/create.rs
+++ b/lockchain-files/src/create.rs
@@ -11,7 +11,7 @@ use lcc::{
use std::collections::HashMap;
use config::{ConfigError, VaultConfig};
-use fs::{FileType, Filesystem};
+use fs::Filesystem;
use FileVault;
impl<T: Body> FileVault<T> {
diff --git a/lockchain-files/src/fs.rs b/lockchain-files/src/fs/mod.rs
index af47e50..6a6d898 100644
--- a/lockchain-files/src/fs.rs
+++ b/lockchain-files/src/fs/mod.rs
@@ -9,7 +9,7 @@
//! which will return either `Ok(())` or the first error in the list
//! of operations.
-use lcc::traits::AutoEncoder;
+use lcc::traits::{Body, AutoEncoder};
use std::collections::HashMap;
use std::error::Error;
@@ -20,6 +20,10 @@ use std::{
};
use utils::FileToString;
+use FileVault;
+
+mod primitive;
+use self::primitive::*;
#[derive(Debug)]
pub struct Filesystem {
@@ -28,31 +32,6 @@ pub struct Filesystem {
pub root: PathBuf,
}
-/// A switching enum to determine what type of file to load
-#[allow(dead_code)]
-pub enum FileType {
- /// A data record file
- Record,
- /// A MetaDomain file
- Metadata,
- /// A simple checksum file
- Checksum,
- #[doc(hidden)]
- __NonExhaustive,
-}
-
-/// Construct a file ending for a specific match result
-macro_rules! file_ending {
- ($type:expr) => {
- match $type {
- FileType::Record => "record",
- FileType::Metadata => "meta",
- FileType::Checksum => "sum",
- _ => "dat",
- }
- };
-}
-
impl Filesystem {
/// Create a new filesystem representation
///
@@ -105,6 +84,13 @@ impl Filesystem {
)?)
}
+ pub fn sync_vault<T: Body>(&self, vault: &FileVault<T>) -> Result<(), io::Error> {
+ vault.config.save(&self.root)?;
+
+
+ unimplemented!()
+ }
+
/// Respond to a sync request
pub fn sync<T>(&self, data: &HashMap<String, T>, types: FileType) -> Result<(), Box<Error>>
where
diff --git a/lockchain-files/src/fs/primitive.rs b/lockchain-files/src/fs/primitive.rs
new file mode 100644
index 0000000..3723232
--- /dev/null
+++ b/lockchain-files/src/fs/primitive.rs
@@ -0,0 +1,34 @@
+//! Very simple file system primitives
+
+#![allow(dead_code)]
+
+/// A set of files that exist inside a `FileVault`
+pub enum FileType {
+ /// A data record file
+ Record,
+ /// A MetaDomain file
+ Metadata,
+ /// A simple checksum file
+ Checksum,
+ /// _The_ config file
+ Config,
+ #[doc(hidden)]
+ __NonExhaustive,
+}
+
+/// Construct a file ending for a specific match result
+macro_rules! file_ending {
+ ($type:expr) => {
+ match $type {
+ FileType::Record => "record",
+ FileType::Metadata => "meta",
+ FileType::Checksum => "sum",
+ FileType::Config => "cfg",
+ _ => "dat",
+ }
+ };
+}
+
+pub fn write_file(tt: FileType) {}
+
+pub fn read_file() {}
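
The `write_file` and `read_file` primitives in the new `primitive.rs` above are still empty stubs. Purely as an assumption about where they might be heading (these signatures are not taken from the commit), simple path-based helpers could look like this:

```rust
// Assumed signatures only, not part of the commit.
use std::fs::File;
use std::io::{self, Read, Write};
use std::path::Path;

/// Write raw encoded data to a path on disk.
pub fn write_file(path: &Path, data: &[u8]) -> Result<(), io::Error> {
    let mut f = File::create(path)?;
    f.write_all(data)
}

/// Read a file back into a String.
pub fn read_file(path: &Path) -> Result<String, io::Error> {
    let mut s = String::new();
    File::open(path)?.read_to_string(&mut s)?;
    Ok(s)
}
```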
diff --git a/lockchain-files/src/lib.rs b/lockchain-files/src/lib.rs
index 318aef6..611d260 100644
--- a/lockchain-files/src/lib.rs
+++ b/lockchain-files/src/lib.rs
@@ -6,8 +6,6 @@
//!
//! All further documentation can be found in `FileVault`
-#![feature(non_modrs_mods)]
-
extern crate lockchain_core as lcc;
extern crate semver;
extern crate toml;
@@ -29,11 +27,11 @@ mod config;
mod create;
mod fs;
mod load;
-mod utils;
mod userstore;
+mod utils;
pub use config::{ConfigError, VaultConfig};
-use fs::{FileType, Filesystem};
+use fs::Filesystem;
/// Persistence mapper to a folder and file structure
///
@@ -127,100 +125,115 @@ impl<T: Body> Vault<T> for FileVault<T> {
/// Caches all files from disk to memory
fn fetch(&mut self) {
- self.records.clear();
- self.metadata.clear();
-
- self.fs
- .fetch::<Record<T>>(FileType::Record)
- .unwrap()
- .into_iter()
- .map(|rec| (rec.header.name.clone(), rec))
- .for_each(|x| {
- self.records.insert(x.0, x.1);
- });
-
- self.fs
- .fetch::<MetaDomain>(FileType::Metadata)
- .unwrap()
- .into_iter()
- .map(|rec| (rec.name().into(), rec))
- .for_each(|x| {
- self.metadata.insert(x.0, x.1);
- });
+ // self.records.clear();
+ // self.metadata.clear();
+
+ // self.fs
+ // .fetch::<Record<T>>(FileType::Record)
+ // .unwrap()
+ // .into_iter()
+ // .map(|rec| (rec.header.name.clone(), rec))
+ // .for_each(|x| {
+ // self.records.insert(x.0, x.1);
+ // });
+
+ // self.fs
+ // .fetch::<MetaDomain>(FileType::Metadata)
+ // .unwrap()
+ // .into_iter()
+ // .map(|rec| (rec.name().into(), rec))
+ // .for_each(|x| {
+ // self.metadata.insert(x.0, x.1);
+ // });
+ unimplemented!()
}
/// Make sure a single record is loaded
fn pull(&mut self, name: &str) {
- self.records.remove(name);
- self.records.insert(
- name.to_owned(),
- self.fs.pull::<Record<T>>(FileType::Record, name).unwrap(),
- );
+ // self.records.remove(name);
+ // self.records.insert(
+ // name.to_owned(),
+ // self.fs.pull::<Record<T>>(FileType::Record, name).unwrap(),
+ // );
+ unimplemented!()
}
fn sync(&mut self) {
- self.fs
- .sync::<Record<T>>(&self.records, FileType::Record)
- .unwrap();
- self.fs
- .sync::<MetaDomain>(&self.metadata, FileType::Metadata)
- .unwrap();
+ // self.fs
+ // .sync::<Record<T>>(&self.records, FileType::Record)
+ // .unwrap();
+ // self.fs
+ // .sync::<MetaDomain>(&self.metadata, FileType::Metadata)
+ // .unwrap();
+ unimplemented!()
}
fn get_record(&self, name: &str) -> Option<&Record<T>> {
- self.records.get(name)
+ // self.records.get(name)
+ unimplemented!()
}
fn contains(&self, name: &str) -> bool {
- self.records.contains_key(name)
+ // self.records.contains_key(name)
+ unimplemented!()
}
fn add_record(&mut self, key: &str, category: &str, tags: Vec<&str>) {
- self.records
- .insert(key.to_owned(), Record::new(key, category, tags));
+ // self.records
+ // .insert(key.to_owned(), Record::new(key, category, tags));
+ unimplemented!()
}
fn delete_record(&mut self, record: &str) -> Option<Record<T>> {
- self.records.remove(record)
+ // self.records.remove(record)
+ unimplemented!()
}
fn add_data(&mut self, record: &str, key: &str, data: Payload) -> Option<()> {
- self.records.get_mut(record)?.add_data(key, data)
+ // self.records.get_mut(record)?.add_data(key, data)
+ unimplemented!()
}
fn get_data(&self, record: &str, key: &str) -> Option<&Payload> {
- self.records.get(record)?.get_data(key)
+ // self.records.get(record)?.get_data(key)
+ unimplemented!()
}
fn meta_add_domain(&mut self, domain: &str) -> Option<()> {
- if self.metadata.contains_key(domain) {
- None
- } else {
- self.metadata.insert(domain.into(), MetaDomain::new(domain));
- Some(())
- }
+ // if self.metadata.contains_key(domain) {
+ // None
+ // } else {
+ // self.metadata.insert(domain.into(), MetaDomain::new(domain));
+ // Some(())
+ // }
+ unimplemented!()
}
fn meta_pull_domain(&self, domain: &str) -> Option<&MetaDomain> {
- self.metadata.get(domain)
+ // self.metadata.get(domain)
+ unimplemented!()
}
fn meta_push_domain(&mut self, domain: MetaDomain) -> Option<()> {
- self.metadata
- .insert(domain.name().into(), domain)
- .map_or((), |_| ()) // We don't care about `None`
- .into()
+ // self.metadata
+ // .insert(domain.name().into(), domain)
+ // .map_or((), |_| ()) // We don't care about `None`
+ // .into()
+ unimplemented!()
}
fn meta_set(&mut self, domain: &str, name: &str, data: Payload) -> Option<()> {
- self.metadata.get_mut(domain)?.set_field(name, data)
+ // self.metadata.get_mut(domain)?.set_field(name, data)
+ unimplemented!()
}
fn meta_get(&mut self, domain: &str, name: &str) -> Option<Payload> {
- Some(self.metadata.get(domain)?.get_field(name)?.clone())
+ // Some(self.metadata.get(domain)?.get_field(name)?.clone())
+ unimplemented!()
}
fn meta_exists(&self, domain: &str) -> bool {
- self.metadata.contains_key(domain)
+ // self.metadata.contains_key(domain)
+ unimplemented!()
}
}
diff --git a/lockchain-files/src/load.rs b/lockchain-files/src/load.rs
index fd4e66e..31da63c 100644
--- a/lockchain-files/src/load.rs
+++ b/lockchain-files/src/load.rs
@@ -7,7 +7,7 @@ use std::collections::HashMap;
use ::FileVault;
use ::config::{VaultConfig, ConfigError};
-use ::fs::{Filesystem, FileType};
+use ::fs::Filesystem;
impl<T: Body> FileVault<T> {