From 31f6aa4d67cc6e1361c560a6e62699b6f5dc7cdb Mon Sep 17 00:00:00 2001 From: louib Date: Tue, 14 Mar 2023 19:46:50 -0400 Subject: [PATCH] feat: add merge feature --- src/db/entry.rs | 126 +++++++++ src/db/group.rs | 725 +++++++++++++++++++++++++++++++++++++++++++++++- src/db/mod.rs | 4 + 3 files changed, 852 insertions(+), 3 deletions(-) diff --git a/src/db/entry.rs b/src/db/entry.rs index ba93556a..624a2b67 100644 --- a/src/db/entry.rs +++ b/src/db/entry.rs @@ -1,9 +1,11 @@ use std::collections::HashMap; +use std::{thread, time}; use chrono::NaiveDateTime; use secstr::SecStr; use uuid::Uuid; +use crate::db::group::MergeLog; use crate::db::{Color, CustomData, Times}; #[cfg(feature = "totp")] @@ -41,6 +43,64 @@ impl Entry { ..Default::default() } } + + pub(crate) fn merge(&self, other: &Entry) -> Result<(Entry, MergeLog), String> { + let mut log = MergeLog::default(); + + let mut source_history = match &other.history { + Some(h) => h.clone(), + None => { + log.warnings.push(format!( + "Entry {} from source database had no history.", + self.uuid + )); + History::default() + } + }; + let mut destination_history = match &self.history { + Some(h) => h.clone(), + None => { + log.warnings.push(format!( + "Entry {} from destination database had no history.", + self.uuid + )); + History::default() + } + }; + let mut history_merge_log: MergeLog = MergeLog::default(); + + let mut response = self.clone(); + + if other.has_uncommitted_changes() { + log.warnings.push(format!( + "Entry {} from source database has uncommitted changes.", + self.uuid + )); + source_history.add_entry(other.clone()); + } + + // TODO we should probably check for uncommitted changes in the destination + // database here too for consistency. + + history_merge_log = destination_history.merge_with(&source_history)?; + response.history = Some(destination_history); + + Ok((response, log.merge_with(&history_merge_log))) + } + + // Convenience function used in unit tests, to make sure that: + // 1. 
The history gets updated after changing a field + // 2. We wait a second before committing the changes so that the timestamp is not the same + // as it previously was. This is necessary since the timestamps in the KDBX format + // do not preserve the msecs. + pub(crate) fn set_field_and_commit(&mut self, field_name: &str, field_value: &str) { + self.fields.insert( + field_name.to_string(), + Value::Unprotected(field_value.to_string()), + ); + thread::sleep(time::Duration::from_secs(1)); + self.update_history(); + } } impl<'a> Entry { @@ -227,6 +287,8 @@ pub struct History { } impl History { pub fn add_entry(&mut self, mut entry: Entry) { + // DISCUSS: should we make sure that the last modification time is not the same + // or older than the entry at the top of the history? if entry.history.is_some() { // Remove the history from the new history entry to avoid having // an exponential number of history entries. @@ -238,6 +300,70 @@ impl History { pub fn get_entries(&self) -> &Vec<Entry> { &self.entries } + + // Determines if the entries of the history are + // ordered by last modification time. + pub(crate) fn is_ordered(&self) -> bool { + let mut last_modification_time: Option<&NaiveDateTime> = None; + for entry in &self.entries { + if last_modification_time.is_none() { + last_modification_time = entry.times.get_last_modification(); + } + + let entry_modification_time = entry.times.get_last_modification().unwrap(); + // FIXME should we also handle equal modification times?? + if last_modification_time.unwrap() < entry_modification_time { + return false; + } + last_modification_time = Some(entry_modification_time); + } + true + } + + // Merge both histories together. 
+ pub(crate) fn merge_with(&mut self, other: &History) -> Result<MergeLog, String> { + let mut log = MergeLog::default(); + let mut new_history_entries: HashMap<NaiveDateTime, Entry> = HashMap::new(); + + for history_entry in &self.entries { + let modification_time = history_entry.times.get_last_modification().unwrap(); + if new_history_entries.contains_key(modification_time) { + return Err(format!( + "Found history entries with the same timestamp ({}) for entry {}.", + modification_time, history_entry.uuid, + )); + } + new_history_entries.insert(modification_time.clone(), history_entry.clone()); + } + + for history_entry in &other.entries { + let modification_time = history_entry.times.get_last_modification().unwrap(); + let existing_history_entry = new_history_entries.get(modification_time); + if let Some(existing_history_entry) = existing_history_entry { + if !existing_history_entry.eq(&history_entry) { + log.warnings.push(format!("History entries for {} have the same modification timestamp but were not the same.", existing_history_entry.uuid)); + } + } else { + new_history_entries.insert(modification_time.clone(), history_entry.clone()); + } + } + + let mut all_modification_times: Vec<&NaiveDateTime> = new_history_entries.keys().collect(); + all_modification_times.sort(); + all_modification_times.reverse(); + let mut new_entries: Vec<Entry> = vec![]; + for modification_time in &all_modification_times { + new_entries.push(new_history_entries.get(&modification_time).unwrap().clone()); + } + + self.entries = new_entries; + if !self.is_ordered() { + // TODO this should be unit tested. 
+ return Err("The resulting history is not ordered.".to_string()); + } + + Ok(log) + } } #[cfg(test)] diff --git a/src/db/group.rs b/src/db/group.rs index 05ae961d..945d722b 100644 --- a/src/db/group.rs +++ b/src/db/group.rs @@ -3,10 +3,54 @@ use std::collections::VecDeque; use uuid::Uuid; use crate::db::{ + entry::{Entry, Value}, node::{Node, NodeIter, NodeRef, NodeRefMut}, CustomData, Times, }; +#[derive(Debug, Clone)] +pub enum MergeEventType { + EntryCreated, + EntryLocationUpdated, + + EntryUpdated, + GroupCreated, +} + +#[derive(Debug, Clone)] +pub struct MergeEvent { + /// The uuid of the node (entry or group) affected by + /// the merge event. + pub node_uuid: Uuid, + + pub event_type: MergeEventType, +} + +#[derive(Debug, Default, Clone)] +pub struct MergeLog { + pub warnings: Vec<String>, + pub events: Vec<MergeEvent>, +} + +impl MergeLog { + pub fn merge_with(&self, other: &MergeLog) -> MergeLog { + let mut response = MergeLog::default(); + response.warnings.append(self.warnings.clone().as_mut()); + response.warnings.append(other.warnings.clone().as_mut()); + response.events.append(self.events.clone().as_mut()); + response.events.append(other.events.clone().as_mut()); + response + } +} + +#[derive(Debug, Default, Eq, PartialEq, Clone)] +pub(crate) struct GroupRef { + pub uuid: Uuid, + pub name: String, +} + +pub(crate) type NodeLocation = Vec<GroupRef>; + /// A database group with child groups and entries #[derive(Debug, Default, Eq, PartialEq, Clone)] #[cfg_attr(feature = "serialization", derive(serde::Serialize))] @@ -166,6 +210,389 @@ impl Group { pub fn get_expiry_time(&self) -> Option<&chrono::NaiveDateTime> { self.times.get_expiry() } + + pub fn entries(&self) -> Vec<&Entry> { + let mut response: Vec<&Entry> = vec![]; + for node in &self.children { + if let Node::Entry(e) = node { + response.push(e) + } + } + response + } + + pub fn entries_mut(&mut self) -> Vec<&mut Entry> { + let mut response: Vec<&mut Entry> = vec![]; + for node in &mut self.children { + if let 
Node::Entry(e) = node { + response.push(e) + } + } + response + } + + pub fn groups_mut(&mut self) -> Vec<&mut Group> { + let mut response: Vec<&mut Group> = vec![]; + for node in &mut self.children { + if let Node::Group(g) = node { + response.push(g); + } + } + response + } + + fn replace_entry(&mut self, entry: &Entry) { + for node in &mut self.children { + match node { + Node::Group(g) => { + g.replace_entry(entry); + } + Node::Entry(e) => { + if e.uuid == entry.uuid { + *e = entry.clone(); + } + } + } + } + } + + pub(crate) fn has_group(&self, uuid: Uuid) -> bool { + for node in &self.children { + if let Node::Group(g) = node { + if g.uuid == uuid { + return true; + } + } + } + false + } + + pub(crate) fn get_group_mut( + &mut self, + location: &NodeLocation, + create_groups: bool, + ) -> Result<&mut Group, String> { + if location.len() == 0 { + return Err("Empty location.".to_string()); + } + + let mut remaining_location = location.clone(); + remaining_location.remove(0); + + if remaining_location.len() == 0 { + return Ok(self); + } + + let next_location = &remaining_location[0]; + let mut next_location_uuid = next_location.uuid; + + if !self.has_group(next_location_uuid) && create_groups { + let mut current_group: Option<Group> = None; + for i in (0..(remaining_location.len())).rev() { + let mut new_group = Group::new(&remaining_location[i].name); + if let Some(group) = &current_group { + new_group.add_node(group.clone()); + } + current_group = Some(new_group); + } + + let current_group = current_group.unwrap(); + next_location_uuid = current_group.uuid; + self.add_node(current_group); + } + + for node in &mut self.children { + if let Node::Group(g) = node { + if g.uuid != next_location_uuid { + continue; + } + return g.get_group_mut(&remaining_location, create_groups); + } + } + + return Err("The group was not found.".to_string()); + } + + pub fn add_node(&mut self, n: T) + where + T: Into<Node>, + { + self.children.push(n.into()) + } + + pub(crate) fn insert_entry( + &mut 
self, + entry: Entry, + location: &NodeLocation, + ) -> Result<(), String> { + let mut group: &mut Group = self.get_group_mut(&location, true)?; + group.add_node(entry); + Ok(()) + } + + pub(crate) fn remove_entry( + &mut self, + uuid: &Uuid, + location: &NodeLocation, + ) -> Result<Entry, String> { + let mut group: &mut Group = self.get_group_mut(&location, false)?; + + let mut removed_entry: Option<Entry> = None; + let mut new_nodes: Vec<Node> = vec![]; + println!("Searching for entry {} in {}", uuid, group.name); + for node in &group.children { + match node { + Node::Entry(e) => { + println!("Saw entry {}", &e.uuid); + if &e.uuid != uuid { + new_nodes.push(node.clone()); + continue; + } + removed_entry = Some(e.clone()); + } + Node::Group(_) => { + new_nodes.push(node.clone()); + } + } + } + + if let Some(entry) = removed_entry { + group.children = new_nodes; + return Ok(entry); + } else { + return Err(format!( + "Could not find entry {} in group {}.", + uuid, group.name + )); + } + } + + pub(crate) fn find_entry_location(&self, id: Uuid) -> Option<NodeLocation> { + let mut current_location = vec![GroupRef { + uuid: self.uuid.clone(), + name: self.name.clone(), + }]; + for node in &self.children { + match node { + Node::Entry(e) => { + if e.uuid == id { + return Some(current_location); + } + } + Node::Group(g) => { + if let Some(mut location) = g.find_entry_location(id) { + current_location.append(&mut location); + return Some(current_location); + } + } + } + } + None + } + + pub fn find_entry_by_uuid(&self, id: Uuid) -> Option<&Entry> { + for node in &self.children { + match node { + Node::Group(g) => { + if let Some(e) = g.find_entry_by_uuid(id) { + return Some(e); + } + } + Node::Entry(e) => { + if e.uuid == id { + return Some(e); + } + } + } + } + None + } + + pub(crate) fn add_entry(&mut self, entry: Entry, location: &NodeLocation) { + if location.len() == 0 { + panic!("TODO handle this with a Response."); + } + + let mut remaining_location = location.clone(); + remaining_location.remove(0); + + if 
remaining_location.len() == 0 { + self.add_node(entry.clone()); + return; + } + + let next_location = &remaining_location[0]; + + println!( + "Searching for group {} {:?}", + next_location.name, next_location.uuid + ); + for node in &mut self.children { + if let Node::Group(g) = node { + if g.uuid != next_location.uuid { + continue; + } + g.add_entry(entry, &remaining_location); + return; + } + } + + // The group was not found, so we create it. + let mut new_group = Group { + name: next_location.name.clone(), + uuid: next_location.uuid.clone(), + ..Default::default() + }; + new_group.add_entry(entry, &remaining_location); + self.add_node(new_group); + } + + /// Merge this group with another group + pub fn merge(&mut self, other: &Group) -> Result<MergeLog, String> { + let mut log = MergeLog::default(); + + // Handle entry relocation. + for (entry, entry_location) in other.get_all_entries(&vec![]) { + let existing_entry = match self.find_entry_by_uuid(entry.uuid) { + Some(e) => e, + None => continue, + }; + let existing_entry_location = match self.find_entry_location(entry.uuid) { + Some(l) => l, + None => continue, + }; + + let source_location_changed_time = match entry.times.get_location_changed() { + Some(t) => *t, + None => { + log.warnings.push(format!( + "Entry {} did not have a location updated timestamp", + entry.uuid + )); + Times::epoch() + } + }; + let destination_location_changed = match existing_entry.times.get_location_changed() { + Some(t) => *t, + None => { + log.warnings.push(format!( + "Entry {} did not have a location updated timestamp", + entry.uuid + )); + Times::now() + } + }; + if source_location_changed_time > destination_location_changed { + log.events.push(MergeEvent { + event_type: MergeEventType::EntryLocationUpdated, + node_uuid: entry.uuid, + }); + let removed_entry = self.remove_entry(&entry.uuid, &existing_entry_location)?; + self.insert_entry(entry.clone(), &entry_location)?; + } + } + + // Handle entry updates + for (entry, entry_location) in 
other.get_all_entries(&vec![]) { + if let Some(existing_entry) = self.find_entry_by_uuid(entry.uuid) { + if existing_entry == entry { + continue; + } + + let source_last_modification = match entry.times.get_last_modification() { + Some(t) => *t, + None => { + log.warnings.push(format!( + "Entry {} did not have a last modification timestamp", + entry.uuid + )); + Times::epoch() + } + }; + let destination_last_modification = + match existing_entry.times.get_last_modification() { + Some(t) => *t, + None => { + log.warnings.push(format!( + "Entry {} did not have a last modification timestamp", + entry.uuid + )); + Times::now() + } + }; + + if destination_last_modification == source_last_modification { + if !existing_entry.eq(&entry) { + // This should never happen. + // This means that an entry was updated without updating the last modification + // timestamp. + return Err( + "Entries have the same modification time but are not the same!" + .to_string(), + ); + } + continue; + } + + let mut merged_entry: Entry = Entry::default(); + let mut entry_merge_log: MergeLog = MergeLog::default(); + + if destination_last_modification > source_last_modification { + (merged_entry, entry_merge_log) = existing_entry.merge(entry)?; + } else { + (merged_entry, entry_merge_log) = entry.clone().merge(existing_entry)?; + } + + if existing_entry.eq(&merged_entry) { + continue; + } + + self.replace_entry(&merged_entry); + log.events.push(MergeEvent { + event_type: MergeEventType::EntryUpdated, + node_uuid: merged_entry.uuid, + }); + log = log.merge_with(&entry_merge_log); + } else { + self.add_entry(entry.clone(), &entry_location); + // TODO should we update the time info for the entry? + log.events.push(MergeEvent { + event_type: MergeEventType::EntryCreated, + node_uuid: entry.uuid, + }); + } + } + + // TODO handle deleted objects + Ok(log) + } + + // Recursively get all the entries in the group, along with their + // location. 
+ pub(crate) fn get_all_entries( + &self, + current_location: &NodeLocation, + ) -> Vec<(&Entry, NodeLocation)> { + let mut response: Vec<(&Entry, NodeLocation)> = vec![]; + let mut new_location = current_location.clone(); + new_location.push(GroupRef { + uuid: self.uuid.clone(), + name: self.name.clone(), + }); + + for node in &self.children { + match node { + Node::Entry(e) => { + response.push((&e, new_location.clone())); + } + Node::Group(g) => { + let mut new_entries = g.get_all_entries(&new_location); + response.append(&mut new_entries); + } + } + } + response + } } impl<'a> Group { @@ -188,9 +615,12 @@ impl<'a> IntoIterator for &'a Group { #[cfg(test)] mod group_tests { - use super::Group; - use crate::db::Entry; - use crate::Database; + use std::{thread, time}; + + use super::{Entry, Group, GroupRef, Node, Times, Value}; + use crate::db::NodeRef; + use crate::{Database, DatabaseKey}; + use std::{fs::File, path::Path}; #[test] fn get() { @@ -208,4 +638,293 @@ mod group_tests { assert!(db.root.get(&["General", "Sample Entry #2"]).is_some()); assert!(db.root.get(&["General"]).is_some()); } + + #[test] + fn test_merge_idempotence() { + let mut destination_group = Group::new("group1"); + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + destination_group.add_node(entry); + + let mut source_group = destination_group.clone(); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + assert_eq!(destination_group.children.len(), 1); + // The 2 groups should be exactly the same after merging, since + // nothing was performed during the merge. 
+ assert_eq!(destination_group, source_group); + + let mut entry = &mut destination_group.entries_mut()[0]; + entry.set_field_and_commit("Title", "entry1_updated"); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + let destination_group_just_after_merge = destination_group.clone(); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + // Merging twice in a row, even if the first merge updated the destination group, + // should not create more changes. + assert_eq!(destination_group_just_after_merge, destination_group); + } + + #[test] + fn test_merge_add_new_entry() { + let mut destination_group = Group::new("group1"); + let mut source_group = Group::new("group1"); + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + source_group.add_node(entry); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + assert_eq!(destination_group.children.len(), 1); + let new_entry = destination_group.find_entry_by_uuid(entry_uuid); + assert!(new_entry.is_some()); + assert_eq!( + new_entry.unwrap().get_title().unwrap(), + "entry1".to_string() + ); + + // Merging the same group again should not create a duplicate entry. 
+ let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + assert_eq!(destination_group.children.len(), 1); + } + + #[test] + fn test_merge_add_new_non_root_entry() { + let mut destination_group = Group::new("group1"); + let mut destination_sub_group = Group::new("subgroup1"); + destination_group.add_node(destination_sub_group); + + let mut source_group = destination_group.clone(); + let mut source_sub_group = &mut source_group.groups_mut()[0]; + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + source_sub_group.add_node(entry); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + let destination_entries = destination_group.get_all_entries(&vec![]); + assert_eq!(destination_entries.len(), 1); + let (created_entry, created_entry_location) = destination_entries.get(0).unwrap(); + println!("{:?}", created_entry_location); + assert_eq!(created_entry_location.len(), 2); + } + + #[test] + fn test_merge_add_new_entry_new_group() { + let mut destination_group = Group::new("group1"); + let mut destination_sub_group = Group::new("subgroup1"); + let mut source_group = Group::new("group1"); + let mut source_sub_group = Group::new("subgroup1"); + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + source_sub_group.add_node(entry); + source_group.add_node(source_sub_group); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + let destination_entries = destination_group.get_all_entries(&vec![]); + assert_eq!(destination_entries.len(), 1); + let (created_entry, created_entry_location) = destination_entries.get(0).unwrap(); 
+ assert_eq!(created_entry_location.len(), 2); + } + + #[test] + fn test_merge_entry_relocation_existing_group() { + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + let mut destination_group = Group::new("group1"); + let mut destination_sub_group1 = Group::new("subgroup1"); + let mut destination_sub_group2 = Group::new("subgroup2"); + destination_sub_group1.add_node(entry.clone()); + destination_group.add_node(destination_sub_group1.clone()); + destination_group.add_node(destination_sub_group2.clone()); + + let mut source_group = destination_group.clone(); + assert!(source_group.get_all_entries(&vec![]).len() == 1); + + let mut removed_entry = source_group + .remove_entry( + &entry_uuid, + &vec![ + GroupRef { + uuid: destination_group.uuid.clone(), + name: "".to_string(), + }, + GroupRef { + uuid: destination_sub_group1.uuid.clone(), + name: "".to_string(), + }, + ], + ) + .unwrap(); + removed_entry.times.set_location_changed(Times::now()); + assert!(source_group.get_all_entries(&vec![]).len() == 0); + // FIXME we should not have to update the history here. We should + // have a better compare function in the merge function instead. 
+ removed_entry.update_history(); + source_group + .insert_entry( + removed_entry, + &vec![ + GroupRef { + uuid: destination_group.uuid.clone(), + name: "".to_string(), + }, + GroupRef { + uuid: destination_sub_group2.uuid.clone(), + name: "".to_string(), + }, + ], + ) + .unwrap(); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + + let destination_entries = destination_group.get_all_entries(&vec![]); + assert_eq!(destination_entries.len(), 1); + let (moved_entry, moved_entry_location) = destination_entries.get(0).unwrap(); + assert_eq!(moved_entry_location.len(), 2); + assert_eq!(moved_entry_location[0].name, "group1".to_string()); + assert_eq!(moved_entry_location[1].name, "subgroup2".to_string()); + } + + #[test] + fn test_merge_entry_relocation_new_group() { + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + + let mut destination_group = Group::new("group1"); + let mut destination_sub_group = Group::new("subgroup1"); + destination_sub_group.add_node(entry.clone()); + destination_group.add_node(destination_sub_group); + + let mut source_group = destination_group.clone(); + let mut source_sub_group = Group::new("subgroup2"); + thread::sleep(time::Duration::from_secs(1)); + entry.times.set_location_changed(Times::now()); + // FIXME we should not have to update the history here. We should + // have a better compare function in the merge function instead. 
+ entry.update_history(); + source_sub_group.add_node(entry.clone()); + source_group.children = vec![]; + source_group.add_node(source_sub_group); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + + let destination_entries = destination_group.get_all_entries(&vec![]); + assert_eq!(destination_entries.len(), 1); + let (created_entry, created_entry_location) = destination_entries.get(0).unwrap(); + assert_eq!(created_entry_location.len(), 2); + assert_eq!(created_entry_location[0].name, "group1".to_string()); + assert_eq!(created_entry_location[1].name, "subgroup2".to_string()); + } + + #[test] + fn test_update_in_destination_no_conflict() { + let mut destination_group = Group::new("group1"); + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + + destination_group.add_node(entry); + + let mut source_group = destination_group.clone(); + + let mut entry = &mut destination_group.entries_mut()[0]; + entry.set_field_and_commit("Title", "entry1_updated"); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + + let entry = destination_group.entries()[0]; + assert_eq!(entry.get_title(), Some("entry1_updated")); + } + + #[test] + fn test_update_in_source_no_conflict() { + let mut destination_group = Group::new("group1"); + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + destination_group.add_node(entry); + + let mut source_group = destination_group.clone(); + + let mut entry = &mut source_group.entries_mut()[0]; + entry.set_field_and_commit("Title", "entry1_updated"); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + 
assert_eq!(merge_result.events.len(), 1); + + let entry = destination_group.entries()[0]; + assert_eq!(entry.get_title(), Some("entry1_updated")); + } + + #[test] + fn test_update_with_conflicts() { + let mut destination_group = Group::new("group1"); + + let mut entry = Entry::new(); + let entry_uuid = entry.uuid.clone(); + entry.set_field_and_commit("Title", "entry1"); + destination_group.add_node(entry); + + let mut source_group = destination_group.clone(); + + let mut entry = &mut destination_group.entries_mut()[0]; + entry.set_field_and_commit("Title", "entry1_updated_from_destination"); + + let mut entry = &mut source_group.entries_mut()[0]; + entry.set_field_and_commit("Title", "entry1_updated_from_source"); + + let merge_result = destination_group.merge(&source_group).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 1); + + let entry = destination_group.entries()[0]; + assert_eq!(entry.get_title(), Some("entry1_updated_from_source")); + + let merged_history = entry.history.clone().unwrap(); + assert!(merged_history.is_ordered()); + assert_eq!(merged_history.entries.len(), 3); + let merged_entry = &merged_history.entries[1]; + assert_eq!( + merged_entry.get_title(), + Some("entry1_updated_from_destination") + ); + + // Merging again should not result in any additional change. + let merge_result = destination_group.merge(&destination_group.clone()).unwrap(); + assert_eq!(merge_result.warnings.len(), 0); + assert_eq!(merge_result.events.len(), 0); + } } diff --git a/src/db/mod.rs b/src/db/mod.rs index 7cdbdced..db2ff3ba 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -212,6 +212,10 @@ impl Times { chrono::NaiveDateTime::from_timestamp_opt(now, 0).unwrap() } + pub fn epoch() -> NaiveDateTime { + chrono::NaiveDateTime::from_timestamp_opt(0, 0).unwrap() + } + pub fn new() -> Times { let mut response = Times::default(); let now = Times::now();