commit 3f0a9ce49062be96da2ad25be41a52f5aa4b09d1 Author: James Hoffman Date: Fri Dec 6 12:41:02 2024 -0700 Initial public commit diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..d41b2d5 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,1268 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +dependencies = [ + "anstyle", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-trait" +version = "0.1.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.85", +] + +[[package]] +name = "autocfg" +version = "1.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "brotli" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bsdiff" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7f2e6c4f2a017f63b5a1fd7cc437f061b53a3e890bcca840ef756d72f6b72f2" + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" + +[[package]] +name = "cc" +version = "1.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.52.6", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width", + "windows-sys 0.52.0", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +dependencies = [ + "libc", +] + +[[package]] +name = "crossterm" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" +dependencies = [ + "bitflags 1.3.2", + "crossterm_winapi", + "libc", + "mio", + "parking_lot", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + +[[package]] +name = "env_filter" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + +[[package]] +name = "file-time-machine" +version = "0.1.0" +dependencies = [ + "brotli", + "bsdiff", + "chrono", + "directories", + "env_logger", + "gumdrop", + "hex", + "indicatif", + "inquire", + "log", + "num_cpus", + "serde", + "serde_json", + "sha2", + "sha256", + "walkdir", + "xxhash-rust", +] + +[[package]] +name = "fuzzy-matcher" +version = "0.3.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "54614a3312934d066701a80f20f15fa3b56d67ac7722b39eea5b4c9dd1d66c94" +dependencies = [ + "thread_local", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "gumdrop" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bc700f989d2f6f0248546222d9b4258f5b02a171a431f8285a81c08142629e3" +dependencies = [ + "gumdrop_derive", +] + +[[package]] +name = "gumdrop_derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "indicatif" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +dependencies = [ + "console", + "instant", + "number_prefix", + "portable-atomic", + "unicode-width", +] + +[[package]] +name = "inquire" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fddf93031af70e75410a2511ec04d49e758ed2f26dad3404a934e0fb45cc12a" +dependencies = [ + "bitflags 2.6.0", + "crossterm", + "dyn-clone", + "fuzzy-matcher", + "fxhash", + "newline-converter", + "once_cell", + "unicode-segmentation", + "unicode-width", +] + +[[package]] +name = "instant" +version = "0.1.13" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.161" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "newline-converter" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b6b097ecb1cbfed438542d16e84fd7ad9b0c76c8a65b7f9039212a3d14dc7f" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + 
+[[package]] +name = "object" +version = "0.36.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" + +[[package]] +name = "portable-atomic" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" + +[[package]] +name = "proc-macro2" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom", + "libredox", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.213" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.213" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.85", +] + +[[package]] +name = "serde_json" +version = "1.0.132" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha256" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18278f6a914fa3070aa316493f7d2ddfb9ac86ebc06fa3b83bffda487e9065b0" +dependencies = [ + "async-trait", + "bytes", + "hex", + "sha2", + "tokio", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +dependencies = [ + "libc", + "mio", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thiserror" +version = "1.0.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.85", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tokio" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +dependencies = [ + "backtrace", + "bytes", + "pin-project-lite", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.85", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.95" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.85", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + 
"windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "xxhash-rust" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a5cbf750400958819fb6178eaa83bee5cd9c29a26a40cc241df8c70fdd46984" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..39b9bc9 --- /dev/null +++ 
b/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "file-time-machine" +version = "0.1.0" +edition = "2021" +authors = ["Mizuki Zou + +

# File Time Machine

A snapshotting program as a standalone application
[![Build Linux](https://github.com/timothyhay256/ftm/actions/workflows/build-linux.yml/badge.svg)](https://github.com/timothyhay256/ftm/actions/workflows/build-linux.yml)
[![Build Windows](https://github.com/timothyhay256/ftm/actions/workflows/build-windows.yml/badge.svg)](https://github.com/timothyhay256/ftm/actions/workflows/build-windows.yml)
[![.github/workflows/build-release.yml](https://github.com/timothyhay256/ftm/actions/workflows/build-release.yml/badge.svg)](https://github.com/timothyhay256/ftm/actions/workflows/build-release.yml)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/afcd3d438c764d18b85299e4c3691262)](https://app.codacy.com?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
![No AI](https://img.shields.io/badge/free_of-AI_code-blue)

> [!CAUTION]
> This program is NOT safe for regular use and will most likely cause data loss if used that way! This is my first Rust project, so expect instability.
> It has been tested fairly well on Linux, but catastrophic bugs may still be present.

### What is this?
To start learning Rust, I decided to write an incremental snapshotting program, like Apple's Time Machine, but in userspace and cross-platform. It lets you take snapshots of folders and restore them, so you can move backwards and forwards in time. Think of it as Git, but easier to use and less powerful. And with a messy codebase. And dangerous and data-loss prone. And slower.

### Installation
#### Linux
Arch: Install `todo` from the AUR, or use Cargo. Optionally, install `todo-gui` as well.
Others: Use Cargo to install `file-time-machine`, or download the binary from the releases page. To get the GUI, download it from the releases page and run it with Python.
#### Windows
Download the .msi file from the releases page and run it. The program and the GUI will both be installed, and the GUI can be launched from the start menu.
#### MacOS (UNTESTED!)
First of all, you already have Time Machine.
But if you want it anyway, use Cargo to install `file-time-machine`.
#### Building from source
Clone or download the source code, and run one of the following commands:
 - `cargo run --release` # if you just want to run or test the program without installing it
 - `cargo install --path .` # if you want to install the program to ~/.cargo/bin

### Configuration
Create a configuration file at `~/.file-time-machine/config.json` to have it picked up automatically when running `ftm` without any arguments, or create one at any path you like and pass it with `-c`. Add the following content:
```
[
    {
        "folder_path": "/folder/path/you/want/to/snapshot",
        "get_hashes": false,
        "thread_count": 8,
        "brotli_compression_level": 5,
        "snapshot_mode": "fastest",
        "its_my_fault_if_i_lose_data": false
    }
]
```
`folder_path` is the folder you want to take and restore snapshots of.
`get_hashes` controls whether modified files are detected by hashing instead of a faster method such as modification date and size. Hashing is much slower.
`thread_count` is how many threads to use. Set it to 0 to pick a thread count automatically based on your CPU core count.
`brotli_compression_level` is the compression level for snapshot files, ranging from 1 to 11. Higher levels give better compression ratios but much worse speed; 5 seems to be a good level.
`its_my_fault_if_i_lose_data` is you agreeing that it is YOUR fault, not mine, if you lose data by using this software. Set it to true to skip the 5-second warning on each run.
`snapshot_mode` selects how snapshots are taken. There are three modes, described in more detail below. *Currently ONLY `fastest` is supported! I might or might not add the other modes later.*
`standard` is the normal method. It uses as little disk space as possible, but taking snapshots and moving backwards in time take much longer. If your files are small, the time difference won't be noticeable.
`faster` makes taking snapshots much faster at the cost of more disk space. It does not speed up restoring backwards. If you have the disk space and want the speed, this is a good option.
`fastest` makes both taking snapshots and restoring backwards much faster, but uses nearly twice the disk space of the other modes.
> [!WARNING]
> Once you select a snapshot mode, there is currently no way to switch to another one!

If you want to pass a specific config file (to snapshot a different path, for example), simply use the `-c` flag.
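For reference, each entry above maps onto a small, flat structure. Below is a minimal sketch of how such a file could be parsed with `serde` and `serde_json` (both already dependencies of the project); the struct name, field types, and `load_config` helper are assumptions for illustration, not the program's actual code:
```rust
use serde::Deserialize;

// Field names mirror the documented options; the struct itself is illustrative.
#[derive(Debug, Deserialize)]
struct ConfigEntry {
    folder_path: String,
    get_hashes: bool,
    thread_count: u32,
    brotli_compression_level: u32,
    snapshot_mode: String,
    its_my_fault_if_i_lose_data: bool,
}

// The config file is a JSON array, so it deserializes into a Vec of entries.
fn load_config(path: &str) -> Result<Vec<ConfigEntry>, Box<dyn std::error::Error>> {
    let text = std::fs::read_to_string(path)?;
    Ok(serde_json::from_str(&text)?)
}
```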
### Usage
##### Note that .time (used for storing snapshots) and .git are ignored. The ability to specify directories to ignore will be added in the future.
#### GUI
If you are on Windows, launch File Time Machine. On Linux/MacOS, run the gui/gui.py script.
Once it has started, ensure the square in the top right is green and says "Found ftm binary!". Operation of the GUI is fairly self-explanatory, but here are some details about its operation.
**Select Folder**: Select the folder you want to create snapshots for. If the folder is already tracked, `folder_path/.time/gui-config.json` will be checked for a config. If one is present, the program is ready for use; if not, you will be prompted to select the config file location. If the folder is not tracked yet, you will be prompted to start tracking it. If you say yes, a simple config will be placed in `folder_path/.time/gui-config.json`, and the program is ready for use.
**Select Config**: If a config could not be autodetected, you will need to specify the location of one manually. On Unix systems, the default one (the one used when no options are passed) should be at `~/.file-time-machine/config.json`.
**Create Snapshot**: Pretty self-explanatory. Creates a snapshot. A valid folder and config file must be selected first, however.
**Restore Snapshot**: Restores a snapshot. One must be selected in the main box.
##### Issues
If you run into any issues, check the console for further output. The console also shows the progress of creating a snapshot, which the GUI does not display. The console should open automatically on Windows.
#### CLI
Once you have finished configuration, run `ftm` to collect the initial set of metadata. (If you are specifying a config file, run `ftm -c /path/to/config`; the behavior is the same.)
On this run, a compressed copy of each file will be created, along with any other metafiles needed. These are stored in `.time`.
After this initial run, make some changes! You can create new files, delete old ones, and modify existing ones. Now run `ftm` again to create a snapshot. On this run, a diff is created between every changed file and its original version; this is what lets you restore yourself to this state in time.
Every time you run `ftm` and changes are detected, a new snapshot is created.
In order to restore a snapshot, first create one with `ftm` so you don't lose any working changes, then run `ftm restore` and select the snapshot you wish to restore. Optionally, you can use `ftm restore --restore-index n` to restore the nth snapshot (starting at 1, the oldest).
You can safely make changes while a snapshot is restored, but they will be overwritten when another snapshot is restored. You can also safely create additional snapshots while one is restored.

In order to return to the present, run `ftm restore` and select the most recent snapshot.
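To give a sense of what "changes are detected" means in practice: the stored metadata (size, modification date, and optionally a hash) is compared against the current state of each file. The sketch below illustrates that kind of scan with the `walkdir` crate, which the project depends on; it is not the program's actual code, and `StoredMeta`/`changed_files` are made-up names:
```rust
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::SystemTime;
use walkdir::WalkDir;

// Illustrative stand-in for what metadata.json stores per file.
struct StoredMeta {
    len: u64,
    modified: SystemTime,
}

// Walk the tracked folder, skipping `.time` and `.git`, and report files whose
// size or modification time no longer matches the stored metadata.
fn changed_files(root: &str, stored: &HashMap<PathBuf, StoredMeta>) -> Vec<PathBuf> {
    let mut changed = Vec::new();
    let walker = WalkDir::new(root).into_iter().filter_entry(|e| {
        let name = e.file_name().to_string_lossy();
        name != ".time" && name != ".git"
    });
    for entry in walker.flatten() {
        if !entry.file_type().is_file() {
            continue;
        }
        let path = entry.path().to_path_buf();
        let Ok(meta) = entry.metadata() else { continue };
        let unchanged = stored.get(&path).map_or(false, |s| {
            s.len == meta.len() && meta.modified().map_or(false, |m| m == s.modified)
        });
        if !unchanged {
            changed.push(path); // new file, or modified since the last snapshot
        }
    }
    changed
}
```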
### Notes
In the future, I want to make a daemon that tracks various folders and creates snapshots at defined intervals.
Until then, you can pass a config file to the binary in order to use those specific paths and settings. This means you can track multiple directories; you just have to keep multiple config files.

Since all snapshots and associated data are stored within the `.time` directory in the target directory, you can reset the timeline of snapshots by simply removing that folder. Just know that if you do so, ALL past snapshots and changes will be lost, and if you are currently in the "past" you will NOT be able to go back to the future!

### How does it work
Please see (unfinished) for more details on how it actually works. Below only describes the unimplemented regular mode.
#### Regular mode
Let our demo folder contain two files: `demo/test` and `demo/other`.
We modify `demo/test` and take a new snapshot, giving us two patch files:
`.time/000` and `.time/000-reverse` (note that the ID is actually a hash of the date and path).
`.time/000` is created from an empty file and the new file. It is thus our compressed copy of the current version of the file: applying it to an empty file yields the file in the state it was in when the snapshot was taken.
`.time/000-reverse` is a placebo; there is nothing inside it. This is because we would never want to go from our first version of the file to nothing. When read by `restore.rs`, it will be ignored.

Now we modify `demo/test` and take another snapshot. This is where things get interesting. We load `.time/000-reverse` and `demo/test` into memory, then attempt to apply `000-reverse` to `demo/test` and keep the result in a new variable, let's say `ref`. But remember that `000-reverse` is not a valid patch file (since we never want to go from a real file to an empty file), so as a reference we instead use `000` and apply it to an empty "file", yielding the original file. So now `ref` is our original file. Now we take the `demo/test` we loaded into memory and create two new patches: `001`, made with `ref` as old and `demo/test` as new (allowing us to recover `demo/test` given `ref`), and `001-reverse`, created in the opposite direction, allowing us to recover `ref` given `demo/test`.

Now we make one more modification to `demo/test` and take just one more snapshot. This lets us explain what happens when the `-reverse` patch IS valid, which was not the case last time. All further snapshots follow the formula of this one.

We want to make two patches once again, so we load `.time/001-reverse` and `demo/test` into memory and apply `001-reverse` to `demo/test`. Since `001-reverse` IS valid this time, this yields the version of the file right before the last snapshot, i.e. the original file. So now `ref` is our original file. And again we take the `demo/test` in memory and create two more patches: `002`, from `ref` as old and `demo/test` as new (which again allows us to recover `demo/test` given `ref`), and `002-reverse`, which recovers `ref` given `demo/test`.
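In code terms, each snapshot therefore comes down to: rebuild `ref`, then write a forward patch and a reverse patch for every changed file. Below is a rough sketch of that step. It is not the program's code: `diff_bytes` is a placeholder for the real bsdiff-based diff, and only the brotli compression (via the `brotli` crate the project depends on) reflects an actual API:
```rust
use std::fs::File;
use std::io::Write;

// Placeholder for the real diff step (the project uses the `bsdiff` crate here).
// This stub just copies `new` so the example stays self-contained and runnable.
fn diff_bytes(_old: &[u8], new: &[u8]) -> Vec<u8> {
    new.to_vec()
}

// Brotli-compress `data` into `path`. The 4096-byte buffer and window size 22 are
// common defaults for the `brotli` crate; `quality` is the configured 1-11 level.
fn write_compressed(path: &str, data: &[u8], quality: u32) -> std::io::Result<()> {
    let file = File::create(path)?;
    let mut writer = brotli::CompressorWriter::new(file, 4096, quality, 22);
    writer.write_all(data)
}

// Write the `ID` (forward) and `ID-reverse` patch pair described above.
fn snapshot_file(id: &str, reference: &[u8], current: &[u8], quality: u32) -> std::io::Result<()> {
    // Forward patch: recovers `current` given `reference`.
    write_compressed(&format!(".time/{id}"), &diff_bytes(reference, current), quality)?;
    // Reverse patch: recovers `reference` given `current`.
    write_compressed(&format!(".time/{id}-reverse"), &diff_bytes(current, reference), quality)
}
```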
#### Restoring backwards
Ok, finally we can get to restoring a snapshot. At this point we have 3 snapshots, so let's try to restore our very first one.

Once it is selected, we see that there is no `activeSnapshot`, so we can assume we are at the most recent state and the target is in the past. We check the snapshots and see that there are two snapshots to restore in order to reach our target, so we start by restoring the second one we took.

For the first snapshot to restore, the only changed file is `demo/test`, and it is associated with patch `002`. Since we are moving into the past, we want to recover `demo/test` as it was at the time of the snapshot given `ref`, so we are going to use `002`. Now we take the patch entry and check the reference patch. It is `001-reverse`. So we load `demo/test` into memory and apply `001-reverse`, giving us `ref`, which is identical to the `ref` we got while making that snapshot. Now we can apply `001` to `ref`, giving us our target state. We are now halfway to our target snapshot state.

For the second snapshot, once again the only changed file is `demo/test`, which this time is associated with patch `001`. We are again moving into the past, so we want to recover the `ref` from our first snapshot, and so we look at what our reference patch is. We see that it is `000-reverse`, which, when read, is not a valid patch file. Since it is not, we load `000` into memory and apply it against an empty "file", yielding the target file. But wait: why did we even do the previous step if we could have just done this and yielded the target file instantly? Because this is a special case where `000-reverse` was not valid, so that previous step was not needed. In a case where the initial state was not the target, we would still have needed it, since all the patches at that point were created with that reference in mind.

#### Restoring forwards

Now let's restore our third snapshot, so we can return to our normal state.
We check `activeSnapshot` and see that the target is in the future, and that we will need to restore two snapshots to get there. Since we are restoring into the future, no references are necessary, because the patch right in front of the current snapshot used our state as its reference. This means only one patch application per file, instead of two like when restoring backwards! But right before doing any restoring, we need to check `000-reverse` to make sure it isn't an invalid patch. And what do you know, it is! This means the final target snapshot actually does use our current file state as a ref, since it couldn't use the `-reverse` file. This saves us a step and means we can go directly to the target!

Great, now let's go to the final, target snapshot. We load `demo/test` into memory, check whether `001-reverse` is valid, see that it is, and determine that we can safely apply the patch directly to the file, so we load up `002` and apply it, yielding our target file.

Ok, but let's just go over a case where we do have another snapshot ahead, just for example's sake. Ok, so we have a snapshot `003` that has a reference of two snapshots ago, since we restored


### .time structure
The .time folder contains all the information related to snapshots of the directory.
Inside are 3 `json` files:
 - `metadata.json` - This contains stored metadata for every file (date changed, file size, and optionally hash), and is used to detect changed files.
 - `patches.json` - Every time a patch is created, the ID (more on that below), the reference patch that was used, and of course the target path are stored here. There is a layer of abstraction in `diffs.rs` that handles this file.
 - `snapshots.json` - Every time a snapshot is created, every patch that was created and its target path is stored in here.

Whenever a patch of a file is created, two files are created, named `ID` and `ID-reverse`. The `ID` is generated by taking the current date and target path and creating a SHA256 hash from them. This way every patch has a unique path within `.time`, and the path can easily be regenerated from the `patches.json` file. The actual patch is generated by creating a "patch" from the old file (usually a reference in memory) and the new file (the current file), and compressing it with brotli. The `reverse` patch is created in the opposite direction.

`ID` is just a diff between the old file (which can be either an empty file on the first snapshot or a reference-patched file) and the new file, and `ID-reverse` is just a diff between the new file and the old file, allowing us to travel in reverse (since patches are not reversible with `bsdiff`).

When we restore a snapshot, we want to know whether the snapshot is in the past (relative to the current "state/date"), so we store this in `.time/activeSnapshot`. If it does not exist, we can safely assume the most recent snapshot is the current state. Otherwise, every time a snapshot is restored, we write the snapshot date to this file.
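A minimal sketch of the ID scheme described above, using the `sha2`, `hex`, and `chrono` crates the project depends on (the function name and the exact way the date and path are combined are assumptions):
```rust
use chrono::Utc;
use sha2::{Digest, Sha256};

// Hash the current date together with the target path to get a unique
// file name for the patch pair under `.time`.
fn patch_id(target_path: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(Utc::now().to_rfc3339().as_bytes());
    hasher.update(target_path.as_bytes());
    hex::encode(hasher.finalize())
}
```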
### Modes explanation

#### Standard
When a snapshot is created, we restore upwards from the initial patch and then create only a forward patch. This means only one patch is needed per changed file. However, it also means we can't truly move backwards into the past; we have to restore upwards from the initial snapshot until we reach our target.
#### Faster and bigger
This is the same as the fastest and biggest approach (see below), except for one thing: the reference is always just the initial stored copy of the file. This makes creating snapshots much, much faster, but it also means we don't get any potential disk-usage savings from deduplication.
#### Fastest and biggest
This is the same as the broken approach, except that to generate a reference we first restore up to the most recent version and use that. Then we create two patches as before. This means that going forward is faster, but much more storage is required.

### Testing notes
You can place any files you want inside `demo.bak` and then run `test.sh`. Just don't remove `config.json`, or the test script will break.
Multiple snapshots will be made with various folders that already exist within the repository, and restoring each of those snapshots will then be tested for accuracy. All files are checksummed as a way to ensure the program is working properly.

### TODO
Hashing: Use xxhash for file hashing since it is so bloody fast. Currently hashes are only used to verify existing files.
Optionally change .time location.
+Be able to ignore directories, like a .gitignore diff --git a/build/file-time-machine.wxs b/build/file-time-machine.wxs new file mode 100755 index 0000000..3bbf50e --- /dev/null +++ b/build/file-time-machine.wxs @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/demo.bak/config.json b/demo.bak/config.json new file mode 100644 index 0000000..ea546eb --- /dev/null +++ b/demo.bak/config.json @@ -0,0 +1,10 @@ +[ + { + "folder_path": "./demo", + "get_hashes": false, + "thread_count": 0, + "brotli_compression_level": 5, + "snapshot_mode": "fastest", + "its_my_fault_if_i_lose_data": true + } +] diff --git a/gui/button_1.png b/gui/button_1.png new file mode 100644 index 0000000..70e471a Binary files /dev/null and b/gui/button_1.png differ diff --git a/gui/button_2.png b/gui/button_2.png new file mode 100644 index 0000000..9d2820f Binary files /dev/null and b/gui/button_2.png differ diff --git a/gui/button_3.png b/gui/button_3.png new file mode 100644 index 0000000..92300c9 Binary files /dev/null and b/gui/button_3.png differ diff --git a/gui/button_4.png b/gui/button_4.png new file mode 100644 index 0000000..b572f29 Binary files /dev/null and b/gui/button_4.png differ diff --git a/gui/gui.py b/gui/gui.py new file mode 100644 index 0000000..2ef1f13 --- /dev/null +++ b/gui/gui.py @@ -0,0 +1,324 @@ + +# This file was generated by the Tkinter Designer by Parth Jadhav +# https://github.com/ParthJadhav/Tkinter-Designer +# And modified by Mizuki Zou +# This was not made with quality in mind, but rather as a quick and dirty GUI. Keep this in mind while using it. + + +from pathlib import Path +from shutil import which +import os +import json +import time +import subprocess +import threading + +# from tkinter import * +# Explicit imports to satisfy Flake8 +from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage, filedialog, messagebox, Listbox, LEFT, BOTH, Scrollbar, RIGHT, END, HORIZONTAL, Toplevel +from tkinter.ttk import Progressbar +from tkinter.filedialog import askopenfilename + + +OUTPUT_PATH = Path(__file__).parent +ASSETS_PATH = OUTPUT_PATH / Path(r".") + +config_path = "" +target_path = "" + +windows_paths = ["C:\\Progam Files\\File Time Machine\\ftm.exe", "C:\\Program Files (x86)\\File Time Machine\\ftm.exe"] +path_windows = "" + +if os.path.exists(windows_paths[0]): + path_windows = windows_paths[0] +elif os.path.exists(windows_paths[1]): + path_windows = windows_paths[1] + +platform = os.name # nt for Windows, posix for Linux. + +def relative_to_assets(path: str) -> Path: + return ASSETS_PATH / Path(path) + +def select_dir_or_file(dir: bool): # If dir, we know it is the target path. Otherwise, it is the config file + global config_path, target_path, exists + if not exists: + messagebox.showerror("No binary", "FTM binary not found! You need to install it before using the gui.") + return + + if dir: + messagebox.showwarning("Warning!", "This software is NOT stable, and will probably result in data loss if you use it!") + folder_selected = filedialog.askdirectory() + target_path = folder_selected + if folder_selected != "": + print(folder_selected) + window.title(folder_selected) + + if not os.path.isdir(folder_selected+"/.time"): + if messagebox.askquestion('Config file','This folder is not currently being tracked, do you want to begin tracking it? A config folder will be created for you, and default settings will be applied. 
(No hashing, compression level 5, multithreading enabled)'): + os.mkdir(folder_selected+"/.time") + print("Starting to track "+folder_selected) + if platform == "posix": + config_path = folder_selected+"/.time/gui-config.json" + else: + config_path = folder_selected+"\\.time\\gui-config.json" + config_file = open(config_path, 'w') + print("Writing config to "+str(config_file)) + config_file.write('''[ +{ + "folder_path": "'''+folder_selected+'''", + "get_hashes": false, + "thread_count": 0, + "brotli_compression_level": 5, + "snapshot_mode": "fastest", + "its_my_fault_if_i_lose_data": true +} +]''') + config_file.close() + else: + if not os.path.exists(folder_selected+"/.time/gui-config.json"): + print(folder_selected+"/.time/gui-config.json") + messagebox.showinfo("No config", "Could not find a config file, please specify one") + else: + config_path = folder_selected+"/.time/gui-config.json" + get_snap_list() + else: + config_path = askopenfilename() + print(config_path) + +def get_snap_list(): + global listbox, target_path + listbox.delete(0, END) + print(target_path+'/.time/snapshots.json') + if os.path.exists(target_path+'/.time/snapshots.json'): + with open(target_path+'/.time/snapshots.json') as f: + d = json.load(f) + for i in range(len(d)): + print(d[i]["date_created"]) + listbox.insert(END, d[i]["date_created"]) + else: + messagebox.showinfo("No Snapshots", "Did not find any snapshots to list.") + +def create_snapshot(): + global config_path + if config_path == "": + messagebox.showerror("Select folder", "You need to select a folder before you can create a snapshot!") + return + output = "" + progress_window = Toplevel() + progress_window.resizable(width=False, height=False) + progress_window.title("Creating snapshot...") + progress_window.geometry("300x100") + + # Create a progress bar in the new window + progress = Progressbar(progress_window, orient=HORIZONTAL, length=280, mode='indeterminate') + progress.pack(pady=20) + if platform == "posix": + print("Running command 'ftm -c "+config_path+"'") + progress.start() + p1 = subprocess.Popen(['ftm', '-c', config_path], stdout=subprocess.PIPE) + else: + print("Running command '"+path_windows+" -c "+config_path+"'") + progress.start() + p1 = subprocess.Popen([path_windows, '-c', config_path], stdout=subprocess.PIPE) + # p1 = subprocess.Popen(['sleep', '3'], stdout=subprocess.PIPE) + output = p1.communicate()[0] + print(output) + progress.stop() + progress_window.destroy() + if "No files changed" in str(output): + messagebox.showwarning("No changed files", "There were no changed files, so I cannot take a snapshot!") + get_snap_list() + +def restore_snapshot(): + global listbox + # print(listbox.curselection()[0]) + selection = listbox.curselection()[0]+1 + if listbox.curselection() == (): + messagebox.showerror("No snapshot", "No snapshot is selected!") + return + progress_window = Toplevel() + progress_window.resizable(width=False, height=False) + progress_window.title("Restoring snapshot") + progress_window.geometry("300x100") + + # Create a progress bar in the new window + progress = Progressbar(progress_window, orient=HORIZONTAL, length=280, mode='indeterminate') + progress.pack(pady=20) + progress.start() + print("Running 'ftm -c "+config_path+" restore --restore-index "+str(selection)+"'") + p1 = subprocess.Popen( + ['ftm', '-c', config_path, 'restore', '--restore-index', str(selection)], stdout=subprocess.PIPE) + output = p1.communicate()[0] + print(output) + progress.stop() + progress_window.destroy() + if "Finished restoring" 
not in str(output): + messagebox.showerror("Error", "There was an issue restoring a snapshot! Error: "+str(output)) + +window = Tk() + +window.geometry("443x428") +window.configure(bg = "#313244") + +if platform == "posix": + exists = which("ftm") +else: + exists = os.path.exists(path_windows) + +canvas = Canvas( + window, + bg = "#313244", + height = 428, + width = 443, + bd = 0, + highlightthickness = 0, + relief = "ridge" +) + +canvas.place(x = 0, y = 0) +canvas.create_text( + 90.0, + 19.0, + anchor="nw", + text="FTM-GUI", + fill="#CDD6F4", + font=("Jost Regular", 32 * -1) +) + +if exists: + canvas.create_rectangle( + 228.0, + 20.0, + 416.0, + 70.0, + fill="#A6E3A1", + outline="") + + canvas.create_text( + 260.0, + 32.0, + anchor="nw", + text="Found ftm binary!", + fill="#000000", + font=("Jost Regular", 15 * -1) + ) +else: + canvas.create_rectangle( + 228.0, + 20.0, + 416.0, + 70.0, + fill="#e78284", + outline="") + + canvas.create_text( + 250.0, + 32.0, + anchor="nw", + text="No ftm binary found!", + fill="#000000", + font=("Jost Regular", 15 * -1) + ) + +listbox = Listbox(window, bg="#9399B2", selectmode='single') +scrollbar = Scrollbar(window, bg="#9399B2") +# Create scrollbox for list of snapshots +listbox.place(x=29, y=154, width=387, height=209) # Listbox within the rectangle +scrollbar.place(x=416, y=154, height=209) # Scrollbar on the right side of the Listbox + +# Attach the Listbox to the Scrollbar +# for values in range(100): +# listbox.insert(END, values) + +listbox.config(yscrollcommand=scrollbar.set) +scrollbar.config(command=listbox.yview) +canvas.create_rectangle( # Scrollbox rectangle + 29.0, + 154.0, + 416.0, + 363.0, + fill="#9399B2", + outline="") + +canvas.create_text( + 29.0, + 128.0, + anchor="nw", + text="Available snapshots", + fill="#CDD6F4", + font=("Jost Regular", 15 * -1) +) + +button_image_1 = PhotoImage( + file=relative_to_assets("button_1.png")) +button_1 = Button( + image=button_image_1, + borderwidth=0, + highlightthickness=0, + command=lambda: threading.Thread(target=restore_snapshot, daemon=True).start(), + relief="flat" +) +button_1.place( + x=26.0, + y=376.0, + width=196.0, + height=27.0 +) + +button_image_2 = PhotoImage( + file=relative_to_assets("button_2.png")) +button_2 = Button( + image=button_image_2, + borderwidth=0, + highlightthickness=0, + command=lambda: threading.Thread(target=create_snapshot, daemon=True).start(), + relief="flat" +) +button_2.place( + x=225.0, + y=376.0, + width=194.0, + height=26.0 +) + +button_image_3 = PhotoImage( + file=relative_to_assets("button_3.png")) +button_3 = Button( + image=button_image_3, + borderwidth=0, + highlightthickness=0, + command=lambda: select_dir_or_file(True), + relief="flat" +) +button_3.place( + x=17.0, + y=86.0, + width=194.0, + height=26.0 +) + +button_image_4 = PhotoImage( + file=relative_to_assets("button_4.png")) +button_4 = Button( + image=button_image_4, + borderwidth=0, + highlightthickness=0, + command=lambda: select_dir_or_file(False), + relief="flat" +) +button_4.place( + x=225.0, + y=86.0, + width=194.0, + height=26.0 +) + +image_image_1 = PhotoImage( + file=relative_to_assets("image_1.png")) +image_1 = canvas.create_image( + 42.0, + 42.0, + image=image_image_1 +) +window.resizable(False, False) +window.mainloop() diff --git a/gui/image_1.png b/gui/image_1.png new file mode 100644 index 0000000..426be47 Binary files /dev/null and b/gui/image_1.png differ diff --git a/gui/logo.ico b/gui/logo.ico new file mode 100644 index 0000000..2930374 Binary files /dev/null and b/gui/logo.ico 
differ diff --git a/logo.png b/logo.png new file mode 100644 index 0000000..3cf08a0 Binary files /dev/null and b/logo.png differ diff --git a/src/compression.rs b/src/compression.rs new file mode 100644 index 0000000..fdbbf38 --- /dev/null +++ b/src/compression.rs @@ -0,0 +1,26 @@ +use brotli::{CompressorWriter, Decompressor}; +use std::io; +use std::io::Read; +use std::io::Write; + +pub fn compress_data(input: Vec, compression_level: u32) -> io::Result> { + // Create a Vec to hold the compressed data + let mut compressed_data = Vec::new(); + { + let mut compressor = + CompressorWriter::new(&mut compressed_data, 4096, compression_level, 22); + compressor.write_all(&input)?; + compressor.flush()?; + } // The compressor goes out of scope here, and its resources are released. + + Ok(compressed_data) +} + +pub fn decompress_data(compressed: Vec) -> io::Result> { + let mut decompressed_data = Vec::new(); + { + let mut decompressor = Decompressor::new(&compressed[..], 4096); + decompressor.read_to_end(&mut decompressed_data)?; + } + Ok(decompressed_data) +} diff --git a/src/diffs.rs b/src/diffs.rs new file mode 100644 index 0000000..72deb09 --- /dev/null +++ b/src/diffs.rs @@ -0,0 +1,869 @@ +use std::error::Error; +use std::collections::HashSet; +use std::sync::{Arc, Mutex}; +use sha2::{Digest, Sha256}; +use std::path::Path; +use std::fs::File; +use bsdiff::diff; +use walkdir::WalkDir; +use std::fs::metadata; +use std::{io, time::UNIX_EPOCH}; +use std::io::ErrorKind; +use indicatif::{MultiProgress, ProgressBar, ProgressStyle, ProgressState}; +use std::thread; +use chrono::DateTime; +use std::io::Write; +use std::io::Read; +use std::process; +use log::debug; +use xxhash_rust::xxh3::xxh3_64; + +use crate::compression; +use crate::restore; +use crate::DiffEntry; +use crate::MetaFile; +use crate::ModifiedList; + +pub fn create_diff( // Never call this on a directory. Do checks outside of the function + mut old_file: String, + new_file: String, + target_path: String, + time_dir: String, + ref_patch: String, + old_raw: Vec, + compression_level: u32, + patch_store: &Arc>>, + create_reverse: bool, +) -> Result> { + /* This handles everything related to creating a diff, including storing its metadata/location. + If old_raw is set, then we will use it as the target file. Will create a forward diff and backward diff. + Backward diff will be {diff_id}-reverse. Every diff is compressed with brotli before being written. + */ + // println!("create_diff called"); + // println!("New: {new_file}"); + // println!("Old: {old_file}"); + let mut sha256 = Sha256::new(); + let old: Vec; + let current_time: String = chrono::offset::Local::now().to_string(); + + if !Path::new(&old_file).exists() || !Path::new(&old_file).is_file() { + // In this case, we assume there is a new file, so old_file is directed to an empty file + old_file = time_dir.clone() + "/tmp_empty"; + } + + if !old_raw.is_empty() { + // Handle case where old is stored in memory + debug!("create_diff: Old stored in memory!"); + old = old_raw; + } else { + old = std::fs::read(old_file.clone()).unwrap_or_else(|_| panic!("Could not open {old_file}!")); + } + // println!("Old file is {}", old_file); + let new = std::fs::read(new_file.clone()).unwrap_or_else(|_| panic!("Could not open {new_file}!")); + + sha256.update(current_time.clone() + &target_path); // Generate an ID to identify the patch. This can be derived from the data stored in DiffEntry, which can then be used to identify where the patch file is. 
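// A sketch of the patch-ID scheme used on the surrounding lines: the ID is the
// uppercase-hex SHA-256 of `date_created` concatenated with `target_path`, so a
// DiffEntry alone is enough to locate its patch on disk at "<time_dir>/<id>"
// (with the reverse patch at "<time_dir>/<id>-reverse"). The helper name below is
// illustrative only; the real code inlines this computation here and in restore.rs.
fn derive_patch_id(date_created: &str, target_path: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(format!("{date_created}{target_path}"));
    format!("{:X}", hasher.finalize())
}
// e.g. the forward patch written below ends up at
//     time_dir + "/" + &derive_patch_id(&current_time, &target_path)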
+ let patch_id: String = format!("{:X}", sha256.finalize()); + + let mut patch_target = File::create(Path::new(&(time_dir.clone() + "/" + &patch_id))).unwrap_or_else(|_| panic!("Could not create patch_target at {}", + time_dir.clone() + "/" + &patch_id)); + if create_reverse { + debug!("Creating reverse!"); + let mut patch_target_reverse = + File::create(Path::new(&(time_dir.clone() + "/" + &patch_id + "-reverse"))).unwrap_or_else(|_| panic!("Could not create patch_target at {}", + time_dir.clone() + "/" + &patch_id)); + + let mut patch_reverse = Vec::new(); + // println!("{:?}", new); + // println!("{:?}", old); + diff(&new, &old, &mut patch_reverse)?; + // println!("Compressing reverse..."); + + let temp_compressed = compression::compress_data(patch_reverse, compression_level)?; + // let elapsed = now.elapsed(); + // println!("Compressing reverse: {:.2?}", elapsed); + + patch_target_reverse + .write_all(&temp_compressed) + .expect("Unable to write to patch file!"); + } else { + debug!("Creating false reverse!"); + let mut patch_target_reverse = + File::create(Path::new(&(time_dir.clone() + "/" + &patch_id + "-reverse"))).unwrap_or_else(|_| panic!("Could not create patch_target at {}", + time_dir.clone() + "/" + &patch_id)); + write!(patch_target_reverse, ":3").unwrap_or_else(|_| panic!("There was an issue writing to {}!", time_dir.clone() + "/" + &patch_id + "-reverse")); + } + + let mut patch = Vec::new(); + + + + // let now = Instant::now(); + diff(&old, &new, &mut patch)?; + // let elapsed = now.elapsed(); + // println!("Diff calc: {:.2?}", elapsed); + + // let now = Instant::now(); + // println!("Compressing patch..."); + let temp_compressed = compression::compress_data(patch, compression_level)?; + // let elapsed = now.elapsed(); + // println!("Compressing orig: {:.2?}", elapsed); + + patch_target + .write_all(&temp_compressed) + .expect("Unable to write to patch file!"); + + // let now = Instant::now(); + + // let mut writer = brotli::Compressor::new(&mut io::stdout(), 4096, 4, 20); + let patch_store_file = time_dir.clone() + "/patches.json"; + + let patch_entry = DiffEntry { + date_created: current_time, + target_path, + ref_patch, + }; + + { + let mut patch_store = patch_store.lock().unwrap(); + patch_store.push(patch_entry); + + let json = + serde_json::to_string_pretty(&*patch_store).expect("Unable to serialize metadata!"); + let mut file = File::create(Path::new(&patch_store_file)).unwrap_or_else(|_| panic!("Unable to create metadata file at {patch_store_file}")); + file.write_all(json.as_bytes()).unwrap_or_else(|_| panic!("Unable to write to metadata file at {patch_store_file}")); + } + Ok(patch_id) +} + +pub fn get_diffs( + check_hash: bool, + metadata_holder: &HashSet, + folder_path: &str, +) -> Result, Box> { + + let mut different_files: HashSet = HashSet::new(); + let mut temp_hold: HashSet = HashSet::new(); + let mut current_files: HashSet = HashSet::new(); + debug!("folder_path is {folder_path}"); + for entry in WalkDir::new(folder_path) { + let entry = entry?; + let path = entry.path(); + // debug!("{:?}", path); + if let Some(path_str) = path.to_str() { + if !path_str.contains(".time") && !path_str.contains(".git") && path_str != folder_path { + current_files.insert(ModifiedList { + path: path_str.to_string(), + exists: true, + modified: false, // We don't know yet, but we will change this if needed. false will be the default. 
+ }); + } + } else { + // Handle the case where the path is not valid UTF-8 + eprintln!("Error: Path is not valid UTF-8: {}", path.display()); + } + } + for path in metadata_holder.iter() { + temp_hold.insert(ModifiedList { + path: path.path.to_string(), + exists: true, + modified: false, + }); + } + + for path in current_files.iter() { + if !temp_hold.contains(&ModifiedList { + path: path.path.clone(), + exists: true, + modified: false, + }) { + debug!("Found new file:{}", path.path.clone()); + different_files.insert(ModifiedList { + path: path.path.clone(), + exists: true, + modified: true, + }); + } + } + for meta in metadata_holder.iter() { + // println!("Got: {}", meta.path); + match metadata(&meta.path) { + Ok(metadata) => { + // File exists, continue + // let metadata = metadata(&meta.path)?; + // Get the modification time from the metadata + let modified_time = metadata.modified()?; // Replace ? with proper error handling if we want to do it here. Otherwise, we handle it outside the function. + + // Convert SystemTime to UNIX epoch + let duration_since_epoch = modified_time.duration_since(UNIX_EPOCH)?; + let epoch_seconds = duration_since_epoch.as_secs(); + // Checking date modified and size is prioritized over hash since it is much faster. + // if Path::new(&meta.path.clone()).is_file() { + // Ensure the parent directory is not counted as updated file + if epoch_seconds != meta.date_modified { + // Check if file is modified using date modified + debug!( + "File is different: {} (discovered using modify date)", + meta.path + ); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: true, + modified: true, + }); + } else if metadata.len() != meta.size { + // If date modified is the same, check if file size has changed + debug!("File is different: {} (discovered using size)", meta.path); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: true, + modified: true, + }); + } else if check_hash { + // check_hash enabled, check hash as last resort + if hash(&meta.path)? != meta.hash { + debug!("File is different: {} (discovered using hash)", meta.path); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: true, + modified: true + }); + } else { + // println!("Confirmed file is not modified. (Used hash)"); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: true, + modified: false, + }); + } + } else { + // println!("Confirmed file is not modified. (Used modify date and size)"); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: true, + modified: false, + }); + } + // } else if meta.path != folder_path { + // // println!("insert {}", meta.path); + // different_files.insert(ModifiedList { + // path: meta.path.clone(), + // exists: true, + // modified: true, + // }); + // } + } + Err(error) => match error.kind() { + ErrorKind::NotFound => { + debug!("File no longer exists: {}", meta.path); + different_files.insert(ModifiedList { + path: meta.path.clone(), + exists: false, + modified: true, + }); + } + other_error => { + panic!( + "Problem reading file: {} with error: {}", + meta.path, other_error + ); + } + }, + } + } + // println!("{:?}", different_files); + Ok(different_files) +} + +pub fn update_metadata( + metadata_holder: &mut HashSet, + modified_list: &HashSet, + hash_enabled: bool, +) -> Result<(), Box> { + // Update metadata with modified_list to update data. 
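// For clarity, the precedence get_diffs (above) applies when deciding whether a
// known file changed, written as a free-standing sketch. The predicate name is
// illustrative; the real code inlines these checks while also tracking new and
// deleted paths. Order: mtime in whole seconds since the UNIX epoch first, then
// size, and only when hashing is enabled, a SHA-256 comparison as a last resort.
fn looks_modified(stored: &MetaFile, on_disk: &std::fs::Metadata, check_hash: bool) -> std::io::Result<bool> {
    let mtime = on_disk
        .modified()?
        .duration_since(std::time::UNIX_EPOCH)
        .expect("file modified before the UNIX epoch?")
        .as_secs();
    if mtime != stored.date_modified {
        return Ok(true); // modification time changed
    }
    if on_disk.len() != stored.size {
        return Ok(true); // size changed
    }
    if check_hash {
        // hash() is the SHA-256 helper defined later in this file.
        return Ok(hash(&stored.path).expect("could not hash file") != stored.hash);
    }
    Ok(false)
}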
+ let mut paths_to_update = Vec::new(); // Paths that need updating + let mut temp_hold: HashSet = HashSet::new(); + let mut updated_files = HashSet::new(); // Temp set to hold elements that we will add at the end + + // for meta in metadata_holder.iter() { + // let item_to_check = ModifiedList { path: meta.path.clone(), exists: true }; + + // if modified_list.contains(&item_to_check) { + // paths_to_update.push(meta.path.clone()); // Collect paths that need updates + // } + // } + for path in metadata_holder.iter() { + temp_hold.insert(ModifiedList { + path: path.path.to_string(), + exists: true, + modified: false, + }); + } + + for path in modified_list.iter() { + if temp_hold.contains(&ModifiedList { + path: path.path.clone(), + exists: true, + modified: false, + }) { + if path.exists { + paths_to_update.push(path.path.clone()); + } + } else if !temp_hold.contains(&ModifiedList { + path: path.path.clone(), + exists: false, + modified: false, + }) { + paths_to_update.push(path.path.clone()); + } + } + + // for path in modified_list.iter() { + // paths_to_update.push(path.path.clone()); + // } + + println!("Finished generating list. Recalculating metadata..."); + // debug!("{:?}", modified_list); + // println!("{:?}", modified_list); + { + let mut modified_files = false; + for modified in modified_list { + if modified.modified { + modified_files = true; + break + } + } + if !modified_files { + println!("No files changed, nothing to do!"); + process::exit(1); + } + } + + for path in paths_to_update { + let _hash_str: String = Default::default(); + if hash_enabled { + let _hash_str: String = hash(&path).unwrap_or_else(|_| panic!("There was a unhandled issue getting the hash of {path}")); + } else { + let _hash_str: String = "".to_string(); + } + let file_metadata = metadata(&path)?; + let size = file_metadata.len(); // Get file size + + // Get the modification time from the metadata + let modified_time = file_metadata.modified()?; + + // Convert SystemTime to UNIX epoch + let duration_since_epoch = modified_time.duration_since(UNIX_EPOCH)?; + let epoch_seconds = duration_since_epoch.as_secs(); + + let updated_meta_file = MetaFile { + date_modified: epoch_seconds, + hash: _hash_str, + size, + path: path.clone(), + }; + + // Remove the old element + metadata_holder.retain(|meta| meta.path != path); + + // Insert the updated element + updated_files.insert(updated_meta_file); // updated_files gets extended at the end + } + + metadata_holder.extend(updated_files); + + let paths_to_remove: HashSet<_> = metadata_holder + .iter() + .filter_map(|meta| { + let item_to_check = ModifiedList { + path: meta.path.clone(), + exists: false, + modified: true, + }; + if modified_list.contains(&item_to_check) { + Some(meta.path.clone()) + } else { + None + } + }) + .collect(); + + metadata_holder.retain(|meta| !paths_to_remove.contains(&meta.path)); + + Ok(()) +} + +pub fn get_properties( + folder_path: &str, + mut metadata_holder: HashSet, + hash_enabled: bool, +) -> Result, Box> { + let mut file_count = 0; + let mut file_index = 0; + + for _entry in WalkDir::new(folder_path) { + file_count += 1; + } + + let pb = ProgressBar::new(file_count); + pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos:>3}/{len:3} ({eta})") + .unwrap() + .with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()) + .progress_chars("#>-")); + + for entry in WalkDir::new(folder_path) { + file_index += 1; + 
pb.set_position(file_index); // Update progress bar. + + let entry = entry?; + let path = entry.path(); + + // Convert Path to &str + if let Some(path_str) = path.to_str() { + if !path_str.contains(".time") && !path_str.contains(".git") && path_str != folder_path { + // Use the path as a &str + let _hash_str: String = Default::default(); + if hash_enabled { + let _hash_str: String = hash(path_str).unwrap_or_else(|_| panic!("There was a unhandled issue getting the hash of {path_str}")); + } else { + let _hash_str: String = "".to_string(); + } + let metadata = metadata(path)?; + let size = metadata.len(); // Get file size + + // Get the modification time from the metadata + let modified_time = metadata.modified()?; + + // Convert SystemTime to UNIX epoch + let duration_since_epoch = modified_time.duration_since(UNIX_EPOCH)?; + let epoch_seconds = duration_since_epoch.as_secs(); + // println!("{}", size); + // println!("{}", epoch_seconds); + // println!("{}", path_str); + + let meta_file = MetaFile { + date_modified: epoch_seconds, + hash: _hash_str, + size, + path: path_str.to_string(), + }; + metadata_holder.insert(meta_file); + } + // metadata_holder.push(MetaFile {hash: hash}); + } else { + // Handle the case where the path is not valid UTF-8 + eprintln!("Error: Path is not valid UTF-8: {}", path.display()); + } + } + pb.finish(); + Ok(metadata_holder) +} + +pub fn hash(path: &str) -> Result> { + // println!("hash called"); + let mut file = match File::open(Path::new(path)) { + Ok(file) => file, + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + eprintln!("Error: The file '{}' was not found.", path); + panic!("quit"); + } else { + // Handle other kinds of I/O errors + eprintln!("Error: Unable to open file '{}': {}", path, e); + } + return Err(Box::new(e)); + } + }; + + let mut hasher = Sha256::new(); + + let mut buffer = [0u8; 1024]; + while let Ok(bytes_read) = file.read(&mut buffer) { + // Run the loop as long as file.read returns Ok(bytes_read) + if bytes_read == 0 { + break; + } + hasher.update(&buffer[..bytes_read]); // Slice of buffer that starts at 0 and ends at bytes_read + } + + let result = hasher.finalize(); + let hash_string = hex::encode(result); + + // println!("Hash is {:x}", result); + + Ok(hash_string) +} + +pub fn create_diffs_multithread( + patch_ids: &Arc>>, + ref_patch_ids: &Arc>>, + target_paths: &Arc>>, + modified: &Arc>>, + folder_path: &String, + changed_files_vec: Vec, // We need it to be a vec since hashset doesn't support slices + changed_count: u32, + thread_count: u32, + compression_level: u32, + patch_store: &Arc>>, // This will be populated if first run, otherwise it must be pre populated + mut create_reverse: bool, + inital_run: bool, + snapshot_mode: &String, +) { + /* + Get the amount that we should give to each thread via split_into. Then calculate slice begin and end + and pass a cloned slice, the thread can own this. The thread will need to lock and unlock patch_ids and target_paths + however. + */ + debug!("create_diffs_multithread called"); + let mut children = Vec::new(); + let split_into = changed_count / thread_count; + let split_into_rem = changed_count % thread_count; + + let mut path_temp_hold_ref = HashSet::new(); + { + let patch_store = patch_store.lock().unwrap(); + for path in patch_store.iter() { + path_temp_hold_ref.insert(ModifiedList { + path: path.target_path.clone().to_string(), + exists: true, + modified: true, // Not needed. This is not really proper usage of ModifiedList. 
+ }); + } + } + + let m = MultiProgress::new(); + + for i in 0..thread_count { + // Spawn our childrenfolder_path + let folder_path_new = folder_path.clone(); // To prevent moving ownership, we need to clone this value. + let slice_begin: usize = (i * split_into).try_into().unwrap(); + let mut slice_end: usize = ((i * split_into) + split_into).try_into().unwrap(); + // println!("slice_begin: {}", slice_begin); + // println!("slice_end: {}", slice_end); + if i == thread_count-1 { + slice_end += split_into_rem as usize; + } + let patch_ids = Arc::clone(patch_ids); + let target_paths = Arc::clone(target_paths); + let ref_patch_ids = Arc::clone(ref_patch_ids); + let patch_store = Arc::clone(patch_store); + let modified = Arc::clone(modified); + + + + let slice = changed_files_vec[slice_begin..slice_end].to_vec(); // Create new vector since our reference will die + // println!("{:?}", slice); + if inital_run { + children.push(thread::spawn(move || { + for path in slice.iter() { + if path.modified { + if Path::new(&path.path.clone()).is_file() { + let patch_id = create_diff( + "".to_string(), // This will never exist, so we can always create a temp file instead. + path.path.clone(), + path.path.clone(), + folder_path_new.clone() + "/.time", + "First patch".to_string(), + Vec::new(), + compression_level, + &patch_store, + create_reverse, + ) + .unwrap_or_else(|_| panic!("Was unable to create a diff between a new empty file and {}", + path.path)); + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push(patch_id); // Deref is automatic when using a `.` + target_paths.push(path.path.clone()); + ref_patch_ids.push("First patch".to_string()); + modified.push(true); // We want to push true since technically going from no file to a file is "modified". + } // Go out of scope to release our lock + } else { + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("DIR".to_string()); + target_paths.push(path.path.clone()); + ref_patch_ids.push("DIR".to_string()); + modified.push(true); // We want to push true since technically going from no file to a file is "modified". + } + } + } else { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + if Path::new(&path.path).is_file() { + let file_contents = std::fs::read(&path.path).unwrap_or_else(|_| panic!("Could not open {} to check if it has been modified! Do I have read permission?", + path.path)); + let hash = xxh3_64(&file_contents); + patch_ids.push(hash.to_string()); + } else { + patch_ids.push("UNMODIFIED_DIRECTORY".to_string()); + } + target_paths.push(path.path.clone()); + ref_patch_ids.push("UNMODIFIED".to_string()); + modified.push(false); + } + } + })); + } else { + create_reverse = true; + debug!("create_reverse is true"); + let path_temp_hold = path_temp_hold_ref.clone(); + let folder_path_clone = folder_path.clone(); + let m = m.clone(); + let snapshot_mode = snapshot_mode.clone(); // Is this creating correct snapshots? 
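// Worked example of the slicing set up above, with illustrative numbers: for
// changed_count = 10 and thread_count = 4, split_into = 2 and split_into_rem = 2,
// so the threads receive [0..2), [2..4), [4..6) and, because the last thread
// absorbs the remainder, [6..10); every changed file lands in exactly one slice.
// The helper name below is illustrative; the real code computes the bounds inline:
fn slice_bounds(i: u32, thread_count: u32, changed_count: u32) -> (usize, usize) {
    let split_into = changed_count / thread_count;
    let rem = changed_count % thread_count;
    let begin = (i * split_into) as usize;
    let mut end = ((i + 1) * split_into) as usize;
    if i == thread_count - 1 {
        end += rem as usize; // last thread takes the leftover files
    }
    (begin, end)
}
// e.g. slice_bounds(3, 4, 10) == (6, 10)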
+ children.push(thread::spawn(move || { + let total: u64 = slice.len() as u64; + let pb = m.add(ProgressBar::new(total)); + pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos:>3}/{len:3} ({eta})") + .unwrap() + .with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()) + .progress_chars("#>-")); + for path in slice.iter() { + if path.modified { + pb.inc(1); + // println!("{}", path.path.clone()); + + if path_temp_hold.contains(&ModifiedList { + path: path.path.clone().to_string(), + exists: path.exists, + modified: true, + }) { + debug!("Snapshot that can be used for reference exists!"); + // Snapshot exists that we can restore for reference + let search_path = path.path.clone().to_string(); // File that we want to snapshot + // let mut matching_items: Vec<&DiffEntry>; + let patch_unguard; + let patch_store = Arc::clone(&patch_store); + { + let patch_store = Arc::clone(&patch_store); + patch_unguard = patch_store.lock().unwrap().clone(); + + } + let matching_items: Vec<&DiffEntry> = patch_unguard + .iter() + .filter(|item| item.target_path == search_path) + .collect(); // Collect all items inside patch_store that have target_path equal to search_path + // Print all matching items + if !matching_items.is_empty() { + if matching_items.len() > 1 { + // println!("Found matching items:"); + // println!("{:?}", matching_items); + let mut date_check; + let mut target_path: String; + if let Some(first_item) = matching_items.first() { + let first_date_string = first_item.date_created.clone(); + // println!("{first_date_string}"); + date_check = DateTime::parse_from_str( + &first_date_string, + "%Y-%m-%d %H:%M:%S%.9f %z", + ) + .unwrap(); + target_path = first_item.target_path.clone(); + } else { + panic!("There was an issue parsing the patch store! 
Is this a valid date: {:?}", matching_items); + } + // Find correct patch to restore + debug!("{:?}", matching_items); + for item in matching_items { + // Files with snapshots + let date_check_string = item.date_created.clone(); + let new_date_check = DateTime::parse_from_str( + &date_check_string, + "%Y-%m-%d %H:%M:%S%.9f %z", + ) + .unwrap(); + // println!("{}", new_date_check); + // println!("{}", date_check); + if new_date_check > date_check { + // println!("Setting!"); + date_check = new_date_check; + target_path = item.target_path.clone(); + } + } + if Path::new(&target_path).is_file() { + let patch_id = restore::restore_and_diff( + &date_check.to_string(), + &target_path, + &folder_path_clone.clone(), + compression_level, + &patch_store, + create_reverse, + &snapshot_mode + ).expect("There was an issue restoring a reference patch and creating a new patch, did the .time folder go corrupt?"); + + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push(patch_id); + target_paths.push(target_path.clone()); + let mut sha256 = Sha256::new(); + sha256.update(date_check.to_string() + &target_path); + ref_patch_ids.push(format!("{:X}", sha256.finalize())); + modified.push(path.modified); + } + } else { + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("DIR".to_string()); + target_paths.push(target_path.clone()); + ref_patch_ids.push("DIR".to_string()); + modified.push(path.modified); + } + } + } else { + // Restore only existing patch + { + // let mut patch_store = patch_store.lock().unwrap(); + if let Some(first_item) = matching_items.first() { + if Path::new(&first_item.target_path).is_file() { + let patch_id = restore::restore_and_diff( + &first_item.date_created, + &first_item.target_path, + &folder_path_clone.clone(), + compression_level, + &patch_store, + create_reverse, + &snapshot_mode + + ).expect("There was an issue restoring a reference patch and creating a new patch, did the .time folder go corrupt?"); + + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push(patch_id); + target_paths.push(first_item.target_path.clone()); + let mut sha256 = Sha256::new(); + sha256.update(first_item.date_created.clone() + &first_item.target_path); + ref_patch_ids.push(format!("{:X}", sha256.finalize())); + modified.push(path.modified); + } + } else { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("DIR".to_string()); + target_paths.push(first_item.target_path.clone()); + ref_patch_ids.push("DIR".to_string()); + modified.push(path.modified); + } + } + } + } + } else { + panic!("Did not find a valid patch in the patch store, even though there should be one!"); + } + + } else if path.exists { + debug!("No existing patch! I will create a compressed copy of the original file. 
"); + if Path::new(&path.path).is_file() { + let patch_id = create_diff( + "".to_string(), + path.path.clone(), + path.path.clone(), + folder_path_clone.clone() + "/.time", + "First patch".to_string(), + Vec::new(), + compression_level, + &patch_store, + create_reverse, + ) + .unwrap_or_else(|_| panic!("Was unable to create a diff from a new empty file and {}", + path.path)); + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push(patch_id); + target_paths.push(path.path.clone()); + ref_patch_ids.push("First patch".to_string()); + modified.push(true); + } + } else { + + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("DIR".to_string()); + target_paths.push(path.path.clone()); + ref_patch_ids.push("DIR".to_string()); + modified.push(true); + + } + } else { + /* + When we detect a removed file, mark it as such without creating a patch. We will create a special case to + detect the removed file and thus remove it when restoring and moving forward/create it when restoring and + moving backwards. + */ + debug!("Detected removed file!"); + + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("REMOVED".to_string()); + target_paths.push(path.path.clone()); + ref_patch_ids.push("NONE".to_string()); + modified.push(true); + } + } + } else { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + // We will take a hash of the date modified and size of the file to use as an way to identify when the file has been changed. + + if Path::new(&path.path).is_file() { + let file_contents = std::fs::read(&path.path).unwrap_or_else(|_| panic!("Could not open {} to check if it has been modified! Do I have read permission?", + path.path)); + let hash = xxh3_64(&file_contents); + patch_ids.push(hash.to_string()); + } else { + patch_ids.push("UNMODIFIED_DIRECTORY".to_string()); + } + target_paths.push(path.path.clone()); + ref_patch_ids.push("UNMODIFIED".to_string()); + modified.push(false); + // debug!("Skipping {} because it is not modified!", path.path); + } + } // Code for checking existing snapshot goes here + pb.finish(); + })) + } + } + for handle in children { + // Wait for our children to die + handle.join().expect("There was an issue joining all the threads, did a child die?"); + } +} + diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 0000000..a5cd890 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,755 @@ +// Made with pain by someone who desperately needed a distraction from the 2024 election. +// Trans rights are human rights. +// TODO: Optional exclude directories +// TODO: Restore directly, don't restore intermediates. +#![windows_subsystem = "windows"] // Prevents console from opening when on Windows. 
+use chrono::DateTime; +use directories::BaseDirs; +use gumdrop::Options; +use inquire::Select; +use log::{debug, warn}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashSet, + env, + fs::{self, File}, + hash::Hash, + io::{Read, Write}, + path::Path, + process, + sync::{Arc, Mutex}, + thread, + time::Duration, +}; +use xxhash_rust::xxh3::xxh3_64; +// use std::time::Instant; // For debugging + +pub mod compression; +pub mod diffs; +pub mod metadata_manager; +pub mod restore; + +#[derive(Deserialize, Serialize, Hash, PartialEq, Eq, Debug, Clone)] + +pub struct DiffEntry { + // TODO: Depreceate in favor of SnapshotEntries? + date_created: String, + target_path: String, + ref_patch: String, +} + +#[derive(PartialEq, Hash, Eq, Debug, Clone)] +pub struct ModifiedList { + path: String, + exists: bool, + modified: bool, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Hash, Eq)] // Derive Serialize for JSON serialization +pub struct MetaFile { + date_modified: u64, + hash: String, + size: u64, + path: String, +} +#[derive(Debug, Deserialize, Serialize)] +pub struct SnapshotEntries { + date_created: String, + patch_ids: Vec, + target_path: Vec, + ref_patch_ids: Vec, + modified: Vec, +} +#[derive(Deserialize, Serialize, Debug, Clone)] +struct Config { + // This will grow with time + folder_path: String, + get_hashes: bool, + thread_count: u32, + brotli_compression_level: u32, + snapshot_mode: String, + its_my_fault_if_i_lose_data: bool, +} + +#[derive(Debug, Options)] +struct MyOptions { + #[options(help = "print help message")] + help: bool, + #[options(help = "be verbose")] + verbose: bool, + #[options(help = "specify a specific config file")] + config: String, + + // The `command` option will delegate option parsing to the command type, + // starting at the first free argument. + #[options(command)] + command: Option, +} + +#[derive(Debug, Options)] +enum Command { + // Command names are generated from variant names. + // By default, a CamelCase name will be converted into a lowercase, + // hyphen-separated name; e.g. `FooBar` becomes `foo-bar`. + // + // Names can be explicitly specified using `#[options(name = "...")]` + #[options(help = "take a snapshot")] + Snapshot(SnapshotOptions), + #[options(help = "restore a snapshot")] + Restore(RestoreOptions), +} + +// Options accepted for the `snapshot` command +#[derive(Debug, Options)] // TODO: Add options +struct SnapshotOptions {} + +// Options accepted for the `restore` command +#[derive(Debug, Options)] // TODO: Add options (list snapshots, restore specific one) +struct RestoreOptions { + #[options(help = "restore nth snapshot starting from most recent")] + restore_index: u32, +} +fn main() { + let mut want_restore = false; + let mut skip_snap = false; + let mut man_conf = false; + let opts = MyOptions::parse_args_default_or_exit(); + + let conf_dir; + + if opts.verbose { + println!("Enabling verbosity by setting env var RUST_LOG to debug"); + + env::set_var("RUST_LOG", "debug"); + } + + env_logger::init(); + + if !opts.config.is_empty() { + println!("Using specific config file {}!", opts.config); + conf_dir = opts.config; + man_conf = true; + } else { + let home_dir = if let Some(user_dirs) = BaseDirs::new() { + if let Some(path_str) = user_dirs.home_dir().to_str() { + path_str.to_string() + } else { + panic!("Home directory is not valid UTF-8! 
What is wrong with your system??"); + } + } else { + panic!("Unable to retrieve user directories."); + }; + // println!("{home_dir}"); + conf_dir = home_dir + "/.file-time-machine"; + } + + if let Some(Command::Snapshot(ref _snapshot_options)) = opts.command { + println!("Taking snapshot!"); + } else if let Some(Command::Restore(ref _restore_options)) = opts.command { + println!("Restoring!"); + want_restore = true; + } else { + println!("No valid option was provided, taking a snapshot!"); + } + // if args.len() < 2 { + // println!("No arguments provided, attempting to snapshot if config is valid."); + // } else if args[1] == "snapshot" { + // println!("Attempting to snapshot if config is valid."); + // } else if args[1] == "restore" { + // println!("Attempting to restore a snapshot. Fixme!"); + // want_restore = true; + // } else { + // panic!( + // "Invalid command {}\nValid commands are: snapshot, restore.", + // args[1] + // ); + // } + // println!("{conf_dir}"); + if !Path::new(&conf_dir).exists() { + if man_conf { + panic!("Could not locate config file {}!", conf_dir); + } + fs::create_dir(Path::new(&conf_dir.clone())).expect( + "Could not create .file-time-machine in home directory! I should not be run as root.", + ); + println!("Creating .file-time-machine"); + } + let conf_path; + if man_conf { + conf_path = conf_dir; + } else { + conf_path = conf_dir + "/config.json"; + } + let mut config_file = File::open(Path::new(&conf_path)).expect("Could not open config file! Create one at $HOME/.file-time-machine/config.json as specified in documentation."); + + let mut config_file_contents = String::new(); + config_file + .read_to_string(&mut config_file_contents) + .expect( + "The config file contains non UTF-8 characters, what in the world did you put in it??", + ); + let config_holder: Vec = serde_json::from_str(&config_file_contents) + .expect("The config file was not formatted properly and could not be read."); + + let mut folder_path = config_holder[0].folder_path.clone(); // Shut up, I am tired + let hash_enabled = config_holder[0].get_hashes; + let mut thread_count = config_holder[0].thread_count; + let compression_level = config_holder[0].brotli_compression_level; + let snapshot_mode = config_holder[0].snapshot_mode.clone(); + let supress_warn = config_holder[0].its_my_fault_if_i_lose_data; + + if snapshot_mode != "fastest" { + println!("Only fastest snapshot mode is currently implemented!"); + process::exit(1); + } + debug!("Snapshot mode is {}", snapshot_mode); + + if !supress_warn { + warn!("\nWARNING WARNING WARNING\nThis program is NOT production ready! 
You probably WILL lose data using it!\nSet its_my_fault_if_i_lose_data to true to suppress this warning.\n"); + thread::sleep(Duration::from_secs(3)); + } + + folder_path = folder_path.trim_end_matches('/').to_string(); + let create_reverse; // Disabled only on first run to reduce disk usage + + if thread_count == 0 { + thread_count = num_cpus::get() as u32; + debug!("thread_count automatically set to {}", thread_count); + } + if want_restore { + skip_snap = true; + let snapshot_store_file = folder_path.clone() + "/.time/snapshots.json"; + let snapshot_store: Vec; + let mut change_count = 0; + let mut options = Vec::new(); + + // println!("{}", snapshot_store_file); + if !Path::new(&snapshot_store_file).exists() { + panic!("Did not find a valid snapshot store, have you created any snapshots yet?"); + } + + let mut file = File::open(Path::new(&snapshot_store_file)) + .unwrap_or_else(|_| panic!("Could not open {}!", snapshot_store_file)); + + let mut file_contents = String::new(); + file.read_to_string(&mut file_contents) + .unwrap_or_else(|_| panic!("Unable to read file {}!", snapshot_store_file)); + if !file_contents.is_empty() { + snapshot_store = + serde_json::from_str(&file_contents).expect("Snapshot store is corrupt!"); + } else { + panic!("Snapshot store exists, but is empty! No snapshots available."); + } + /*struct Point { + x: f64, + y: f64, + } + + enum Shape { + Circle(Point, f64), + Rectangle(Point, Point), + } + + fn main() { + let my_shape = Shape::Circle(Point { x: 0.0, y: 0.0 }, 10.0); + + match my_shape { + Shape::Circle(_, value) => println!("value: {}", value), + _ => println!("Something else"), + } + } */ + let selected_item; + if let Some(Command::Restore(ref restore_options)) = opts.command { + // println!("{}", snapshot_store.len()); + if snapshot_store.len() >= restore_options.restore_index.try_into().unwrap() + && 0 < restore_options.restore_index.try_into().unwrap() + { + selected_item = DateTime::parse_from_str( + &snapshot_store[restore_options.restore_index as usize - 1].date_created, + "%Y-%m-%d %H:%M:%S%.9f %z", + ) + .unwrap(); + } else { + if restore_options.restore_index != 0 { + // Needed because afaik Gumdrop sets it to 0 if it wasn't passed. This is not desired behaviour. + println!( + "{} is an invalid snapshot. 
Entering interactive.", + restore_options.restore_index + ); + } + for snapshot in &snapshot_store { + for _change in snapshot.patch_ids.clone() { + change_count += 1; + } + let date_entry = DateTime::parse_from_str( + &snapshot.date_created, + "%Y-%m-%d %H:%M:%S%.9f %z", + ) + .unwrap(); + let formatted_date = date_entry.format("%Y-%m-%d %H:%M:%S %z").to_string(); + // debug!("formatted_date is {}", formatted_date); + options.push(formatted_date + " files changed: " + &change_count.to_string()); + change_count = 0; + } + let selection = Select::new("Select a snapshot to restore:", options).prompt(); + + let selected_item_pretty: String = match selection { + Ok(choice) => choice, + Err(_) => panic!("There was an issue, please try again."), + }; + // Extract true option from human readable format + let selected_item_str = selected_item_pretty[0..25].to_string(); + debug!("{selected_item_str}"); + selected_item = + DateTime::parse_from_str(&selected_item_str, "%Y-%m-%d %H:%M:%S %z") + .expect("Could not correctly parse date in activeSnapshot, is it corrupt?"); + } + } else { + panic!("Could not parse a valid command."); + } + + /* + We have a entry that we want to restore, if it is in the past: + In fastest mode, restore directly, don't restore intermediates + + if it is in the future: + Restore up until we restore the proper patch. + */ + + let active_snapshot_path = folder_path.clone() + "/.time/activeSnapshot"; + + if !Path::new(&active_snapshot_path).exists() { + debug!("No activeSnapshot found, assuming target has to be in past."); + + // In fastest, restore_snapshot_until will NOT iterate. In this case, the name is misleading. + + restore::restore_snapshot_until( + snapshot_store, + &folder_path, + &selected_item, + true, + &snapshot_mode, + ); + + let mut active_snapshot = File::create(Path::new(&active_snapshot_path)) + .unwrap_or_else(|_| { + panic!("Could not create {active_snapshot_path}, do I have write permission?") + }); + active_snapshot + .write_all(selected_item.to_string().as_bytes()) + .unwrap_or_else(|_| { + panic!("Unable to write to active_snapshot file at {active_snapshot_path}") + }); + } else { + let mut file = File::open(Path::new(&(folder_path.clone() + "/.time/activeSnapshot"))) + .unwrap_or_else(|_| { + panic!( + "Could not read {}!", + folder_path.clone() + "/.time/activeSnapshot" + ) + }); + + let mut file_contents = String::new(); + file.read_to_string(&mut file_contents).unwrap_or_else(|_| { + panic!( + "Could not read from {}! Do I have correct permissions?", + folder_path.clone() + "/.time/activeSnapshot" + ) + }); + + let active_snapshot_date_stupid = // Please fix me this is stupid + DateTime::parse_from_str(&file_contents, "%Y-%m-%d %H:%M:%S%.9f %z") + .unwrap() + .format("%Y-%m-%d %H:%M:%S %z") + .to_string(); + let active_snapshot_date = + DateTime::parse_from_str(&active_snapshot_date_stupid, "%Y-%m-%d %H:%M:%S %z") + .unwrap(); + + if selected_item > active_snapshot_date { + debug!("Snapshot is in future!"); + restore::restore_snapshot_until( + snapshot_store, + &folder_path, + &selected_item, + false, + &snapshot_mode, + ); + fs::remove_file(&active_snapshot_path).unwrap_or_else(|_| { + panic!( + "Could not remove {}, it needs to be writeable!", + active_snapshot_path + ) + }); + let mut active_snapshot = File::create(Path::new(&active_snapshot_path)) + .unwrap_or_else(|_| { + panic!( + "Could not create {active_snapshot_path}, do I have write permission?" 
+ ) + }); + active_snapshot + .write_all(selected_item.to_string().as_bytes()) + .unwrap_or_else(|_| { + panic!("Unable to write to activeSnapshot file at {active_snapshot_path}") + }); + } else if selected_item < active_snapshot_date { + debug!("Snapshot is in past!"); + restore::restore_snapshot_until( + snapshot_store, + &folder_path, + &selected_item, + true, + &snapshot_mode, + ); + fs::remove_file(&active_snapshot_path).unwrap_or_else(|_| { + panic!( + "Could not remove {}, it needs to be writeable!", + active_snapshot_path + ) + }); + let mut active_snapshot = File::create(Path::new(&active_snapshot_path)) + .unwrap_or_else(|_| { + panic!( + "Could not create {active_snapshot_path}, do I have write permission?" + ) + }); + active_snapshot + .write_all(selected_item.to_string().as_bytes()) + .unwrap_or_else(|_| { + panic!("Unable to write to activeSnapshot file at {active_snapshot_path}") + }); + } else { + println!( + "The snapshot you selected is already the active snapshot! Nothing to do." + ); + process::exit(1); + } + } + println!("Finished restoring. You can safely make changes, but they will not be saved unless a new snapshot is created."); + } + + if !skip_snap { + let mut initial_run = false; + debug!("take snapshot"); + if !Path::new(&(folder_path.clone() + "/.time/metadata.json")).exists() { + debug!("{folder_path}/.time/metadata.json"); + if !Path::new(&(folder_path.clone() + "/.time")).exists() { + fs::create_dir(folder_path.clone() + "/.time").unwrap_or_else(|_| { + panic!( + "Unable to create a .time folder at {}!", + folder_path.clone() + "/.time" + ) + }); + } + File::create(Path::new(&(folder_path.clone() + "/.time/tmp_empty"))).unwrap_or_else( + |_| { + panic!( + "Unable to create a temporary empty file at {}!", + folder_path.clone() + "/.time/tmp_empty" + ) + }, + ); + println!("No .time or metadata found, creating."); + + println!("Collecting metadata of: {folder_path}"); + if hash_enabled { + warn!("Hashes are enabled. Collecting metadata may take a while."); + } + + // hash(folder_path).expect("msg"); + let metadata_holder: HashSet = HashSet::new(); + let metadata_holder = + diffs::get_properties(&folder_path, metadata_holder, hash_enabled) + .expect("Issue getting hashes of files in folder {folder_path}"); + metadata_manager::write_metadata_to_file( + &metadata_holder, + &(folder_path.clone() + "/.time/metadata.json"), + ); + + debug!("Running a initial snapshot..."); + initial_run = true; // Use to indicate that despite there being zero changes, we still want to run on all the files + } + println!("Existing .time folder found, looking for changes..."); + debug!("Looking for changes in directory {}", folder_path); + let metafile = folder_path.clone() + "/.time/metadata.json"; + let mut metadata_holder: HashSet = HashSet::new(); + + if !initial_run { + debug!("initial_run is false, reading metadata!"); + metadata_holder = metadata_manager::read_metadata_from_file(&metafile) + .unwrap_or_else(|_| panic!("Couldn't read the metadata file at {metafile}")); + } + let changed_files = diffs::get_diffs(false, &metadata_holder, &folder_path) + .expect("Couldn't check for diffs! 
No files have been written."); + // for meta in changed_files { + // println!("File Path: {}", meta.path); + // } + File::create(Path::new(&(folder_path.clone() + "/.time/tmp_empty"))).unwrap_or_else(|_| { + panic!( + "Unable to create a temporary empty file at {}!", + folder_path.clone() + "/.time/tmp_empty" + ) + }); + diffs::update_metadata(&mut metadata_holder, &changed_files, hash_enabled) + .expect("Something went wrong when collecting metadata. Do you have read permission?"); + if !initial_run { + debug!("initial_run is false, writing metadata!"); + metadata_manager::write_metadata_to_file(&metadata_holder, &metafile); + } + println!("Finished updating metadata."); + + println!("Creating snapshot with {} threads...", thread_count); + let mut patch_store: Arc>> = Arc::new(Mutex::new(Vec::new())); + let patch_store_file = folder_path.clone() + "/.time/patches.json"; + let snapshot_store_file = folder_path.clone() + "/.time/snapshots.json"; + let patch_ids = Arc::new(Mutex::new(Vec::new())); // These need to be communicated through threads, thus Arc and Mutex. + let target_paths = Arc::new(Mutex::new(Vec::new())); + let ref_patch_ids = Arc::new(Mutex::new(Vec::new())); + let modified = Arc::new(Mutex::new(Vec::new())); + + let mut snapshot_store: Vec = Vec::new(); + + if !Path::new(&snapshot_store_file).exists() { + File::create(Path::new(&snapshot_store_file)).unwrap_or_else(|_| { + panic!( + "Could not create snapshot store at {}!", + snapshot_store_file + ) + }); + } else { + let mut file = File::open(Path::new(&snapshot_store_file)) + .unwrap_or_else(|_| panic!("Could not open {}!", snapshot_store_file)); + + let mut file_contents = String::new(); + file.read_to_string(&mut file_contents) + .unwrap_or_else(|_| panic!("Unable to read file {}!", snapshot_store_file)); + if !file_contents.is_empty() { + snapshot_store = + serde_json::from_str(&file_contents).expect("Snapshot store is corrupt!"); + } + } + + if !Path::new(&patch_store_file).exists() { + println!("Did not find patch store! An original compressed copy of every file will be made to use as reference."); + create_reverse = false; // Since this is the first snapshot, there is no need to create a reverse snapshot and use 2*n storage + // Split here if changed_files is greater than thread count! + let mut changed_files_vec: Vec = Vec::new(); + let mut changed_count: u32 = 0; + + File::create(Path::new(&patch_store_file)).unwrap_or_else(|_| { + panic!( + "Unable to create patch store at {}", + patch_store_file.clone() + "/patches.json" + ) + }); + let mut patch_store_path = + File::open(Path::new(&patch_store_file)).expect("Unable to open patch store file!"); + + let mut patch_store_contents = String::new(); + patch_store_path + .read_to_string(&mut patch_store_contents) + .expect("Unable to open patch store file!"); + patch_store = Arc::new(Mutex::new(Vec::new())); + + for item in &changed_files { + // Allows us to split the Vec to give to threads + changed_count += 1; + changed_files_vec.push(ModifiedList { + path: item.path.clone(), + exists: item.exists, + modified: item.modified, + }); + } + if changed_files_vec.len() > thread_count.try_into().unwrap() { + debug!("Running as initial run!"); + diffs::create_diffs_multithread( + &patch_ids, + &ref_patch_ids, + &target_paths, + &modified, + &folder_path, + changed_files_vec, + changed_count, + thread_count, + compression_level, + &patch_store, + create_reverse, + true, // Inital run + &snapshot_mode, + ); + } else { + // Run regularily here! 
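// Note on the "UNMODIFIED" entries recorded in both this branch and the threaded
// one: for a file that did not change, the snapshot stores a fast 64-bit XXH3
// hash of the file's current bytes in place of a patch ID ("UNMODIFIED_DIRECTORY"
// for directories). A sketch of that marker; the helper name is illustrative and
// the real code inlines it:
fn unmodified_marker(path: &str) -> String {
    if Path::new(path).is_file() {
        let bytes = std::fs::read(path)
            .expect("could not read file to compute its unmodified marker");
        xxh3_64(&bytes).to_string()
    } else {
        "UNMODIFIED_DIRECTORY".to_string()
    }
}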
+ debug!("Run regularily"); + for path in changed_files.iter() { + /* + Get relative path of backup directory, go through changed_files, and reference relative path of backup directory. ModifiedList will handle removed files. + A non-existing file can be passed, and it will be handled within get_diffs. + */ + if path.modified { + if Path::new(&path.path).is_file() { + let patch_id = diffs::create_diff( + "".to_string(), // This will never exist, so we can always create a temp file instead. + path.path.clone(), + path.path.clone(), + folder_path.clone() + "/.time", + "First patch".to_string(), + Vec::new(), + compression_level, + &patch_store, + create_reverse, + ) + .unwrap_or_else(|_| { + panic!( + "Was unable to create a diff between a new empty file and {}", + path.path + ) + }); + { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push(patch_id); + target_paths.push(path.path.clone()); + ref_patch_ids.push("First patch".to_string()); + modified.push(path.modified); + } + } else { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + patch_ids.push("DIR".to_string()); + target_paths.push(path.path.clone()); + ref_patch_ids.push("DIR".to_string()); + modified.push(true); + } + } else { + let mut patch_ids = patch_ids.lock().unwrap(); + let mut target_paths = target_paths.lock().unwrap(); + let mut ref_patch_ids = ref_patch_ids.lock().unwrap(); + let mut modified = modified.lock().unwrap(); + + if Path::new(&path.path).is_file() { + let file_contents = std::fs::read(&path.path).unwrap_or_else(|_| panic!("Could not open {} to check if it has been modified! Do I have read permission?", + path.path)); + let hash = xxh3_64(&file_contents); + patch_ids.push(hash.to_string()); + } else { + patch_ids.push("UNMODIFIED_DIRECTORY".to_string()); + } + target_paths.push(path.path.clone()); + ref_patch_ids.push("UNMODIFIED".to_string()); + modified.push(false); + // debug!("Skipping {} because it is not modified!", path.path); + } + } + } + } else { + debug!("Found patch store!"); + // let path_temp_hold: HashSet = HashSet::new(); + let mut patch_store_path = File::open(Path::new(&patch_store_file)) + .unwrap_or_else(|_| panic!("Could not open {patch_store_file}!")); + + let mut patch_store_contents = String::new(); + patch_store_path + .read_to_string(&mut patch_store_contents) + .expect("Patch store contains non UTF-8 characters which are unsupported!"); + { + let mut patch_store = patch_store.lock().unwrap(); + + *patch_store = serde_json::from_str(&patch_store_contents) + .expect("Patch store is corrupt. Sorgy :("); + } + /* + Cycle through changed files, and check if a snapshot exists. If it does, restore snapshot to memory, to use as reference file. + Then we create a new patch from the two. + + If no snapshot exists yet, use backup directory as reference file to create snapshot. 
+ */ + // REMEMBER TO PASS patch_store + // println!("{:?}", path_temp_hold); + // println!("fdsfsd"); + // println!("{:?}", changed_files); // populate patch_store and pass it + let mut changed_files_vec: Vec = Vec::new(); + let mut changed_count: u32 = 0; + for item in &changed_files { + // Allows us to split the Vec to give to threads + changed_files_vec.push(ModifiedList { + path: item.path.clone(), + exists: item.exists, + modified: item.modified, + }); + changed_count += 1; + } + debug!("Inital run is false!"); + let real_thread_count = if changed_count >= thread_count { + thread_count + } else { + 1 + }; // Only do true multithreading if necessary + debug!("real_thread_count is {real_thread_count}"); + diffs::create_diffs_multithread( + &patch_ids, + &ref_patch_ids, + &target_paths, + &modified, + &folder_path, + changed_files_vec, + changed_count, + real_thread_count, + compression_level, + &patch_store, + false, + false, + &snapshot_mode, + ); + } + + { + // Create a new scope to unlock mutex + debug!("Writing snapshot to store!"); + let patch_ids = patch_ids.lock().unwrap(); + let target_paths = target_paths.lock().unwrap(); + let ref_patch_ids = ref_patch_ids.lock().unwrap(); + let modified = modified.lock().unwrap(); + if patch_ids.len() > 0 { + // println!("Writing snapshot to store!"); + let current_time: String = chrono::offset::Local::now().to_string(); + snapshot_store.push(SnapshotEntries { + date_created: current_time, + patch_ids: patch_ids.to_vec(), + target_path: target_paths.to_vec(), + ref_patch_ids: ref_patch_ids.to_vec(), + modified: modified.to_vec(), + }); + + let json = serde_json::to_string_pretty(&snapshot_store) + .expect("Unable to serialize metadata!"); + + // Write the JSON string to a file + let mut file = File::create(Path::new(&snapshot_store_file)).unwrap_or_else(|_| { + panic!("Unable to open snapshot file at {}", snapshot_store_file) + }); + file.write_all(json.as_bytes()).unwrap_or_else(|_| { + panic!( + "Unable to write to metadata file at {}", + snapshot_store_file + ) + }); + } + } + + // for meta in metadata_holder { + // println!("File Path: {}", meta.path); + // println!("File Hash: {}", meta.hash); + // println!("File Size: {} bytes", meta.size); + // println!("Last Modified Time: {} seconds since UNIX epoch", meta.date_modified); + // } + // Remove our tmp file we used + fs::remove_file(folder_path.clone() + "/.time/tmp_empty") + .expect("Unable to remove old tmp file"); + } +} diff --git a/src/metadata_manager.rs b/src/metadata_manager.rs new file mode 100644 index 0000000..a844dfa --- /dev/null +++ b/src/metadata_manager.rs @@ -0,0 +1,32 @@ +use std::collections::HashSet; +use std::error::Error; +use std::fs::File; +use std::io::Read; +use std::io::Write; +use std::path::Path; + +use crate::MetaFile; + +pub fn write_metadata_to_file(metadata_holder: &HashSet, filename: &str) { + // Serialize the vector to a JSON string + let json = + serde_json::to_string_pretty(metadata_holder).expect("Unable to serialize metadata!"); + + // Write the JSON string to a file + let mut file = File::create(Path::new(filename)) + .unwrap_or_else(|_| panic!("Unable to create metadata file at {filename}")); + file.write_all(json.as_bytes()) + .unwrap_or_else(|_| panic!("Unable to write to metadata file at {filename}")); +} + +pub fn read_metadata_from_file(filename: &str) -> Result, Box> { + // Load file to string, and use serde to turn it into Vec + let mut file = File::open(Path::new(filename))?; + + let mut file_contents = String::new(); + 
file.read_to_string(&mut file_contents)?; + + let metadata_holder: HashSet = serde_json::from_str(&file_contents)?; + + Ok(metadata_holder) +} diff --git a/src/restore.rs b/src/restore.rs new file mode 100644 index 0000000..7493eae --- /dev/null +++ b/src/restore.rs @@ -0,0 +1,737 @@ +use bsdiff::patch; // TODO: In fastest mode, we can restore directly the target since the reference is always just the original file. So restore_until needs to implement this. +use chrono::DateTime; // TODO: Snapshots should include a list of every single file at it's current state. This way we can actually ensure we get to the correct state. +use chrono::FixedOffset; +use log::debug; +use sha2::{Digest, Sha256}; +use std::error::Error; +use std::fs::{create_dir_all, exists, remove_dir_all, remove_file, File}; +use std::io::Read; +use std::path::Path; +use std::sync::{Arc, Mutex}; +use walkdir::WalkDir; +use xxhash_rust::xxh3::xxh3_64; + +use crate::compression; +use crate::diffs; +use crate::DiffEntry; +use crate::SnapshotEntries; + +pub fn restore_and_diff( + _date_created: &String, + target_path: &String, + folder_path: &String, + compression_level: u32, + patch_store: &Arc>>, + create_reverse: bool, + snapshot_mode: &String, +) -> Result> { + debug!("Creating a patch using reference patch!"); + let mut target_date = "".to_string(); + let mut valid_target_path = "".to_string(); + if snapshot_mode == "fastest" { + debug!("Trying to find initial patch to use as for fastest mode"); + { + let patch_store = patch_store.lock().unwrap(); + + for patch in patch_store.iter() { + if patch.ref_patch == "First patch" && patch.target_path == *target_path { + debug!("Found good patch"); + target_date = patch.date_created.clone(); + valid_target_path = patch.target_path.clone(); + } + } + if target_date.is_empty() || valid_target_path.is_empty() { + panic!("Could not find a valid initial patch for {}!", target_path); + } + } + } else { + panic!("Invalid snapshot mode {}!", snapshot_mode); + } + let mut sha256 = Sha256::new(); + sha256.update(target_date + &valid_target_path); // Generate an ID to identify the patch. This can be derived from the data stored in DiffEntry, which can then be used to identify where the patch file is. + let patch_id: String = format!("{:X}", sha256.finalize()); // We now have the ID of the patch, so we can restore it. + let patch_file; + let target_file; + + let mut patch_file_compressed = std::fs::read( + folder_path.clone() + "/.time/" + &patch_id + "-reverse", + ) + .unwrap_or_else(|_| { + panic!( + "Could not open patch file! Try removing {} from the patch store.", + &target_path + ) + }); + if patch_file_compressed == [58, 51] { + debug!("Detected fake patch!"); + // Not a valid patch, so we need to recover original file to use as reference. + patch_file_compressed = std::fs::read(folder_path.clone() + "/.time/" + &patch_id) + .unwrap_or_else(|_| { + panic!( + "Could not open patch file! Try removing {} from the patch store.", + &target_path + ) + }); + patch_file = compression::decompress_data(patch_file_compressed).unwrap_or_else(|_| { + panic!( + "Could not decompress patch file {}! Is it corrupt?", + target_path + ) + }); + target_file = Vec::new(); + } else { + patch_file = compression::decompress_data(patch_file_compressed).unwrap_or_else(|_| { + panic!( + "Could not decompress patch file {}! Is it corrupt?", + target_path + ) + }); + target_file = std::fs::read(target_path).unwrap_or_else(|_| { + panic!( + "Could not open {} to restore reference patch! 
Metadata needs updating!", + &target_path + ) + }); + } + let mut ref_file = Vec::new(); + + patch(&target_file, &mut patch_file.as_slice(), &mut ref_file).unwrap_or_else(|_| { + panic!( + "There was an error restoring a reference patch to memory! Target file was {}", + &target_path + ) + }); + + let patch_id = diffs::create_diff( + "".to_string(), + target_path.clone(), + target_path.clone(), + folder_path.clone() + "/.time", + patch_id, + ref_file, + compression_level, + patch_store, + create_reverse, + ) + .expect("There was an issue while creating a diff!"); + Ok(patch_id) +} + +pub fn restore_snapshot( + entry: &SnapshotEntries, + time_dir: String, + past: bool, + snapshot_mode: &String, +) { + let mut patch_path = "".to_string(); + let mut first_cycle = true; + println!("Restoring snapshot {}!", entry.date_created); + let mut dirs_to_remove = Vec::new(); // Remove dirs at the end since we need to cleanup the insides first + // println!("{}", entry.patch_ids.len()); + // println!("{}", entry.ref_patch_ids.len()); + for (index_counter, id) in entry.patch_ids.clone().iter().enumerate() { + // println!("{:?}", &entry.target_path); + // TODO: Remove file if it is supposed to be removed + // TODO: Check if is first patch, if so, don't attempt to restore + debug!("Restoring patch {}", id); + debug!("Restoring past version: {}", past); + let mut skip_file = false; + if id == "REMOVED" { + skip_file = true; + debug!("Detected removed file!"); + if past { + // Going to past where file used to exist, so we need to restore upwards to recreate it. + // Open patch store so we can restore + + let patch_store_file = time_dir.clone() + "/patches.json"; + + // let path_temp_hold: HashSet = HashSet::new(); + let mut patch_store_path = File::open(Path::new(&patch_store_file)) + .unwrap_or_else(|_| panic!("Could not open {patch_store_file}!")); + + let mut patch_store_contents = String::new(); + + patch_store_path + .read_to_string(&mut patch_store_contents) + .expect("Patch store contains non UTF-8 characters which are unsupported!"); + let patch_store: Vec = serde_json::from_str(&patch_store_contents) + .expect("Patch store is corrupt. Sorgy :("); + + for patch_entry in patch_store.iter() { + let mut sha256 = Sha256::new(); + // As long as patch store is properly ordered, we can go through and restore all matching paths. + if patch_entry.target_path == entry.target_path[index_counter] { + if &patch_entry.ref_patch == "First patch" { + let mut new_file: Vec = Vec::new(); + if first_cycle { + check_and_create(&patch_entry.target_path); + first_cycle = false; + } //else { + // panic!("Detected patches.json is out of order! Cannot safely continue."); + // } + check_and_create(&patch_entry.target_path); + let target_file = std::fs::read(&patch_entry.target_path).unwrap(); + sha256.update( + patch_entry.date_created.clone() + &patch_entry.target_path, + ); + let patch_id = format!("{:X}", sha256.finalize()); + let patch_path = time_dir.clone() + "/" + &patch_id; + let patch_file_compressed = + std::fs::read(&patch_path).unwrap_or_else(|_| panic!("Could not open {} to restore snapshot! Do I have read permission?", + patch_path)); + let patch_file = compression::decompress_data(patch_file_compressed) + .unwrap_or_else(|_| { + panic!( + "Could not decompress data in file {}! Is it corrupt?", + patch_path + ) + }); + patch(&target_file, &mut patch_file.as_slice(), &mut new_file) + .unwrap_or_else(|_| { + panic!("Unable to restore patch {}! 
Is it corrupt?", patch_id) + }); + std::fs::write(&patch_entry.target_path, &new_file).unwrap_or_else( + |_| { + panic!( + "Unable to open file for writing: {}", + &patch_entry.target_path + ) + }, + ); + } else if &patch_entry.ref_patch != "NONE" { + let mut new_file: Vec = Vec::new(); + let target_file = + std::fs::read(&patch_entry.target_path).unwrap_or_else(|_| panic!("Could not open {} to restore snapshot. Metadata needs updating!", + &patch_entry.target_path)); + sha256.update( + patch_entry.date_created.clone() + &patch_entry.target_path, + ); + let patch_id = format!("{:X}", sha256.finalize()); + let patch_path = time_dir.clone() + "/" + &patch_id; + let patch_file_compressed = + std::fs::read(&patch_path).unwrap_or_else(|_| panic!("Could not open {} to restore snapshot! Do I have read permission?", + patch_path)); + let patch_file = compression::decompress_data(patch_file_compressed) + .unwrap_or_else(|_| { + panic!( + "Could not decompress data in file {}! Is it corrupt?", + patch_path + ) + }); + patch(&target_file, &mut patch_file.as_slice(), &mut new_file) + .unwrap_or_else(|_| { + panic!("Unable to restore patch {}! Is it corrupt?", patch_id) + }); + std::fs::write(&patch_entry.target_path, &new_file).unwrap_or_else( + |_| { + panic!( + "Unable to open file for writing: {}", + &patch_entry.target_path + ) + }, + ); + } else { + debug!("Skipping file since ref_id is NONE"); + } + } + } + } else { + // In future, so we simply remove the file. + let target_file = &entry.target_path[index_counter]; + let path = Path::new(target_file); + if path.is_dir() { + debug!("Adding directory to queue to be removed: {}", target_file); + dirs_to_remove.push(target_file); + } else { + let true_path = Path::new(target_file); + if true_path.exists() { + debug!("Removing file {}", target_file); + remove_file(Path::new(&target_file)) + .unwrap_or_else(|_| panic!("Could not remove file {}!", &target_file)); + } + } + } + } else if id == "DIR" || id == "UNMODIFIED_DIRECTORY" { + skip_file = true; + debug!( + "Creating dir if not exists: {}", + &entry.target_path[index_counter] + ); + + let dir = Path::new(&entry.target_path[index_counter]); + + if !dir.exists() { + create_dir_all(dir) + .unwrap_or_else(|_| panic!("Could not create directory {:?}!", dir)); + } + } else if id.len() < 64 { + // Assume this is a unmodified file hash. As such, check if the file is modified, and if it is, restore the original file. + let file_contents = std::fs::read(&entry.target_path[index_counter]).unwrap_or_else(|_| panic!("Could not open {} to check if it has been modified! Do I have read permission?", + entry.target_path[index_counter])); + let hash = xxh3_64(&file_contents); + + if &hash.to_string() == id { + debug!( + "{} is unmodified, leaving it alone!", + entry.target_path[index_counter] + ); + skip_file = true; + } else { + debug!( + "{} is modified, restoring original", + entry.target_path[index_counter] + ); + } + } + if !skip_file { + debug!("No special conditions met, restoring file."); + // Not a removed file + if !past && entry.modified[index_counter] { + // Target is in future. + // In fastest mode, the reference is ALWAYS the first patch (which is just a compressed copy of the file.) + // So we load this and then apply our patch to it. Thus we are fast, but also hog disk usage. 
+ if snapshot_mode == "fastest" { + debug!("Going towards future in fastest mode"); + let mut patch_store_file = + File::open(Path::new(&(time_dir.clone() + "/patches.json"))) + .unwrap_or_else(|_| { + panic!( + "Unable to open patch store at {}!", + time_dir.clone() + "/patches.json" + ) + }); + + let mut patch_store_contents = String::new(); + patch_store_file + .read_to_string(&mut patch_store_contents) + .expect("Unable to open patch store file!"); + // patch_path = time_dir.clone() + "/" + &id + "-reverse"; + let patch_store: Vec = serde_json::from_str(&patch_store_contents) + .expect("Patch store is corrupt. Sorgy :("); + // let mut iter = patch_store.iter().peekable(); // Wrong mode dipshit, you can use this in the future for other modes. + // let mut target_id: String = "".to_string(); + // while let Some(patch) = iter.next() { + // let mut sha256 = Sha256::new(); + // sha256.update(patch.date_created.clone() + &patch.target_path); + // let check_id = format!("{:X}", sha256.finalize()); // We now have the correct target id + + // if check_id == id { + // debug!( + // "Found current patch inside store, getting patch directly ahead..." + // ); + // let mut sha256 = Sha256::new(); + // sha256.reset(); + + // if let Some(next_patch) = iter.peek() { + // sha256.update( + // next_patch.date_created.clone() + &next_patch.target_path, + // ); + // target_id = format!("{:X}", sha256.finalize()); + // debug!("Actually applying patch {}", target_id); + // } else { + // debug!("UNFINISHED UNFINISHED UNFINISHED: Need to handle case where there is no next patch!"); + // } + // } + // } + let mut target_date = "".to_string(); + let mut valid_target_path = "".to_string(); + + for patch in patch_store.iter() { + if patch.ref_patch == "First patch" + && patch.target_path == entry.target_path[index_counter] + { + debug!("Found correct initial patch"); + target_date = patch.date_created.clone(); + valid_target_path = patch.target_path.clone(); + } + } + + let true_path = Path::new(&entry.target_path[index_counter]); + if true_path.is_dir() { + debug!("Got First patch on a directory, creating {:?}", true_path); + create_dir_all(true_path).unwrap_or_else(|_| { + panic!("Unable to create directory {:?}!", true_path) + }); + } else { + if target_date.is_empty() || valid_target_path.is_empty() { + panic!( + "Could not find a valid initial patch in the patch store for {}", + entry.target_path[index_counter] + ) + } + + let mut sha256 = Sha256::new(); + + sha256.update(target_date + &valid_target_path); + let patch_id = format!("{:X}", sha256.finalize()); + + debug!("Applying patch found from patch store"); + + debug!("Checking if file exists"); + if !exists(&entry.target_path[index_counter]).unwrap_or_else(|_| { + panic!( + "Could not check if file exists at {}", + entry.target_path[index_counter] + ) + }) { + debug!( + "File doesn't exist yet, creating {}", + entry.target_path[index_counter] + ); + check_and_create(&entry.target_path[index_counter]); + } + + let mut final_file = Vec::new(); + let mut ref_file: Vec = Vec::new(); + let patch_path = time_dir.clone() + "/" + &patch_id; // Note that this will never be the first patch, so we don't need to handle that case. + let patch_final = time_dir.clone() + "/" + &id; + let target_path = &entry.target_path[index_counter]; + // let target_file = std::fs::read(&target_path).expect(&format!( + // "Could not open {} to restore snapshot. 
Metadata needs updating!", + // &target_path + // )); + let patch_file_compressed = std::fs::read(&patch_path).unwrap_or_else(|_| panic!("Could not open {} to restore snapshot! Do I have read permission?", + patch_path)); + let patch_file = compression::decompress_data(patch_file_compressed) + .unwrap_or_else(|_| { + panic!( + "Could not decompress data in file {}! Is it corrupt?", + patch_path + ) + }); + let patch_final_file_compressed = + std::fs::read(&patch_final).unwrap_or_else(|_| panic!("Could not open {} to restore snapshot! Do I have read permission?", + patch_path)); + let patch_file_final = + compression::decompress_data(patch_final_file_compressed) + .unwrap_or_else(|_| { + panic!( + "Could not decompress data in file {}! Is it corrupt?", + patch_path + ) + }); + // Generate initial version of file to be used as the reference + patch(&final_file, &mut patch_file.as_slice(), &mut ref_file) + .unwrap_or_else(|_| { + panic!("There was an issue applying patch {}!", patch_path) + }); + + patch(&ref_file, &mut patch_file_final.as_slice(), &mut final_file) + .unwrap_or_else(|_| { + panic!("There was an issue applying patch {}!", patch_path) + }); + debug!("Writing final target file"); + std::fs::write(target_path, &final_file) + .unwrap_or_else(|_| panic!("Unable to write to {}!", target_path)); + // index_counter += 1; + } + } else { + panic!( + "Only fastest snapshot mode supported, not {}!", + snapshot_mode + ); + } + } else if entry.modified[index_counter] { + // Target is in past. Currently works for "fastest" mode. Others untested + let mut ref_patch_compressed: Vec = [58, 51].to_vec(); // The default state will fail the validity check, so we don't need a brand new variable to track if this is "First patch" or not. + let mut ref_path = "".to_string(); + if &entry.ref_patch_ids[index_counter] != "First patch" { + debug!("Restoring into the past!"); + patch_path = time_dir.clone() + "/" + &id; + + ref_path = + time_dir.clone() + "/" + &entry.ref_patch_ids[index_counter] + "-reverse"; + debug!("Found reference patch {}", ref_path); + ref_patch_compressed = std::fs::read(&ref_path).unwrap_or_else(|_| { + panic!("Could not read reference patch at {}!", ref_path) + }); + } + + if ref_patch_compressed == [58, 51] { + // Either this is first patch, or we tried to read a false patch. Either way, we will just restore the initial compressed patch. + + if &entry.ref_patch_ids[index_counter] == "First patch" { + // First patch, we need to get the proper id to restore. Unfortunately, this means we need to load and process patches.json. + debug!("Got a first patch, loading patches.json..."); + + let patch_store_file = time_dir.clone() + "/patches.json"; + + // let path_temp_hold: HashSet = HashSet::new(); + let mut patch_store_path = File::open(Path::new(&patch_store_file)) + .unwrap_or_else(|_| panic!("Could not open {patch_store_file}!")); + + let mut patch_store_contents = String::new(); + + patch_store_path + .read_to_string(&mut patch_store_contents) + .expect( + "Patch store contains non UTF-8 characters which are unsupported!", + ); + let patch_store: Vec = + serde_json::from_str(&patch_store_contents) + .expect("Patch store is corrupt. 
Sorgy :("); + let mut target_id = "".to_string(); + for item in patch_store.iter() { + if item.target_path == entry.target_path[index_counter] { + let mut sha256 = Sha256::new(); + sha256.update(item.date_created.clone() + &item.target_path); + target_id = format!("{:X}", sha256.finalize()); // We now have the correct target id + break; + } + } + if target_id.is_empty() { + panic!( + "Could not find a target_id that should exist for file {:?}", + &entry.target_path + ); + } + ref_path = time_dir.clone() + "/" + &target_id; + debug!("Got ref_path as {}", ref_path); + } else { + // Read a false patch, so remove the reverse and restore it + ref_path = time_dir.clone() + "/" + &entry.ref_patch_ids[index_counter]; + } + ref_patch_compressed = std::fs::read(&ref_path).unwrap_or_else(|_| { + panic!("Could not read reference patch at {}!", ref_path) + }); + let mut final_target: Vec = Vec::new(); + let empty: Vec = Vec::new(); + let ref_patch_full_file = compression::decompress_data(ref_patch_compressed) + .unwrap_or_else(|_| { + panic!("There was an error decompressing {}!", ref_path) + }); + patch( + &empty, + &mut ref_patch_full_file.as_slice(), + &mut final_target, + ) + .unwrap_or_else(|_| { + panic!( + "There was an error applying patch {} to an empty vec!", + ref_path + ) + }); + let target_path = &entry.target_path[index_counter]; + check_and_create(target_path); + debug!("Restoring original file {}", target_path); + std::fs::write(target_path, &final_target) + .unwrap_or_else(|_| panic!("Unable to write to {}!", target_path)); + } else { + // This is a valid patch/regular case + // TODO: Detect if we are going to the original version and skip the middle steps. + let mut ref_file: Vec = Vec::new(); + let mut final_target: Vec = Vec::new(); + let target_file; + { + let ref_patch = compression::decompress_data(ref_patch_compressed) + .unwrap_or_else(|_| { + panic!("There was an issue decompressing {}!", ref_path) + }); + + let target_path = &entry.target_path[index_counter]; + + target_file = std::fs::read(target_path).unwrap_or_else(|_| { + panic!( + "Could not open {} to restore snapshot. Metadata needs updating!", + &target_path + ) + }); + + patch(&target_file, &mut ref_patch.as_slice(), &mut ref_file) + .unwrap_or_else(|_| { + panic!("There was an issue applying reference patch {}!", ref_path) + }); // TODO: This is impossible, right? We cannot apply this patch against a new unkown file. We need to build upwards. + } + let patch_file_compressed = std::fs::read(&patch_path).unwrap_or_else(|_| { + panic!( + "Could not open {} to restore snapshot! Do I have read permission?", + patch_path + ) + }); + let patch_file = compression::decompress_data(patch_file_compressed) + .unwrap_or_else(|_| { + panic!( + "Could not decompress data in file {}! Is it corrupt?", + patch_path + ) + }); + patch(&ref_file, &mut patch_file.as_slice(), &mut final_target).unwrap_or_else( + |_| panic!("There was an issue applying patch {}!", patch_path), + ); + let target_path = &entry.target_path[index_counter]; + + debug!("Restoring file {}", target_path); + std::fs::write(target_path, &final_target) + .unwrap_or_else(|_| panic!("Unable to write to {}!", target_path)); + } + } else { + debug!("{:?} is not modified, leaving it alone!", entry.target_path); + } + } + } + + // We need to do a walkthrough of the directory and remove any files that are not part of the snapshot. This way files added in the future won't be there when we restore a past snapshot. 
+ let folder_path = Path::new(&time_dir).parent(); + match folder_path { + // Ok what the fuck is even going on :< clearly I need to read the rust book better + Some(x) => { + for path in WalkDir::new(x) { + match path { + Ok(v) => { + let v_parent = v.path().parent(); + match v_parent { + Some(vp) => { + if !entry.target_path.contains(&v.path().display().to_string()) + && v.path() != x + && v.path() != Path::new(&time_dir) + && vp != Path::new(&time_dir) + { + // println!("{:?}", v.path()); + if v.path().is_file() { + debug!("Removing {}", v.path().display()); + remove_file(v.path()).unwrap_or_else(|_| { + panic!("Unable to remove {}!", v.path().display()) + }) + // } else if v + // .path() + // .read_dir() + // .unwrap_or_else(|_| { + // panic!("Could not peek into directory {:?}", v.path()) + // }) + // .next() + // .is_none() + } else { + // println!("{}", v.path().display()); + // Check if directory to be removed is referenced in list at all, and if the reference is NOT to remove it, and if so, don't remove it. + let mut id_count = 0; + for path in entry.target_path.iter() { + // This ensures we don't accidentally remove some empty directory that we want to keep. + if !path.contains(&v.path().display().to_string()) + && v.path().exists() + && entry.patch_ids[id_count] != "REMOVED" + { + debug!("Removing {}", v.path().display()); + remove_dir_all(v.path()).unwrap_or_else(|_| { + panic!( + "Unable to remove {}!", + v.path().display() + ) + }); + } + id_count += 1; + } + } + } + } + None => panic!("Error parsing {:?}", v_parent), + } + } + Err(e) => println!("Error parsing {}", e), + } + } + } + None => panic!( + "There was an issue trying to get the parent directory of {:?}!", + folder_path + ), + } + + for path in dirs_to_remove.iter() { + let true_path = Path::new(path); + if true_path.exists() { + remove_dir_all(path).unwrap_or_else(|_| panic!("Could not remove dir {}!", path)); + } + // We can do all, since we know at this point the only remaining directories will just have other empty directories in it (assuming nothing went wrong when collecting metadata.) + } +} + +pub fn restore_snapshot_until( + // In fastest mode, reference always being the initial file means we can restore directly when going forward or backward, making restoring much much faster. + snapshot_store: Vec, + folder_path: &String, + selected_item: &DateTime, + in_past: bool, + snapshot_mode: &String, +) { + if snapshot_mode == "fastest" { + // If we are in fastest mode, we don't care about restoring anything in between since the reference is alwyas the initial version of the file. + debug!("restoring_until in fastest mode. 
Skipping intermediates."); + debug!("Target date is {}", selected_item); + for snapshot in snapshot_store.iter() { + let date_entry = + DateTime::parse_from_str(&snapshot.date_created, "%Y-%m-%d %H:%M:%S%.9f %z") + .unwrap(); + let formatted_date = date_entry.format("%Y-%m-%d %H:%M:%S%.9f %z").to_string(); + debug!("formatted_date is {}", formatted_date); + if formatted_date == *selected_item.format("%Y-%m-%d %H:%M:%S%.9f %z").to_string() { + debug!("Found correct snapshot to restore in fastest mode."); + restore_snapshot( + snapshot, + folder_path.clone() + "/.time", + in_past, + snapshot_mode, + ); + } + } + } else if in_past { + for snapshot in snapshot_store.iter().rev() { + let date_entry = + DateTime::parse_from_str(&snapshot.date_created, "%Y-%m-%d %H:%M:%S%.9f %z") + .unwrap(); + + if date_entry == *selected_item { + break; + } + restore_snapshot( + snapshot, + folder_path.clone() + "/.time", + in_past, + snapshot_mode, + ); + // Past is true since we want to restore the reverse patch + } + } else { + debug!("Not reversing!"); + // println!("{:?}", snapshot_store); + for snapshot in snapshot_store.iter() { + let date_entry = + DateTime::parse_from_str(&snapshot.date_created, "%Y-%m-%d %H:%M:%S%.9f %z") + .unwrap(); + + if date_entry == *selected_item { + break; + } + restore_snapshot( + snapshot, + folder_path.clone() + "/.time", + in_past, + snapshot_mode, + ); + // Past is true since we want to restore the reverse patch + } + } +} + +fn check_and_create(target_path: &String) { + if !exists(target_path) + .unwrap_or_else(|_| panic!("Could not check if file exists at {}", target_path)) + { + let true_path = Path::new(target_path).parent(); // Turns target_path into a Path. I know I should do this everywhere. + match true_path { + Some(x) => { + if !exists(x).unwrap() { + debug!("Parent directory doesn't exist, creating {:?}", true_path); + create_dir_all(x) + .unwrap_or_else(|_| panic!("Could not create parent directory at {:?}", x)); + } + } + None => panic!( + "There was an issue trying to get the parent directory of {}!", + target_path + ), + } + debug!("File doesn't exist yet, creating {}", target_path); + File::create(Path::new(&target_path)) + .unwrap_or_else(|_| panic!("Could not create file at {}!", target_path)); + } +} diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..779b62b --- /dev/null +++ b/test.sh @@ -0,0 +1,48 @@ +#!/bin/bash +#set -e + +rm -r demo +cp -r demo.bak demo +find demo -not -path "./demo/.time/*" -type f -exec md5sum {} \; > checklist.chk +cargo run --release -- -c demo.bak/config.json + +rm -r demo/* +cp -r src/* demo/ +find demo -not -path "demo/.time/*" -type f -exec md5sum {} \; > checklist-two.chk +cargo run --release -- -c demo.bak/config.json + +cp -r gui/* demo/ # Do a test that includes pre-existing files. +find demo -not -path "demo/.time/*" -type f -exec md5sum {} \; > checklist-three.chk +cargo run --release -- -c demo.bak/config.json + +echo "Checking demo..." +cargo run --release -- -c demo.bak/config.json restore --restore-index 1 +if ! md5sum -c --quiet checklist.chk +then + echo "demo failed check!" + exit 0 +fi + +echo "Checking src..." +cargo run --release -- -c demo.bak/config.json restore --restore-index 2 +if ! md5sum -c --quiet checklist-two.chk +then + echo "src failed check!" + exit 0 +fi + +echo "Checking src+gui..." +cargo run --release -- -c demo.bak/config.json restore --restore-index 3 +if ! md5sum -c --quiet checklist-three.chk +then + echo "src+gui failed check!" 
+ exit 1 fi + + printf "\nAll tests passed!" + rm checklist.chk + rm checklist-two.chk + rm checklist-three.chk + cp -r demo.bak demo
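Throughout restore.rs above, a patch's file name under .time is recovered by hashing the entry's creation date concatenated with its target path. A small sketch of that derivation; the date and path passed in main are made up.

use sha2::{Digest, Sha256};

// SHA-256 over date_created + target_path, rendered as uppercase hex; the same
// ID with a "-reverse" suffix names the corresponding reverse patch.
fn patch_id(date_created: &str, target_path: &str) -> String {
    let mut sha256 = Sha256::new();
    sha256.update(date_created.to_string() + target_path);
    format!("{:X}", sha256.finalize())
}

fn main() {
    let id = patch_id("2024-12-06 12:41:02.000000000 -07:00", "demo/main.rs");
    println!("demo/.time/{id}");
    println!("demo/.time/{id}-reverse");
}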