From 80450de13fa78b6a63c4e032f72e9d58110c4e73 Mon Sep 17 00:00:00 2001
From: "https://www.google.com/accounts/o8/id?id=AItOawkRGMQkg9ck_pr47JXZV_C2DJQXrO8LgpI"
Date: Sat, 13 Sep 2014 06:28:01 +0000
Subject: Added a comment: Hard linking on local clone

---
 .../comment_1_16b13b2510183a9da5f960ae5765e581._comment       | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 doc/devblog/day_219__catching_up_and_looking_back/comment_1_16b13b2510183a9da5f960ae5765e581._comment

diff --git a/doc/devblog/day_219__catching_up_and_looking_back/comment_1_16b13b2510183a9da5f960ae5765e581._comment b/doc/devblog/day_219__catching_up_and_looking_back/comment_1_16b13b2510183a9da5f960ae5765e581._comment
new file mode 100644
index 000000000..5b839b55c
--- /dev/null
+++ b/doc/devblog/day_219__catching_up_and_looking_back/comment_1_16b13b2510183a9da5f960ae5765e581._comment
@@ -0,0 +1,10 @@
+[[!comment format=mdwn
+ username="https://www.google.com/accounts/o8/id?id=AItOawkRGMQkg9ck_pr47JXZV_C2DJQXrO8LgpI"
+ nickname="Michael"
+ subject="Hard linking on local clone"
+ date="2014-09-13T06:28:01Z"
+ content="""
+Thanks for this feature. It will save a lot of space when working on one-off projects with big scientific datasets.
+
+Unfortunately, there is probably no easy way to achieve similar savings across file systems. On our shared cluster, individual labs keep their data in separate ZFS volumes (to ease per-lab backup handling), but data is often shared (i.e. copied) across volumes when cloning an annex. We need expensive de-duplication on the backup server to at least prevent this kind of waste from hitting the backups -- but the master file server still suffers (the de-duplication ratio sometimes approaches a factor of 2.0).
+"""]]
--
cgit v1.2.3
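
[Editor's note: the cross-file-system limitation the comment describes comes down to link(2): a hard link shares a single inode and so cannot span mount points; the kernel refuses with EXDEV and the caller must fall back to a full copy. The sketch below is not part of the commit -- it is a minimal standard-library Python illustration with hypothetical file names, showing the success case on one file system and the EXDEV fallback that forces duplication across ZFS volumes.]

```python
# Minimal sketch, assuming only the Python standard library: hard links
# share one inode (no extra space), but link(2) fails with EXDEV across
# file systems -- which is why a clone onto another ZFS volume must copy.
import errno
import os
import tempfile

def try_hardlink(src: str, dst: str) -> bool:
    """Hard link src to dst; return False if they live on different file systems."""
    try:
        os.link(src, dst)
        return True
    except OSError as e:
        if e.errno == errno.EXDEV:
            # Cross-device link: a real tool would fall back to a full copy,
            # duplicating the data -- the waste described in the comment.
            return False
        raise

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as repo:
        src = os.path.join(repo, "dataset.bin")  # hypothetical annexed object
        with open(src, "wb") as f:
            f.write(b"\0" * 1024)
        # Same file system: the link succeeds and both names share one inode.
        print(try_hardlink(src, os.path.join(repo, "clone.bin")))  # True
        print(os.stat(src).st_nlink)  # 2 -- no additional space consumed
```

A link count of 2 after the clone is the whole saving: both repositories name the same on-disk blocks, which is exactly what cannot happen once source and destination sit on separate volumes.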