author    Joey Hess <joey@kitenet.net>  2012-11-19 13:18:23 -0400
committer Joey Hess <joey@kitenet.net>  2012-11-19 13:18:23 -0400
commit    798c3d45c11cfe3d7604b27f3271f50d0f810db7 (patch)
tree      946bf6b717e069623d8c48a93402e08db564dcd5 /Remote
parent    8ed7d090ab319bcc45e1593cd2613d38a3615120 (diff)
directory special remote: Made more efficient and robust.
Files are now written to a tmp directory in the remote, and once all chunks are written, the directory is moved into its final place atomically. For now, checkpresent still checks every single chunk of a file, because the old method could leave partially transferred files with some chunks present and others not.
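
The pattern the message describes can be sketched as below. This is a minimal, self-contained illustration, not the actual git-annex code: storeAtomically and writeChunk are hypothetical names, a plain String stands in for the Key type, and the real implementation also handles permissions, chunk counts, and disk-space checks.

import Control.Exception (SomeException, try)
import Control.Monad (void)
import qualified Data.ByteString.Lazy as L
import System.Directory (createDirectoryIfMissing, removeDirectoryRecursive, renameDirectory)
import System.FilePath ((</>))

-- Write every chunk of a key into a per-key tmp directory inside the
-- remote; only once all chunks are on disk is the directory renamed into
-- its final place, so an interrupted transfer never leaves a partial key
-- at the final location.
storeAtomically :: FilePath -> String -> [L.ByteString] -> IO Bool
storeAtomically remote key chunks = do
    let tmpdir  = remote </> "tmp" </> key
        destdir = remote </> key
    r <- try $ do
        createDirectoryIfMissing True tmpdir
        mapM_ (writeChunk tmpdir) (zip [1 :: Integer ..] chunks)
        -- Clear any stale copy, then move the tmp dir into place
        -- atomically (a rename within one filesystem).
        void (try (removeDirectoryRecursive destdir)
                :: IO (Either SomeException ()))
        renameDirectory tmpdir destdir
    case r :: Either SomeException () of
        Left _  -> return False
        Right _ -> return True
  where
    writeChunk dir (n, b) =
        L.writeFile (dir </> (key ++ ".chunk" ++ show n)) b

Because the rename happens only after every chunk is written, a crash mid-transfer leaves debris only under the tmp directory, never a half-populated key directory in the final location.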
Diffstat (limited to 'Remote')
-rw-r--r--  Remote/Directory.hs       | 50
-rw-r--r--  Remote/Helper/Chunked.hs  | 27
-rw-r--r--  Remote/WebDAV.hs          | 10
3 files changed, 46 insertions(+), 41 deletions(-)
diff --git a/Remote/Directory.hs b/Remote/Directory.hs
index 01dc00c8f..f166c2a0d 100644
--- a/Remote/Directory.hs
+++ b/Remote/Directory.hs
@@ -48,7 +48,7 @@ gen r u c = do
storeKey = store dir chunksize,
retrieveKeyFile = retrieve dir chunksize,
retrieveKeyFileCheap = retrieveCheap dir chunksize,
- removeKey = remove dir chunksize,
+ removeKey = remove dir,
hasKey = checkPresent dir chunksize,
hasKeyCheap = True,
whereisKey = Nothing,
@@ -73,10 +73,19 @@ directorySetup u c = do
gitConfigSpecialRemote u c' "directory" dir
return $ M.delete "directory" c'
-{- Locations to try to access a given Key in the Directory. -}
+{- Locations to try to access a given Key in the Directory.
+ - We try more than one since we used to write to different hash directories. -}
locations :: FilePath -> Key -> [FilePath]
locations d k = map (d </>) (keyPaths k)
+{- Directory where the file(s) for a key are stored. -}
+storeDir :: FilePath -> Key -> FilePath
+storeDir d k = addTrailingPathSeparator $ d </> hashDirLower k </> keyFile k
+
+{- Where we store temporary data for a key as it's being uploaded. -}
+tmpDir :: FilePath -> Key -> FilePath
+tmpDir d k = addTrailingPathSeparator $ d </> "tmp" </> keyFile k
+
withCheckedFiles :: (FilePath -> IO Bool) -> ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
withCheckedFiles _ _ [] _ _ = return False
withCheckedFiles check Nothing d k a = go $ locations d k
@@ -159,18 +168,22 @@ storeSplit' meterupdate chunksize (d:dests) bs c = do
storeHelper :: FilePath -> ChunkSize -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool
storeHelper d chunksize key storer = check <&&> go
where
- basedest = Prelude.head $ locations d key
- dir = parentDir basedest
+ tmpdir = tmpDir d key
+ destdir = storeDir d key
{- The size is not exactly known when encrypting the key;
- this assumes that at least the size of the key is
- needed as free space. -}
- check = checkDiskSpace (Just dir) key 0
- go = liftIO $ catchBoolIO $ do
- createDirectoryIfMissing True dir
- allowWrite dir
- preventWrite dir `after` storeChunks basedest chunksize storer recorder finalizer
- finalizer f dest = do
- renameFile f dest
+ check = do
+ liftIO $ createDirectoryIfMissing True tmpdir
+ checkDiskSpace (Just tmpdir) key 0
+ go = liftIO $ catchBoolIO $
+ storeChunks key tmpdir destdir chunksize storer recorder finalizer
+ finalizer tmp dest = do
+ void $ tryIO $ allowWrite dest -- may already exist
+ void $ tryIO $ removeDirectoryRecursive dest -- or not exist
+ createDirectoryIfMissing True (parentDir dest)
+ renameDirectory tmp dest
+ mapM_ preventWrite =<< dirContents dest
preventWrite dest
recorder f s = do
void $ tryIO $ allowWrite f
@@ -201,16 +214,13 @@ retrieveCheap d _ k f = liftIO $ withStoredFiles Nothing d k go
go [file] = catchBoolIO $ createSymbolicLink file f >> return True
go _files = return False
-remove :: FilePath -> ChunkSize -> Key -> Annex Bool
-remove d chunksize k = liftIO $ withStoredFiles chunksize d k go
+remove :: FilePath -> Key -> Annex Bool
+remove d k = liftIO $ catchBoolIO $ do
+ allowWrite dir
+ removeDirectoryRecursive dir
+ return True
where
- go = all id <$$> mapM removefile
- removefile file = catchBoolIO $ do
- let dir = parentDir file
- allowWrite dir
- removeFile file
- _ <- tryIO $ removeDirectory dir
- return True
+ dir = storeDir d k
checkPresent :: FilePath -> ChunkSize -> Key -> Annex (Either String Bool)
checkPresent d chunksize k = liftIO $ catchMsgIO $ withStoredFiles chunksize d k $
diff --git a/Remote/Helper/Chunked.hs b/Remote/Helper/Chunked.hs
index e609e6354..4f04a1c38 100644
--- a/Remote/Helper/Chunked.hs
+++ b/Remote/Helper/Chunked.hs
@@ -58,33 +58,28 @@ chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]
{- Given the base destination to use to store a value,
- generates a stream of temporary destinations (just one when not chunking)
- and passes it to an action, which should chunk and store the data,
- - and return the destinations it stored to, or [] on error.
- -
- - Then calles the finalizer to rename the temporary destinations into
- - their final places (and do any other cleanup), and writes the chunk count
- - (if chunking)
+ - and return the destinations it stored to, or [] on error. Then
+ - calls the recorder to write the chunk count (if chunking). Finally, the
+ - finalizer is called to rename the tmp into the dest
+ - (and do any other cleanup).
-}
-storeChunks :: FilePath -> ChunkSize -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
-storeChunks basedest chunksize storer recorder finalizer =
+storeChunks :: Key -> FilePath -> FilePath -> ChunkSize -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
+storeChunks key tmp dest chunksize storer recorder finalizer =
either (const $ return False) return
=<< (E.try go :: IO (Either E.SomeException Bool))
where
go = do
stored <- storer tmpdests
- forM_ stored $ \d -> do
- let dest = detmpprefix d
- finalizer d dest
when (chunksize /= Nothing) $ do
- let chunkcount = basedest ++ chunkCount
+ let chunkcount = basef ++ chunkCount
recorder chunkcount (show $ length stored)
+ finalizer tmp dest
return (not $ null stored)
- tmpprefix = ".tmp"
- detmpprefix f = take (length f - tmpprefixlen) f
- tmpprefixlen = length tmpprefix
+ basef = tmp ++ keyFile key
tmpdests
- | chunksize == Nothing = [basedest ++ tmpprefix]
- | otherwise = map (++ tmpprefix) $ map (basedest ++) chunkStream
+ | chunksize == Nothing = [basef]
+ | otherwise = map (basef ++ ) chunkStream
{- Given a list of destinations to use, chunks the data according to the
- ChunkSize, and runs the storer action to store each chunk. Returns
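
To make the new destination naming concrete, here is a standalone sketch of how the tmp destinations are now derived. tmpDests is a hypothetical helper for illustration only; a plain String stands in for git-annex's Key, and chunkNames mirrors the chunkStream naming scheme shown above.

import System.FilePath ((</>))

-- Per-chunk name suffixes, same scheme as chunkStream above.
chunkNames :: [String]
chunkNames = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]

-- One destination in the per-key tmp directory when not chunking,
-- otherwise an infinite stream of chunk destinations; the caller takes
-- as many as it actually writes.
tmpDests :: FilePath -> String -> Maybe Integer -> [FilePath]
tmpDests tmpdir keyfile chunksize = case chunksize of
    Nothing -> [basef]
    Just _  -> map (basef ++) chunkNames
  where
    basef = tmpdir </> keyfile

Compared to the old code, no ".tmp" suffix or de-prefixing step is needed: the whole per-key directory is temporary until the finalizer renames it into place.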
diff --git a/Remote/WebDAV.hs b/Remote/WebDAV.hs
index 113e59046..b3d342d19 100644
--- a/Remote/WebDAV.hs
+++ b/Remote/WebDAV.hs
@@ -90,7 +90,7 @@ store r k _f p = metered (Just p) k $ \meterupdate ->
let url = davLocation baseurl k
f <- inRepo $ gitAnnexLocation k
liftIO $ withMeteredFile f meterupdate $
- storeHelper r url user pass
+ storeHelper r k url user pass
storeEncrypted :: Remote -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
storeEncrypted r (cipher, enck) k p = metered (Just p) k $ \meterupdate ->
@@ -98,12 +98,12 @@ storeEncrypted r (cipher, enck) k p = metered (Just p) k $ \meterupdate ->
let url = davLocation baseurl enck
f <- inRepo $ gitAnnexLocation k
liftIO $ encrypt cipher (streamMeteredFile f meterupdate) $
- readBytes $ storeHelper r url user pass
+ readBytes $ storeHelper r enck url user pass
-storeHelper :: Remote -> DavUrl -> DavUser -> DavPass -> L.ByteString -> IO Bool
-storeHelper r urlbase user pass b = catchBoolIO $ do
+storeHelper :: Remote -> Key -> DavUrl -> DavUser -> DavPass -> L.ByteString -> IO Bool
+storeHelper r k urlbase user pass b = catchBoolIO $ do
davMkdir (urlParent urlbase) user pass
- storeChunks urlbase chunksize storer recorder finalizer
+ storeChunks k undefined undefined chunksize storer recorder finalizer
where
chunksize = chunkSize $ config r
storer urls = storeChunked chunksize urls storehttp b