Diffstat (limited to 'Remote'):

 -rw-r--r--  Remote/Directory.hs                 | 212
 -rw-r--r--  Remote/Directory/LegacyChunked.hs   | 112
 -rw-r--r--  Remote/External.hs                  |  60
 -rw-r--r--  Remote/Glacier.hs                   |   3
 -rw-r--r--  Remote/Helper/Chunked.hs            | 395
 -rw-r--r--  Remote/Helper/Chunked/Legacy.hs     |  14
 -rw-r--r--  Remote/Helper/ChunkedEncryptable.hs | 200
 -rw-r--r--  Remote/Helper/Encryptable.hs        |  59
 -rw-r--r--  Remote/WebDAV.hs                    |  49

 9 files changed, 810 insertions(+), 294 deletions(-)
diff --git a/Remote/Directory.hs b/Remote/Directory.hs
index 62c01e370..37942a295 100644
--- a/Remote/Directory.hs
+++ b/Remote/Directory.hs
@@ -1,16 +1,16 @@
{- A "remote" that is just a filesystem directory.
-
- - Copyright 2011-2012 Joey Hess <joey@kitenet.net>
+ - Copyright 2011-2014 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
+{-# LANGUAGE Rank2Types #-}
module Remote.Directory (remote) where
import qualified Data.ByteString.Lazy as L
-import qualified Data.ByteString as S
import qualified Data.Map as M
import Common.Annex
@@ -21,10 +21,8 @@ import Config.Cost
import Config
import Utility.FileMode
import Remote.Helper.Special
-import Remote.Helper.Encryptable
-import Remote.Helper.Chunked
-import qualified Remote.Helper.Chunked.Legacy as Legacy
-import Crypto
+import Remote.Helper.ChunkedEncryptable
+import qualified Remote.Directory.LegacyChunked as Legacy
import Annex.Content
import Annex.UUID
import Utility.Metered
@@ -41,15 +39,15 @@ gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remot
gen r u c gc = do
cst <- remoteCost gc cheapRemoteCost
let chunkconfig = chunkConfig c
- return $ Just $ encryptableRemote c
- (storeEncrypted dir (getGpgEncParams (c,gc)) chunkconfig)
- (retrieveEncrypted dir chunkconfig)
+ return $ Just $ chunkedEncryptableRemote c
+ (prepareStore dir chunkconfig)
+ (retrieve dir chunkconfig)
Remote {
uuid = u,
cost = cst,
name = Git.repoDescribe r,
- storeKey = store dir chunkconfig,
- retrieveKeyFile = retrieve dir chunkconfig,
+ storeKey = storeKeyDummy,
+ retrieveKeyFile = retreiveKeyFileDummy,
retrieveKeyFileCheap = retrieveCheap dir chunkconfig,
removeKey = remove dir,
hasKey = checkPresent dir chunkconfig,
@@ -84,125 +82,49 @@ directorySetup mu _ c = do
gitConfigSpecialRemote u c' "directory" absdir
return (M.delete "directory" c', u)
-{- Locations to try to access a given Key in the Directory.
- - We try more than since we used to write to different hash directories. -}
+{- Locations to try to access a given Key in the directory.
+ - We try more than one since we used to write to different hash
+ - directories. -}
locations :: FilePath -> Key -> [FilePath]
locations d k = map (d </>) (keyPaths k)
+{- Returns the location of a Key in the directory. If the key is
+ - present, returns the location that is actually used, otherwise
+ - returns the first, default location. -}
+getLocation :: FilePath -> Key -> IO FilePath
+getLocation d k = do
+ let locs = locations d k
+ fromMaybe (Prelude.head locs) <$> firstM doesFileExist locs
+
{- Directory where the file(s) for a key are stored. -}
storeDir :: FilePath -> Key -> FilePath
storeDir d k = addTrailingPathSeparator $ d </> hashDirLower k </> keyFile k
-{- Where we store temporary data for a key as it's being uploaded. -}
+{- Where we store temporary data for a key, in the directory, as it's being
+ - written. -}
tmpDir :: FilePath -> Key -> FilePath
tmpDir d k = addTrailingPathSeparator $ d </> "tmp" </> keyFile k
-withCheckedFiles :: (FilePath -> IO Bool) -> ChunkConfig -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
-withCheckedFiles _ _ [] _ _ = return False
-withCheckedFiles check (LegacyChunks _) d k a = go $ locations d k
- where
- go [] = return False
- go (f:fs) = do
- let chunkcount = f ++ Legacy.chunkCount
- ifM (check chunkcount)
- ( do
- chunks <- Legacy.listChunks f <$> readFile chunkcount
- ifM (allM check chunks)
- ( a chunks , return False )
- , do
- chunks <- Legacy.probeChunks f check
- if null chunks
- then go fs
- else a chunks
- )
-withCheckedFiles check _ d k a = go $ locations d k
- where
- go [] = return False
- go (f:fs) = ifM (check f) ( a [f] , go fs )
-
-withStoredFiles :: ChunkConfig -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
-withStoredFiles = withCheckedFiles doesFileExist
-
-store :: FilePath -> ChunkConfig -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-store d chunkconfig k _f p = sendAnnex k (void $ remove d k) $ \src ->
- metered (Just p) k $ \meterupdate ->
- storeHelper d chunkconfig k k $ \dests ->
- case chunkconfig of
- LegacyChunks chunksize ->
- storeLegacyChunked meterupdate chunksize dests
- =<< L.readFile src
- _ -> do
- let dest = Prelude.head dests
- meteredWriteFile meterupdate dest
- =<< L.readFile src
- return [dest]
-
-storeEncrypted :: FilePath -> [CommandParam] -> ChunkConfig -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
-storeEncrypted d gpgOpts chunkconfig (cipher, enck) k p = sendAnnex k (void $ remove d enck) $ \src ->
- metered (Just p) k $ \meterupdate ->
- storeHelper d chunkconfig enck k $ \dests ->
- encrypt gpgOpts cipher (feedFile src) $ readBytes $ \b ->
- case chunkconfig of
- LegacyChunks chunksize ->
- storeLegacyChunked meterupdate chunksize dests b
- _ -> do
- let dest = Prelude.head dests
- meteredWriteFile meterupdate dest b
- return [dest]
-
-{- Splits a ByteString into chunks and writes to dests, obeying configured
- - chunk size (not to be confused with the L.ByteString chunk size).
- - Note: Must always write at least one file, even for empty ByteString. -}
-storeLegacyChunked :: MeterUpdate -> ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
-storeLegacyChunked _ _ [] _ = error "bad storeLegacyChunked call"
-storeLegacyChunked meterupdate chunksize alldests@(firstdest:_) b
- | L.null b = do
- -- must always write at least one file, even for empty
- L.writeFile firstdest b
- return [firstdest]
- | otherwise = storeLegacyChunked' meterupdate chunksize alldests (L.toChunks b) []
-storeLegacyChunked' :: MeterUpdate -> ChunkSize -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
-storeLegacyChunked' _ _ [] _ _ = error "ran out of dests"
-storeLegacyChunked' _ _ _ [] c = return $ reverse c
-storeLegacyChunked' meterupdate chunksize (d:dests) bs c = do
- bs' <- withFile d WriteMode $
- feed zeroBytesProcessed chunksize bs
- storeLegacyChunked' meterupdate chunksize dests bs' (d:c)
- where
- feed _ _ [] _ = return []
- feed bytes sz (l:ls) h = do
- let len = S.length l
- let s = fromIntegral len
- if s <= sz || sz == chunksize
- then do
- S.hPut h l
- let bytes' = addBytesProcessed bytes len
- meterupdate bytes'
- feed bytes' (sz - s) ls h
- else return (l:ls)
-
-storeHelper :: FilePath -> ChunkConfig -> Key -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool
-storeHelper d chunkconfig key origkey storer = check <&&> liftIO go
- where
- tmpdir = tmpDir d key
- destdir = storeDir d key
-
- {- An encrypted key does not have a known size,
- - so check that the size of the original key is available as free
- - space. -}
- check = do
- liftIO $ createDirectoryIfMissing True tmpdir
- checkDiskSpace (Just tmpdir) origkey 0
-
- go = case chunkconfig of
- NoChunks -> flip catchNonAsync (\e -> print e >> return False) $ do
- let tmpf = tmpdir </> keyFile key
- void $ storer [tmpf]
+{- Check if there is enough free disk space in the remote's directory to
+ - store the key. Note that the unencrypted key size is checked. -}
+prepareStore :: FilePath -> ChunkConfig -> Preparer Storer
+prepareStore d chunkconfig = checkPrepare
+ (\k -> checkDiskSpace (Just d) k 0)
+ (byteStorer $ store d chunkconfig)
+
+store :: FilePath -> ChunkConfig -> Key -> L.ByteString -> MeterUpdate -> Annex Bool
+store d chunkconfig k b p = liftIO $ do
+ void $ tryIO $ createDirectoryIfMissing True tmpdir
+ case chunkconfig of
+ LegacyChunks chunksize -> Legacy.store chunksize finalizer k b p tmpdir destdir
+ _ -> do
+ let tmpf = tmpdir </> keyFile k
+ meteredWriteFile p tmpf b
finalizer tmpdir destdir
return True
- UnpaddedChunks _ -> error "TODO: storeHelper with UnpaddedChunks"
- LegacyChunks _ -> Legacy.storeChunks key tmpdir destdir storer recorder finalizer
-
+ where
+ tmpdir = tmpDir d k
+ destdir = storeDir d k
finalizer tmp dest = do
void $ tryIO $ allowWrite dest -- may already exist
void $ tryIO $ removeDirectoryRecursive dest -- or not exist
@@ -212,38 +134,21 @@ storeHelper d chunkconfig key origkey storer = check <&&> liftIO go
void $ tryIO $ do
mapM_ preventWrite =<< dirContents dest
preventWrite dest
-
- recorder f s = do
- void $ tryIO $ allowWrite f
- writeFile f s
- void $ tryIO $ preventWrite f
-
-retrieve :: FilePath -> ChunkConfig -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Bool
-retrieve d chunkconfig k _ f p = metered (Just p) k $ \meterupdate ->
- liftIO $ withStoredFiles chunkconfig d k $ \files ->
- catchBoolIO $ do
- meteredWriteFileChunks meterupdate f files L.readFile
- return True
-retrieveEncrypted :: FilePath -> ChunkConfig -> (Cipher, Key) -> Key -> FilePath -> MeterUpdate -> Annex Bool
-retrieveEncrypted d chunkconfig (cipher, enck) k f p = metered (Just p) k $ \meterupdate ->
- liftIO $ withStoredFiles chunkconfig d enck $ \files ->
- catchBoolIO $ do
- decrypt cipher (feeder files) $
- readBytes $ meteredWriteFile meterupdate f
- return True
- where
- feeder files h = forM_ files $ L.hPut h <=< L.readFile
+retrieve :: FilePath -> ChunkConfig -> Preparer Retriever
+retrieve d (LegacyChunks _) = Legacy.retrieve locations d
+retrieve d _ = simplyPrepare $ byteRetriever $ \k ->
+ liftIO $ L.readFile =<< getLocation d k
retrieveCheap :: FilePath -> ChunkConfig -> Key -> FilePath -> Annex Bool
--- no cheap retrieval for chunks
+-- no cheap retrieval possible for chunks
retrieveCheap _ (UnpaddedChunks _) _ _ = return False
retrieveCheap _ (LegacyChunks _) _ _ = return False
#ifndef mingw32_HOST_OS
-retrieveCheap d ck k f = liftIO $ withStoredFiles ck d k go
- where
- go [file] = catchBoolIO $ createSymbolicLink file f >> return True
- go _files = return False
+retrieveCheap d NoChunks k f = liftIO $ catchBoolIO $ do
+ file <- getLocation d k
+ createSymbolicLink file f
+ return True
#else
retrieveCheap _ _ _ _ = return False
#endif
@@ -256,12 +161,25 @@ remove d k = liftIO $ do
- before it can delete them. -}
void $ tryIO $ mapM_ allowWrite =<< dirContents dir
#endif
- catchBoolIO $ do
+ ok <- catchBoolIO $ do
removeDirectoryRecursive dir
return True
+ {- Removing the subdirectory will fail if it doesn't exist.
+ - But, we want to succeed in that case, as long as the directory
+ - remote's top-level directory does exist. -}
+ if ok
+ then return ok
+ else doesDirectoryExist d <&&> (not <$> doesDirectoryExist dir)
where
dir = storeDir d k
checkPresent :: FilePath -> ChunkConfig -> Key -> Annex (Either String Bool)
-checkPresent d chunkconfig k = liftIO $ catchMsgIO $ withStoredFiles chunkconfig d k $
- const $ return True -- withStoredFiles checked that it exists
+checkPresent d (LegacyChunks _) k = Legacy.checkPresent d locations k
+checkPresent d _ k = liftIO $ do
+ v <- catchMsgIO $ anyM doesFileExist (locations d k)
+ case v of
+ Right False -> ifM (doesDirectoryExist d)
+ ( return v
+ , return $ Left $ "directory " ++ d ++ " is not accessible"
+ )
+ _ -> return v
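
The Preparer, Storer, Retriever, and ContentSource types used above come from Types/StoreRetrieve.hs, which falls outside this diffstat (limited to 'Remote'). Their shapes, inferred from how they are used in this patch, are roughly the following sketch; the real definitions live in that module:

import qualified Data.ByteString.Lazy as L
-- (Key, Annex, and MeterUpdate come from the usual git-annex modules)

-- Content is passed around either as a file on disk, or as a lazy
-- ByteString held in memory.
data ContentSource
	= FileContent FilePath
	| ByteContent L.ByteString

-- A Preparer runs setup for a helper (a Storer or Retriever), passing
-- Just the helper to its continuation, or Nothing when the operation
-- cannot proceed (eg, checkPrepare's disk space check failed).
type Preparer helper = Key -> (Maybe helper -> Annex Bool) -> Annex Bool

-- Stores a Key's content on the remote.
type Storer = Key -> ContentSource -> MeterUpdate -> Annex Bool

-- Retrieves a Key's content, feeding it to a callback; throws an
-- exception if the key is not present or the remote is inaccessible.
type Retriever = Key -> MeterUpdate -> (ContentSource -> Annex Bool) -> Annex Bool
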
diff --git a/Remote/Directory/LegacyChunked.hs b/Remote/Directory/LegacyChunked.hs
new file mode 100644
index 000000000..312119f4e
--- /dev/null
+++ b/Remote/Directory/LegacyChunked.hs
@@ -0,0 +1,112 @@
+{- Legacy chunksize support for directory special remote.
+ -
+ - Can be removed eventually.
+ -
+ - Copyright 2011-2012 Joey Hess <joey@kitenet.net>
+ -
+ - Licensed under the GNU GPL version 3 or higher.
+ -}
+
+{-# LANGUAGE Rank2Types #-}
+
+module Remote.Directory.LegacyChunked where
+
+import qualified Data.ByteString.Lazy as L
+import qualified Data.ByteString as S
+
+import Common.Annex
+import Utility.FileMode
+import Remote.Helper.ChunkedEncryptable
+import qualified Remote.Helper.Chunked.Legacy as Legacy
+import Annex.Perms
+import Utility.Metered
+
+withCheckedFiles :: (FilePath -> IO Bool) -> FilePath -> (FilePath -> Key -> [FilePath]) -> Key -> ([FilePath] -> IO Bool) -> IO Bool
+withCheckedFiles _ [] _locations _ _ = return False
+withCheckedFiles check d locations k a = go $ locations d k
+ where
+ go [] = return False
+ go (f:fs) = do
+ let chunkcount = f ++ Legacy.chunkCount
+ ifM (check chunkcount)
+ ( do
+ chunks <- Legacy.listChunks f <$> readFile chunkcount
+ ifM (allM check chunks)
+ ( a chunks , return False )
+ , do
+ chunks <- Legacy.probeChunks f check
+ if null chunks
+ then go fs
+ else a chunks
+ )
+withStoredFiles :: FilePath -> (FilePath -> Key -> [FilePath]) -> Key -> ([FilePath] -> IO Bool) -> IO Bool
+withStoredFiles = withCheckedFiles doesFileExist
+
+{- Splits a ByteString into chunks and writes to dests, obeying configured
+ - chunk size (not to be confused with the L.ByteString chunk size). -}
+storeLegacyChunked :: MeterUpdate -> ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
+storeLegacyChunked _ _ [] _ = error "bad storeLegacyChunked call"
+storeLegacyChunked meterupdate chunksize alldests@(firstdest:_) b
+ | L.null b = do
+ -- always write at least one file, even for empty
+ L.writeFile firstdest b
+ return [firstdest]
+ | otherwise = storeLegacyChunked' meterupdate chunksize alldests (L.toChunks b) []
+storeLegacyChunked' :: MeterUpdate -> ChunkSize -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
+storeLegacyChunked' _ _ [] _ _ = error "ran out of dests"
+storeLegacyChunked' _ _ _ [] c = return $ reverse c
+storeLegacyChunked' meterupdate chunksize (d:dests) bs c = do
+ bs' <- withFile d WriteMode $
+ feed zeroBytesProcessed chunksize bs
+ storeLegacyChunked' meterupdate chunksize dests bs' (d:c)
+ where
+ feed _ _ [] _ = return []
+ feed bytes sz (l:ls) h = do
+ let len = S.length l
+ let s = fromIntegral len
+ if s <= sz || sz == chunksize
+ then do
+ S.hPut h l
+ let bytes' = addBytesProcessed bytes len
+ meterupdate bytes'
+ feed bytes' (sz - s) ls h
+ else return (l:ls)
+
+storeHelper :: (FilePath -> FilePath -> IO ()) -> Key -> ([FilePath] -> IO [FilePath]) -> FilePath -> FilePath -> IO Bool
+storeHelper finalizer key storer tmpdir destdir = do
+ void $ liftIO $ tryIO $ createDirectoryIfMissing True tmpdir
+ Legacy.storeChunks key tmpdir destdir storer recorder finalizer
+ where
+ recorder f s = do
+ void $ tryIO $ allowWrite f
+ writeFile f s
+ void $ tryIO $ preventWrite f
+
+store :: ChunkSize -> (FilePath -> FilePath -> IO ()) -> Key -> L.ByteString -> MeterUpdate -> FilePath -> FilePath -> IO Bool
+store chunksize finalizer k b p = storeHelper finalizer k $ \dests ->
+ storeLegacyChunked p chunksize dests b
+
+{- Need to get a single ByteString containing every chunk.
+ - Done very inefficiently, by writing to a temp file.
+ - :/ This is legacy code..
+ -}
+retrieve :: (FilePath -> Key -> [FilePath]) -> FilePath -> Preparer Retriever
+retrieve locations d basek a = do
+ showLongNote "This remote uses the deprecated chunksize setting. So this will be quite slow."
+ tmpdir <- fromRepo $ gitAnnexTmpMiscDir
+ createAnnexDirectory tmpdir
+ let tmp = tmpdir </> keyFile basek ++ ".directorylegacy.tmp"
+ a $ Just $ byteRetriever $ \k -> liftIO $ do
+ void $ withStoredFiles d locations k $ \fs -> do
+ forM_ fs $
+ S.appendFile tmp <=< S.readFile
+ return True
+ b <- L.readFile tmp
+ nukeFile tmp
+ return b
+
+checkPresent :: FilePath -> (FilePath -> Key -> [FilePath]) -> Key -> Annex (Either String Bool)
+checkPresent d locations k = liftIO $ catchMsgIO $
+ withStoredFiles d locations k $
+ -- withStoredFiles checked that it exists
+ const $ return True
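
The feed loop in storeLegacyChunked' above is the subtle part: it pours the lazy ByteString's internal chunks into one dest file until the configured chunk size is used up, then moves on to the next dest. A standalone sketch of the same splitting idea, minus the metering and the internal-chunk bookkeeping:

import qualified Data.ByteString.Lazy as L

-- Simplified analogue of storeLegacyChunked: split a lazy ByteString
-- across files of at most chunksize bytes each. Like the original, it
-- always writes at least one file, even for empty input.
splitToFiles :: Integer -> [FilePath] -> L.ByteString -> IO [FilePath]
splitToFiles _ [] _ = error "no dests"
splitToFiles chunksize dests@(firstdest:_) b
	| L.null b = do
		L.writeFile firstdest b
		return [firstdest]
	| otherwise = go dests b []
  where
	go _ s used | L.null s = return (reverse used)
	go [] _ _ = error "ran out of dests"
	go (d:ds) s used = do
		let (chunk, rest) = L.splitAt (fromIntegral chunksize) s
		L.writeFile d chunk
		go ds rest (d:used)
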
diff --git a/Remote/External.hs b/Remote/External.hs
index 464e9b57e..1c22a589b 100644
--- a/Remote/External.hs
+++ b/Remote/External.hs
@@ -15,14 +15,12 @@ import Types.CleanupActions
import qualified Git
import Config
import Remote.Helper.Special
-import Remote.Helper.Encryptable
-import Crypto
+import Remote.Helper.ChunkedEncryptable
import Utility.Metered
import Logs.Transfer
import Logs.PreferredContent.Raw
import Logs.RemoteState
import Config.Cost
-import Annex.Content
import Annex.UUID
import Annex.Exception
import Creds
@@ -30,7 +28,6 @@ import Creds
import Control.Concurrent.STM
import System.Log.Logger (debugM)
import qualified Data.Map as M
-import qualified Data.ByteString.Lazy as L
remote :: RemoteType
remote = RemoteType {
@@ -46,15 +43,15 @@ gen r u c gc = do
Annex.addCleanup (RemoteCleanup u) $ stopExternal external
cst <- getCost external r gc
avail <- getAvailability external r gc
- return $ Just $ encryptableRemote c
- (storeEncrypted external $ getGpgEncParams (c,gc))
- (retrieveEncrypted external)
+ return $ Just $ chunkedEncryptableRemote c
+ (simplyPrepare $ store external)
+ (simplyPrepare $ retrieve external)
Remote {
uuid = u,
cost = cst,
name = Git.repoDescribe r,
- storeKey = store external,
- retrieveKeyFile = retrieve external,
+ storeKey = storeKeyDummy,
+ retrieveKeyFile = retreiveKeyFileDummy,
retrieveKeyFileCheap = \_ _ -> return False,
removeKey = remove external,
hasKey = checkPresent external,
@@ -90,25 +87,8 @@ externalSetup mu _ c = do
gitConfigSpecialRemote u c'' "externaltype" externaltype
return (c'', u)
-store :: External -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-store external k _f p = sendAnnex k rollback $ \f ->
- metered (Just p) k $
- storeHelper external k f
- where
- rollback = void $ remove external k
-
-storeEncrypted :: External -> [CommandParam] -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
-storeEncrypted external gpgOpts (cipher, enck) k p = withTmp enck $ \tmp ->
- sendAnnex k rollback $ \src -> do
- metered (Just p) k $ \meterupdate -> do
- liftIO $ encrypt gpgOpts cipher (feedFile src) $
- readBytes $ L.writeFile tmp
- storeHelper external enck tmp meterupdate
- where
- rollback = void $ remove external enck
-
-storeHelper :: External -> Key -> FilePath -> MeterUpdate -> Annex Bool
-storeHelper external k f p = safely $
+store :: External -> Storer
+store external = fileStorer $ \k f p ->
handleRequest external (TRANSFER Upload k f) (Just p) $ \resp ->
case resp of
TRANSFER_SUCCESS Upload k' | k == k' ->
@@ -119,31 +99,15 @@ storeHelper external k f p = safely $
return False
_ -> Nothing
-retrieve :: External -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Bool
-retrieve external k _f d p = metered (Just p) k $
- retrieveHelper external k d
-
-retrieveEncrypted :: External -> (Cipher, Key) -> Key -> FilePath -> MeterUpdate -> Annex Bool
-retrieveEncrypted external (cipher, enck) k f p = withTmp enck $ \tmp ->
- metered (Just p) k $ \meterupdate ->
- ifM (retrieveHelper external enck tmp meterupdate)
- ( liftIO $ catchBoolIO $ do
- decrypt cipher (feedFile tmp) $
- readBytes $ L.writeFile f
- return True
- , return False
- )
-
-retrieveHelper :: External -> Key -> FilePath -> MeterUpdate -> Annex Bool
-retrieveHelper external k d p = safely $
+retrieve :: External -> Retriever
+retrieve external = fileRetriever $ \d k p ->
handleRequest external (TRANSFER Download k d) (Just p) $ \resp ->
case resp of
TRANSFER_SUCCESS Download k'
- | k == k' -> Just $ return True
+ | k == k' -> Just $ return ()
TRANSFER_FAILURE Download k' errmsg
| k == k' -> Just $ do
- warning errmsg
- return False
+ error errmsg
_ -> Nothing
remove :: External -> Key -> Annex Bool
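
Note the error-handling shift above: a TRANSFER_FAILURE now throws (error errmsg) rather than returning False, and the ChunkedEncryptable layer catches the exception. A toy Retriever in the same fileRetriever style, with hypothetical names, showing the contract:

import qualified Data.ByteString.Lazy as L
-- (also needs Common.Annex and Remote.Helper.ChunkedEncryptable)

-- Hypothetical retriever for a remote that keeps content in a plain
-- local directory: write into the temp file the framework provides,
-- and signal failure by throwing.
demoRetrieve :: FilePath -> Retriever
demoRetrieve srcdir = fileRetriever $ \dest k _p -> do
	let src = srcdir </> keyFile k
	ifM (liftIO $ doesFileExist src)
		-- A real retriever should also feed the MeterUpdate as
		-- data arrives; elided here for brevity.
		( liftIO $ L.writeFile dest =<< L.readFile src
		, error $ "content not present: " ++ src
		)
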
diff --git a/Remote/Glacier.hs b/Remote/Glacier.hs
index 00be9e1a9..bf8f05061 100644
--- a/Remote/Glacier.hs
+++ b/Remote/Glacier.hs
@@ -282,7 +282,8 @@ jobList r keys = go =<< glacierEnv (config r) (uuid r)
then return nada
else do
enckeys <- forM keys $ \k ->
- maybe k snd <$> cipherKey (config r) k
+ maybe k (\(_, enck) -> enck k)
+ <$> cipherKey (config r)
let keymap = M.fromList $ zip enckeys keys
let convert = mapMaybe (`M.lookup` keymap)
return (convert succeeded, convert failed)
diff --git a/Remote/Helper/Chunked.hs b/Remote/Helper/Chunked.hs
index 031ff63d6..0d786c98d 100644
--- a/Remote/Helper/Chunked.hs
+++ b/Remote/Helper/Chunked.hs
@@ -1,22 +1,30 @@
{- git-annex chunked remotes
-
- - Copyright 2012-2014 Joey Hess <joey@kitenet.net>
+ - Copyright 2014 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
-module Remote.Helper.Chunked
- ( ChunkSize
- , ChunkConfig(..)
- , chunkConfig
- , meteredWriteFileChunks
- ) where
+module Remote.Helper.Chunked (
+ ChunkSize,
+ ChunkConfig(..),
+ chunkConfig,
+ storeChunks,
+ removeChunks,
+ retrieveChunks,
+ hasKeyChunks,
+) where
import Common.Annex
import Utility.DataUnits
+import Types.StoreRetrieve
import Types.Remote
-import Logs.Chunk.Pure (ChunkSize)
+import Types.Key
+import Logs.Chunk
import Utility.Metered
+import Crypto (EncKey)
+import Backend (isStableKey)
+import Annex.Exception
import qualified Data.ByteString.Lazy as L
import qualified Data.Map as M
@@ -25,23 +33,366 @@ data ChunkConfig
= NoChunks
| UnpaddedChunks ChunkSize
| LegacyChunks ChunkSize
+ deriving (Show)
+
+noChunks :: ChunkConfig -> Bool
+noChunks NoChunks = True
+noChunks _ = False
chunkConfig :: RemoteConfig -> ChunkConfig
chunkConfig m =
case M.lookup "chunksize" m of
Nothing -> case M.lookup "chunk" m of
Nothing -> NoChunks
- Just v -> UnpaddedChunks $ readsz v "chunk"
- Just v -> LegacyChunks $ readsz v "chunksize"
- where
- readsz v f = case readSize dataUnits v of
- Just size | size > 0 -> fromInteger size
- _ -> error ("bad " ++ f)
-
-{- Writes a series of chunks to a file. The feeder is called to get
- - each chunk. -}
-meteredWriteFileChunks :: MeterUpdate -> FilePath -> [v] -> (v -> IO L.ByteString) -> IO ()
-meteredWriteFileChunks meterupdate dest chunks feeder =
- withBinaryFile dest WriteMode $ \h ->
- forM_ chunks $
- meteredWrite meterupdate h <=< feeder
+ Just v -> readsz UnpaddedChunks v "chunk"
+ Just v -> readsz LegacyChunks v "chunksize"
+ where
+ readsz c v f = case readSize dataUnits v of
+ Just size
+ | size == 0 -> NoChunks
+ | size > 0 -> c (fromInteger size)
+ _ -> error $ "bad configuration " ++ f ++ "=" ++ v
+
+-- An infinite stream of chunk keys, starting from chunk 1.
+newtype ChunkKeyStream = ChunkKeyStream [Key]
+
+chunkKeyStream :: Key -> ChunkSize -> ChunkKeyStream
+chunkKeyStream basek chunksize = ChunkKeyStream $ map mk [1..]
+ where
+ mk chunknum = sizedk { keyChunkNum = Just chunknum }
+ sizedk = basek { keyChunkSize = Just (toInteger chunksize) }
+
+nextChunkKeyStream :: ChunkKeyStream -> (Key, ChunkKeyStream)
+nextChunkKeyStream (ChunkKeyStream (k:l)) = (k, ChunkKeyStream l)
+nextChunkKeyStream (ChunkKeyStream []) = undefined -- stream is infinite!
+
+takeChunkKeyStream :: ChunkCount -> ChunkKeyStream -> [Key]
+takeChunkKeyStream n (ChunkKeyStream l) = genericTake n l
+
+-- Number of chunks already consumed from the stream.
+numChunks :: ChunkKeyStream -> Integer
+numChunks = pred . fromJust . keyChunkNum . fst . nextChunkKeyStream
+
+{- Splits up the key's content into chunks, passing each chunk to
+ - the storer action, along with a corresponding chunk key and a
+ - progress meter update callback.
+ -
+ - To support resuming, the checker is used to find the first missing
+ - chunk key. Storing starts from that chunk.
+ -
+ - This buffers each chunk in memory, so can use a lot of memory
+ - with a large ChunkSize.
+ - More efficient versions of this can be written, relying
+ - on L.toChunks to split the lazy bytestring into chunks (typically
+ - smaller than the ChunkSize), and eg, write those chunks to a Handle.
+ - But this is the best that can be done with the storer interface that
+ - writes a whole L.ByteString at a time.
+ -}
+storeChunks
+ :: UUID
+ -> ChunkConfig
+ -> Key
+ -> FilePath
+ -> MeterUpdate
+ -> (Key -> ContentSource -> MeterUpdate -> Annex Bool)
+ -> (Key -> Annex (Either String Bool))
+ -> Annex Bool
+storeChunks u chunkconfig k f p storer checker =
+ case chunkconfig of
+ (UnpaddedChunks chunksize) | isStableKey k ->
+ bracketIO open close (go chunksize)
+ _ -> showprogress $ storer k (FileContent f)
+ where
+ showprogress = metered (Just p) k
+
+ open = tryIO $ openBinaryFile f ReadMode
+
+ close (Right h) = hClose h
+ close (Left _) = noop
+
+ go _ (Left e) = do
+ warning (show e)
+ return False
+ go chunksize (Right h) = showprogress $ \meterupdate -> do
+ let chunkkeys = chunkKeyStream k chunksize
+ (chunkkeys', startpos) <- seekResume h chunkkeys checker
+ b <- liftIO $ L.hGetContents h
+ gochunks meterupdate startpos chunksize b chunkkeys'
+
+ gochunks :: MeterUpdate -> BytesProcessed -> ChunkSize -> L.ByteString -> ChunkKeyStream -> Annex Bool
+ gochunks meterupdate startpos chunksize = loop startpos . splitchunk
+ where
+ splitchunk = L.splitAt chunksize
+
+ loop bytesprocessed (chunk, bs) chunkkeys
+ | L.null chunk && numchunks > 0 = do
+ -- Once all chunks are successfully
+ -- stored, update the chunk log.
+ chunksStored u k (FixedSizeChunks chunksize) numchunks
+ return True
+ | otherwise = do
+ liftIO $ meterupdate' zeroBytesProcessed
+ let (chunkkey, chunkkeys') = nextChunkKeyStream chunkkeys
+ ifM (storer chunkkey (ByteContent chunk) meterupdate')
+ ( do
+ let bytesprocessed' = addBytesProcessed bytesprocessed (L.length chunk)
+ loop bytesprocessed' (splitchunk bs) chunkkeys'
+ , return False
+ )
+ where
+ numchunks = numChunks chunkkeys
+ {- The MeterUpdate that is passed to the action
+ - storing a chunk is offset, so that it reflects
+ - the total bytes that have already been stored
+ - in previous chunks. -}
+ meterupdate' = offsetMeterUpdate meterupdate bytesprocessed
+
+{- Check if any of the chunk keys are present. If found, seek forward
+ - in the Handle, so it will be read starting at the first missing chunk.
+ - Returns the ChunkKeyStream truncated to start at the first missing
+ - chunk, and the number of bytes skipped due to resuming.
+ -
+ - As an optimisation, if the file fits into a single chunk, there's no need
+ - to check if that chunk is present -- we know it's not, because otherwise
+ - the whole file would be present and there would be no reason to try to
+ - store it.
+ -}
+seekResume
+ :: Handle
+ -> ChunkKeyStream
+ -> (Key -> Annex (Either String Bool))
+ -> Annex (ChunkKeyStream, BytesProcessed)
+seekResume h chunkkeys checker = do
+ sz <- liftIO (hFileSize h)
+ if sz <= fromMaybe 0 (keyChunkSize $ fst $ nextChunkKeyStream chunkkeys)
+ then return (chunkkeys, zeroBytesProcessed)
+ else check 0 chunkkeys sz
+ where
+ check pos cks sz
+ | pos >= sz = do
+ -- All chunks are already stored!
+ liftIO $ hSeek h AbsoluteSeek sz
+ return (cks, toBytesProcessed sz)
+ | otherwise = do
+ v <- checker k
+ case v of
+ Right True ->
+ check pos' cks' sz
+ _ -> do
+ when (pos > 0) $
+ liftIO $ hSeek h AbsoluteSeek pos
+ return (cks, toBytesProcessed pos)
+ where
+ (k, cks') = nextChunkKeyStream cks
+ pos' = pos + fromMaybe 0 (keyChunkSize k)
+
+{- Removes all chunks of a key from a remote, by calling a remover
+ - action on each.
+ -
+ - The remover action should succeed even if asked to
+ - remove a key that is not present on the remote.
+ -
+ - This action may be called on a chunked key. It will simply remove it.
+ -}
+removeChunks :: (Key -> Annex Bool) -> UUID -> ChunkConfig -> EncKey -> Key -> Annex Bool
+removeChunks remover u chunkconfig encryptor k = do
+ ls <- chunkKeys u chunkconfig k
+ ok <- allM (remover . encryptor) (concat ls)
+ when ok $ do
+ let chunksizes = catMaybes $ map (keyChunkSize <=< headMaybe) ls
+ forM_ chunksizes $ chunksRemoved u k . FixedSizeChunks . fromIntegral
+ return ok
+
+{- Retrieves a key from a remote, using a retriever action.
+ -
+ - When the remote is chunked, tries each of the options returned by
+ - chunkKeys until it finds one where the retriever successfully
+ - gets the first chunked key. The content of that key, and of any
+ - other chunks in the list, is fed to the sink.
+ -
+ - If retrieval of one of the subsequent chunks throws an exception,
+ - gives up and returns False. Note that partial data may have been
+ - written to the sink in this case.
+ -
+ - Resuming is supported when using chunks. When the destination file
+ - already exists, it skips to the next chunked key that would be needed
+ - to resume.
+ -}
+retrieveChunks
+ :: Retriever
+ -> UUID
+ -> ChunkConfig
+ -> EncKey
+ -> Key
+ -> FilePath
+ -> MeterUpdate
+ -> (Maybe Handle -> Maybe MeterUpdate -> ContentSource -> Annex Bool)
+ -> Annex Bool
+retrieveChunks retriever u chunkconfig encryptor basek dest basep sink
+ | noChunks chunkconfig =
+ -- Optimisation: Try the unchunked key first, to avoid
+ -- looking in the git-annex branch for chunk counts
+ -- that are likely not there.
+ getunchunked `catchNonAsyncAnnex`
+ const (go =<< chunkKeysOnly u basek)
+ | otherwise = go =<< chunkKeys u chunkconfig basek
+ where
+ go ls = do
+ currsize <- liftIO $ catchMaybeIO $
+ toInteger . fileSize <$> getFileStatus dest
+ let ls' = maybe ls (setupResume ls) currsize
+ if any null ls'
+ then return True -- dest is already complete
+ else firstavail currsize ls' `catchNonAsyncAnnex` giveup
+
+ giveup e = do
+ warning (show e)
+ return False
+
+ firstavail _ [] = return False
+ firstavail currsize ([]:ls) = firstavail currsize ls
+ firstavail currsize ((k:ks):ls)
+ | k == basek = getunchunked
+ `catchNonAsyncAnnex` (const $ firstavail currsize ls)
+ | otherwise = do
+ let offset = resumeOffset currsize k
+ let p = maybe basep
+ (offsetMeterUpdate basep . toBytesProcessed)
+ offset
+ v <- tryNonAsyncAnnex $
+ retriever (encryptor k) p $ \content ->
+ bracketIO (maybe opennew openresume offset) hClose $ \h -> do
+ void $ tosink (Just h) p content
+ let sz = toBytesProcessed $
+ fromMaybe 0 $ keyChunkSize k
+ getrest p h sz sz ks
+ `catchNonAsyncAnnex` giveup
+ case v of
+ Left e
+ | null ls -> giveup e
+ | otherwise -> firstavail currsize ls
+ Right r -> return r
+
+ getrest _ _ _ _ [] = return True
+ getrest p h sz bytesprocessed (k:ks) = do
+ let p' = offsetMeterUpdate p bytesprocessed
+ liftIO $ p' zeroBytesProcessed
+ ifM (retriever (encryptor k) p' $ tosink (Just h) p')
+ ( getrest p h sz (addBytesProcessed bytesprocessed sz) ks
+ , giveup "chunk retrieval failed"
+ )
+
+ getunchunked = retriever (encryptor basek) basep $ tosink Nothing basep
+
+ opennew = openBinaryFile dest WriteMode
+
+ -- Open the file and seek to the start point in order to resume.
+ openresume startpoint = do
+ -- ReadWriteMode allows seeking; AppendMode does not.
+ h <- openBinaryFile dest ReadWriteMode
+ hSeek h AbsoluteSeek startpoint
+ return h
+
+ {- Progress meter updating is a bit tricky: If the Retriever
+ - populates a file, it is responsible for updating progress
+ - as the file is being retrieved.
+ -
+ - However, if the Retriever generates a lazy ByteString,
+ - it is not responsible for updating progress (often it cannot).
+ - Instead, the sink is passed a meter to update as it consumes
+ - the ByteString.
+ -}
+ tosink h p content = sink h p' content
+ where
+ p'
+ | isByteContent content = Just p
+ | otherwise = Nothing
+
+{- Can resume when the chunk's offset is at or before the end of
+ - the dest file. -}
+resumeOffset :: Maybe Integer -> Key -> Maybe Integer
+resumeOffset Nothing _ = Nothing
+resumeOffset currsize k
+ | offset <= currsize = offset
+ | otherwise = Nothing
+ where
+ offset = chunkKeyOffset k
+
+{- Drops chunks that are already present in a file, based on its size.
+ - Keeps any non-chunk keys.
+ -}
+setupResume :: [[Key]] -> Integer -> [[Key]]
+setupResume ls currsize = map dropunneeded ls
+ where
+ dropunneeded [] = []
+ dropunneeded l@(k:_) = case keyChunkSize k of
+ Just chunksize | chunksize > 0 ->
+ genericDrop (currsize `div` chunksize) l
+ _ -> l
+
+{- Checks if a key is present in a remote. This requires that, for
+ - at least one of the lists returned by chunkKeys, every key in the
+ - list checks out as present, using the checker action.
+ -}
+hasKeyChunks
+ :: (Key -> Annex (Either String Bool))
+ -> UUID
+ -> ChunkConfig
+ -> EncKey
+ -> Key
+ -> Annex (Either String Bool)
+hasKeyChunks checker u chunkconfig encryptor basek
+ | noChunks chunkconfig =
+ -- Optimisation: Try the unchunked key first, to avoid
+ -- looking in the git-annex branch for chunk counts
+ -- that are likely not there.
+ ifM ((Right True ==) <$> checker (encryptor basek))
+ ( return (Right True)
+ , checklists Nothing =<< chunkKeysOnly u basek
+ )
+ | otherwise = checklists Nothing =<< chunkKeys u chunkconfig basek
+ where
+ checklists Nothing [] = return (Right False)
+ checklists (Just deferrederror) [] = return (Left deferrederror)
+ checklists d (l:ls)
+ | not (null l) = do
+ v <- checkchunks l
+ case v of
+ Left e -> checklists (Just e) ls
+ Right True -> return (Right True)
+ Right False -> checklists Nothing ls
+ | otherwise = checklists d ls
+
+ checkchunks :: [Key] -> Annex (Either String Bool)
+ checkchunks [] = return (Right True)
+ checkchunks (k:ks) = do
+ v <- checker (encryptor k)
+ if v == Right True
+ then checkchunks ks
+ else return v
+
+{- A key can be stored in a remote unchunked, or as a list of chunked keys.
+ - This can be the case whether or not the remote is currently configured
+ - to use chunking.
+ -
+ - It's even possible for a remote to have the same key stored multiple
+ - times with different chunk sizes!
+ -
+ - This finds all possible lists of keys that might be on the remote that
+ - can be combined to get back the requested key, in order from most to
+ - least likely to exist.
+ -}
+chunkKeys :: UUID -> ChunkConfig -> Key -> Annex [[Key]]
+chunkKeys u chunkconfig k = do
+ l <- chunkKeysOnly u k
+ return $ if noChunks chunkconfig
+ then [k] : l
+ else l ++ [[k]]
+
+chunkKeysOnly :: UUID -> Key -> Annex [[Key]]
+chunkKeysOnly u k = map (toChunkList k) <$> getCurrentChunks u k
+
+toChunkList :: Key -> (ChunkMethod, ChunkCount) -> [Key]
+toChunkList k (FixedSizeChunks chunksize, chunkcount) =
+ takeChunkKeyStream chunkcount $ chunkKeyStream k chunksize
+toChunkList _ (UnknownChunks _, _) = []
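
A pure model of the resume bookkeeping may help. seekResume walks the chunk key stream, asking the checker about each chunk and accumulating the byte offset of the first missing one; setupResume does the equivalent for downloads, using the dest file's size. A simplified, self-contained sketch (DemoKey is a hypothetical stand-in for Key):

-- Minimal model of seekResume's seek-forward scan, using a
-- hypothetical key type that carries only chunk metadata.
data DemoKey = DemoKey
	{ chunkNum :: Integer
	, chunkSz :: Integer
	} deriving (Show)

-- Infinite stream of chunk keys, like chunkKeyStream.
demoStream :: Integer -> [DemoKey]
demoStream sz = [ DemoKey n sz | n <- [1..] ]

-- Scan past chunks the checker reports as present; return the rest
-- of the stream and the bytes that can be skipped on resume.
seekDemo :: (DemoKey -> Bool) -> [DemoKey] -> ([DemoKey], Integer)
seekDemo present = go 0
  where
	go pos (k:ks)
		| present k = go (pos + chunkSz k) ks
		| otherwise = (k:ks, pos)
	go pos [] = ([], pos)

-- eg, with 1 MiB chunks and the first two chunks already stored:
--   seekDemo ((<= 2) . chunkNum) (take 5 (demoStream (1024*1024)))
-- resumes at chunk 3, having skipped 2 MiB.
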
diff --git a/Remote/Helper/Chunked/Legacy.hs b/Remote/Helper/Chunked/Legacy.hs
index 1ec0eb68f..4f402705a 100644
--- a/Remote/Helper/Chunked/Legacy.hs
+++ b/Remote/Helper/Chunked/Legacy.hs
@@ -9,6 +9,7 @@ module Remote.Helper.Chunked.Legacy where
import Common.Annex
import Remote.Helper.Chunked
+import Utility.Metered
import qualified Data.ByteString.Lazy as L
import qualified Control.Exception as E
@@ -73,7 +74,7 @@ storeChunks key tmp dest storer recorder finalizer = either onerr return
finalizer tmp dest
return (not $ null stored)
onerr e = do
- print e
+ warningIO (show e)
return False
basef = tmp ++ keyFile key
@@ -104,7 +105,7 @@ storeChunked chunksize dests storer content = either onerr return
| otherwise = storechunks sz [] dests content
onerr e = do
- print e
+ warningIO (show e)
return []
storechunks _ _ [] _ = return [] -- ran out of dests
@@ -114,3 +115,12 @@ storeChunked chunksize dests storer content = either onerr return
let (chunk, b') = L.splitAt sz b
storer d chunk
storechunks sz (d:useddests) ds b'
+
+{- Writes a series of chunks to a file. The feeder is called to get
+ - each chunk.
+ -}
+meteredWriteFileChunks :: MeterUpdate -> FilePath -> [v] -> (v -> IO L.ByteString) -> IO ()
+meteredWriteFileChunks meterupdate dest chunks feeder =
+ withBinaryFile dest WriteMode $ \h ->
+ forM_ chunks $
+ meteredWrite meterupdate h <=< feeder
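
meteredWriteFileChunks moves into this legacy module since only legacy-chunked code paths (here, and WebDAV below) still reassemble a file from a list of stored chunks. A minimal usage sketch, with a hypothetical list of already-fetched chunk files:

-- Reassemble dest from chunk files on disk, feeding the meter as each
-- chunk is written (hypothetical helper; compare Remote.WebDAV below).
reassemble :: MeterUpdate -> FilePath -> [FilePath] -> IO ()
reassemble meterupdate dest chunkfiles =
	meteredWriteFileChunks meterupdate dest chunkfiles L.readFile
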
diff --git a/Remote/Helper/ChunkedEncryptable.hs b/Remote/Helper/ChunkedEncryptable.hs
new file mode 100644
index 000000000..2a844212b
--- /dev/null
+++ b/Remote/Helper/ChunkedEncryptable.hs
@@ -0,0 +1,200 @@
+{- Remotes that support both chunking and encryption.
+ -
+ - Copyright 2014 Joey Hess <joey@kitenet.net>
+ -
+ - Licensed under the GNU GPL version 3 or higher.
+ -}
+
+{-# LANGUAGE RankNTypes #-}
+
+module Remote.Helper.ChunkedEncryptable (
+ Preparer,
+ Storer,
+ Retriever,
+ simplyPrepare,
+ ContentSource,
+ checkPrepare,
+ fileStorer,
+ byteStorer,
+ fileRetriever,
+ byteRetriever,
+ storeKeyDummy,
+ retreiveKeyFileDummy,
+ chunkedEncryptableRemote,
+ module X
+) where
+
+import Common.Annex
+import Types.StoreRetrieve
+import Types.Remote
+import Crypto
+import Config.Cost
+import Utility.Metered
+import Remote.Helper.Chunked as X
+import Remote.Helper.Encryptable as X
+import Annex.Content
+import Annex.Exception
+
+import qualified Data.ByteString.Lazy as L
+import Control.Exception (bracket)
+
+-- Use when nothing needs to be done to prepare a helper.
+simplyPrepare :: helper -> Preparer helper
+simplyPrepare helper _ a = a $ Just helper
+
+-- Use to run a check when preparing a helper.
+checkPrepare :: (Key -> Annex Bool) -> helper -> Preparer helper
+checkPrepare checker helper k a = ifM (checker k)
+ ( a (Just helper)
+ , a Nothing
+ )
+
+-- A Storer that expects to be provided with a file containing
+-- the content of the key to store.
+fileStorer :: (Key -> FilePath -> MeterUpdate -> Annex Bool) -> Storer
+fileStorer a k (FileContent f) m = a k f m
+fileStorer a k (ByteContent b) m = withTmp k $ \f -> do
+ liftIO $ L.writeFile f b
+ a k f m
+
+-- A Storer that expects to be provided with a L.ByteString of
+-- the content to store.
+byteStorer :: (Key -> L.ByteString -> MeterUpdate -> Annex Bool) -> Storer
+byteStorer a k c m = withBytes c $ \b -> a k b m
+
+-- A Retriever that writes the content of a Key to a provided file.
+-- It is responsible for updating the progress meter as it retrieves data.
+fileRetriever :: (FilePath -> Key -> MeterUpdate -> Annex ()) -> Retriever
+fileRetriever a k m callback = do
+ f <- prepTmp k
+ a f k m
+ callback (FileContent f)
+
+-- A Retriever that generates a L.ByteString containing the Key's content.
+byteRetriever :: (Key -> Annex L.ByteString) -> Retriever
+byteRetriever a k _m callback = callback =<< (ByteContent <$> a k)
+
+{- The base Remote that is provided to chunkedEncryptableRemote
+ - needs to have storeKey and retrieveKeyFile methods, but they are
+ - never actually used (since chunkedEncryptableRemote replaces
+ - them). Here are some dummy ones.
+ -}
+storeKeyDummy :: Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+storeKeyDummy _ _ _ = return False
+retreiveKeyFileDummy :: Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Bool
+retreiveKeyFileDummy _ _ _ _ = return False
+
+-- Modifies a base Remote to support both chunking and encryption.
+chunkedEncryptableRemote
+ :: RemoteConfig
+ -> Preparer Storer
+ -> Preparer Retriever
+ -> Remote
+ -> Remote
+chunkedEncryptableRemote c preparestorer prepareretriever baser = encr
+ where
+ encr = baser
+ { storeKey = \k _f p -> cip >>= storeKeyGen k p
+ , retrieveKeyFile = \k _f d p -> cip >>= retrieveKeyFileGen k d p
+ , retrieveKeyFileCheap = \k d -> cip >>= maybe
+ (retrieveKeyFileCheap baser k d)
+ (\_ -> return False)
+ , removeKey = \k -> cip >>= removeKeyGen k
+ , hasKey = \k -> cip >>= hasKeyGen k
+ , cost = maybe
+ (cost baser)
+ (const $ cost baser + encryptedRemoteCostAdj)
+ (extractCipher c)
+ }
+ cip = cipherKey c
+ chunkconfig = chunkConfig c
+ gpgopts = getGpgEncParams encr
+
+ safely a = catchNonAsyncAnnex a (\e -> warning (show e) >> return False)
+
+ -- chunk, then encrypt, then feed to the storer
+ storeKeyGen k p enc =
+ safely $ preparestorer k $ safely . go
+ where
+ go (Just storer) = sendAnnex k rollback $ \src ->
+ metered (Just p) k $ \p' ->
+ storeChunks (uuid baser) chunkconfig k src p'
+ (storechunk enc storer)
+ (hasKey baser)
+ go Nothing = return False
+ rollback = void $ removeKey encr k
+
+ storechunk Nothing storer k content p = storer k content p
+ storechunk (Just (cipher, enck)) storer k content p =
+ withBytes content $ \b ->
+ encrypt gpgopts cipher (feedBytes b) $
+ readBytes $ \encb ->
+ storer (enck k) (ByteContent encb) p
+
+ -- call retriever to get chunks; decrypt them; stream to dest file
+ retrieveKeyFileGen k dest p enc =
+ safely $ prepareretriever k $ safely . go
+ where
+ go (Just retriever) = metered (Just p) k $ \p' ->
+ retrieveChunks retriever (uuid baser) chunkconfig
+ enck k dest p' (sink dest enc)
+ go Nothing = return False
+ enck = maybe id snd enc
+
+ removeKeyGen k enc = removeChunks remover (uuid baser) chunkconfig enck k
+ where
+ enck = maybe id snd enc
+ remover = removeKey baser
+
+ hasKeyGen k enc = hasKeyChunks checker (uuid baser) chunkconfig enck k
+ where
+ enck = maybe id snd enc
+ checker = hasKey baser
+
+{- Sink callback for retrieveChunks. Stores the file content into the
+ - provided Handle, decrypting it first if necessary.
+ -
+ - If the remote did not store the content using chunks, no Handle
+ - will be provided, and it's up to us to open the destination file.
+ -
+ - Note that when neither chunking nor encryption is used, and the remote
+ - provides FileContent, that file only needs to be renamed
+ - into place. (And it may even already be in the right place..)
+ -}
+sink
+ :: FilePath
+ -> Maybe (Cipher, EncKey)
+ -> Maybe Handle
+ -> Maybe MeterUpdate
+ -> ContentSource
+ -> Annex Bool
+sink dest enc mh mp content = do
+ case (enc, mh, content) of
+ (Nothing, Nothing, FileContent f)
+ | f == dest -> noop
+ | otherwise -> liftIO $ moveFile f dest
+ (Just (cipher, _), _, ByteContent b) ->
+ decrypt cipher (feedBytes b) $
+ readBytes write
+ (Just (cipher, _), _, FileContent f) -> do
+ withBytes content $ \b ->
+ decrypt cipher (feedBytes b) $
+ readBytes write
+ liftIO $ nukeFile f
+ (Nothing, _, FileContent f) -> do
+ withBytes content write
+ liftIO $ nukeFile f
+ (Nothing, _, ByteContent b) -> write b
+ return True
+ where
+ write b = case mh of
+ Just h -> liftIO $ b `streamto` h
+ Nothing -> liftIO $ bracket opendest hClose (b `streamto`)
+ streamto b h = case mp of
+ Just p -> meteredWrite p h b
+ Nothing -> L.hPut h b
+ opendest = openBinaryFile dest WriteMode
+
+withBytes :: ContentSource -> (L.ByteString -> Annex a) -> Annex a
+withBytes (ByteContent b) a = a b
+withBytes (FileContent f) a = a =<< liftIO (L.readFile f)
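
The layering that storechunk establishes above is: split the content into chunks first, then encrypt each chunk independently, so each object stored on the remote is at most one chunk long. A pure model of that pipeline, with encryptChunk standing in for the gpg pipeline (and ignoring the empty/unchunked special cases):

import Data.Int (Int64)
import qualified Data.ByteString.Lazy as L

-- Pure model of the store pipeline: chunk first, then encrypt each
-- chunk on its own, mirroring storeChunks + storechunk.
storeModel :: Int64 -> (L.ByteString -> L.ByteString) -> L.ByteString -> [L.ByteString]
storeModel chunksize encryptChunk = map encryptChunk . splitchunks
  where
	splitchunks b
		| L.null b = []
		| otherwise = let (c, rest) = L.splitAt chunksize b
			      in c : splitchunks rest
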
diff --git a/Remote/Helper/Encryptable.hs b/Remote/Helper/Encryptable.hs
index 41174cf7c..65a3ba284 100644
--- a/Remote/Helper/Encryptable.hs
+++ b/Remote/Helper/Encryptable.hs
@@ -66,44 +66,45 @@ encryptionSetup c = maybe genCipher updateCipher $ extractCipher c
c' = foldr M.delete c
-- git-annex used to remove 'encryption' as well, since
-- it was redundant; we now need to keep it for
- -- public-key incryption, hence we leave it on newer
+ -- public-key encryption, hence we leave it on newer
-- remotes (while being backward-compatible).
[ "keyid", "keyid+", "keyid-", "highRandomQuality" ]
-{- Modifies a Remote to support encryption.
- -
- - Two additional functions must be provided by the remote,
- - to support storing and retrieving encrypted content. -}
+{- Modifies a Remote to support encryption. -}
+-- TODO: deprecated
encryptableRemote
:: RemoteConfig
-> ((Cipher, Key) -> Key -> MeterUpdate -> Annex Bool)
-> ((Cipher, Key) -> Key -> FilePath -> MeterUpdate -> Annex Bool)
-> Remote
-> Remote
-encryptableRemote c storeKeyEncrypted retrieveKeyFileEncrypted r =
- r {
- storeKey = store,
- retrieveKeyFile = retrieve,
- retrieveKeyFileCheap = retrieveCheap,
- removeKey = withkey $ removeKey r,
- hasKey = withkey $ hasKey r,
- cost = maybe
- (cost r)
- (const $ cost r + encryptedRemoteCostAdj)
- (extractCipher c)
- }
- where
- store k f p = cip k >>= maybe
+encryptableRemote c storeKeyEncrypted retrieveKeyFileEncrypted r = r
+ { storeKey = \k f p -> cip k >>= maybe
(storeKey r k f p)
- (\enck -> storeKeyEncrypted enck k p)
- retrieve k f d p = cip k >>= maybe
+ (\v -> storeKeyEncrypted v k p)
+ , retrieveKeyFile = \k f d p -> cip k >>= maybe
(retrieveKeyFile r k f d p)
- (\enck -> retrieveKeyFileEncrypted enck k d p)
- retrieveCheap k d = cip k >>= maybe
+ (\v -> retrieveKeyFileEncrypted v k d p)
+ , retrieveKeyFileCheap = \k d -> cip k >>= maybe
(retrieveKeyFileCheap r k d)
(\_ -> return False)
- withkey a k = cip k >>= maybe (a k) (a . snd)
- cip = cipherKey c
+ , removeKey = \k -> cip k >>= maybe
+ (removeKey r k)
+ (\(_, enckey) -> removeKey r enckey)
+ , hasKey = \k -> cip k >>= maybe
+ (hasKey r k)
+ (\(_, enckey) -> hasKey r enckey)
+ , cost = maybe
+ (cost r)
+ (const $ cost r + encryptedRemoteCostAdj)
+ (extractCipher c)
+ }
+ where
+ cip k = do
+ v <- cipherKey c
+ return $ case v of
+ Nothing -> Nothing
+ Just (cipher, enck) -> Just (cipher, enck k)
{- Gets encryption Cipher. The decrypted Ciphers are cached in the Annex
- state. -}
@@ -136,11 +137,11 @@ embedCreds c
| isJust (M.lookup "cipherkeys" c) && isJust (M.lookup "cipher" c) = True
| otherwise = False
-{- Gets encryption Cipher, and encrypted version of Key. -}
-cipherKey :: RemoteConfig -> Key -> Annex (Maybe (Cipher, Key))
-cipherKey c k = fmap make <$> remoteCipher c
+{- Gets encryption Cipher, and key encryptor. -}
+cipherKey :: RemoteConfig -> Annex (Maybe (Cipher, EncKey))
+cipherKey c = fmap make <$> remoteCipher c
where
- make ciphertext = (ciphertext, encryptKey mac ciphertext k)
+ make ciphertext = (ciphertext, encryptKey mac ciphertext)
mac = fromMaybe defaultMac $ M.lookup "mac" c >>= readMac
{- Stores an StorableCipher in a remote's configuration. -}
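
The payoff of the new cipherKey shape: the cipher lookup (which may shell out to gpg) happens once, and the pure EncKey encryptor (Key -> Key, per Crypto) is then mapped over whole chunk key lists, as removeChunks and hasKeyChunks do. A sketch with a hypothetical batch helper:

-- Encrypt a batch of keys with a single cipher lookup (hypothetical
-- helper, not part of this patch).
encryptKeys :: RemoteConfig -> [Key] -> Annex [Key]
encryptKeys c ks = do
	v <- cipherKey c
	return $ case v of
		Nothing -> ks -- encryption not configured
		Just (_cipher, encryptor) -> map encryptor ks
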
diff --git a/Remote/WebDAV.hs b/Remote/WebDAV.hs
index 4be7e4701..d6644cdc7 100644
--- a/Remote/WebDAV.hs
+++ b/Remote/WebDAV.hs
@@ -5,7 +5,7 @@
- Licensed under the GNU GPL version 3 or higher.
-}
-{-# LANGUAGE ScopedTypeVariables, CPP #-}
+{-# LANGUAGE ScopedTypeVariables #-}
module Remote.WebDAV (remote, davCreds, configUrl) where
@@ -16,11 +16,7 @@ import qualified Data.ByteString.Lazy.UTF8 as L8
import qualified Data.ByteString.Lazy as L
import qualified Control.Exception as E
import qualified Control.Exception.Lifted as EL
-#if MIN_VERSION_DAV(0,6,0)
import Network.HTTP.Client (HttpException(..))
-#else
-import Network.HTTP.Conduit (HttpException(..))
-#endif
import Network.HTTP.Types
import System.Log.Logger (debugM)
import System.IO.Error
@@ -113,7 +109,7 @@ storeHelper :: Remote -> Key -> DavUrl -> DavUser -> DavPass -> L.ByteString ->
storeHelper r k baseurl user pass b = catchBoolIO $ do
mkdirRecursiveDAV tmpurl user pass
case chunkconfig of
- NoChunks -> flip catchNonAsync (\e -> print e >> return False) $ do
+ NoChunks -> flip catchNonAsync (\e -> warningIO (show e) >> return False) $ do
storehttp tmpurl b
finalizer tmpurl keyurl
return True
@@ -140,7 +136,7 @@ retrieve :: Remote -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex
retrieve r k _f d p = metered (Just p) k $ \meterupdate ->
davAction r False $ \(baseurl, user, pass) -> liftIO $ catchBoolIO $
withStoredFiles r k baseurl user pass onerr $ \urls -> do
- meteredWriteFileChunks meterupdate d urls $ \url -> do
+ Legacy.meteredWriteFileChunks meterupdate d urls $ \url -> do
mb <- getDAV url user pass
case mb of
Nothing -> throwIO "download failed"
@@ -308,57 +304,37 @@ debugDAV :: DavUrl -> String -> IO ()
debugDAV msg url = debugM "DAV" $ msg ++ " " ++ url
{---------------------------------------------------------------------
- - Low-level DAV operations, using the new DAV monad when available.
+ - Low-level DAV operations.
---------------------------------------------------------------------}
putDAV :: DavUrl -> DavUser -> DavPass -> L.ByteString -> IO ()
putDAV url user pass b = do
debugDAV "PUT" url
-#if MIN_VERSION_DAV(0,6,0)
goDAV url user pass $ putContentM (contentType, b)
-#else
- putContent url user pass (contentType, b)
-#endif
getDAV :: DavUrl -> DavUser -> DavPass -> IO (Maybe L.ByteString)
getDAV url user pass = do
debugDAV "GET" url
eitherToMaybe <$> tryNonAsync go
where
-#if MIN_VERSION_DAV(0,6,0)
go = goDAV url user pass $ snd <$> getContentM
-#else
- go = snd . snd <$> getPropsAndContent url user pass
-#endif
deleteDAV :: DavUrl -> DavUser -> DavPass -> IO ()
deleteDAV url user pass = do
debugDAV "DELETE" url
-#if MIN_VERSION_DAV(0,6,0)
goDAV url user pass delContentM
-#else
- deleteContent url user pass
-#endif
moveDAV :: DavUrl -> DavUrl -> DavUser -> DavPass -> IO ()
moveDAV url newurl user pass = do
debugDAV ("MOVE to " ++ newurl ++ " from ") url
-#if MIN_VERSION_DAV(0,6,0)
goDAV url user pass $ moveContentM newurl'
-#else
- moveContent url newurl' user pass
-#endif
where
newurl' = B8.fromString newurl
mkdirDAV :: DavUrl -> DavUser -> DavPass -> IO Bool
mkdirDAV url user pass = do
debugDAV "MKDIR" url
-#if MIN_VERSION_DAV(0,6,0)
goDAV url user pass mkCol
-#else
- makeCollection url user pass
-#endif
existsDAV :: DavUrl -> DavUser -> DavPass -> IO (Either String Bool)
existsDAV url user pass = do
@@ -366,35 +342,19 @@ existsDAV url user pass = do
either (Left . show) id <$> tryNonAsync check
where
ispresent = return . Right
-#if MIN_VERSION_DAV(0,6,0)
check = goDAV url user pass $ do
setDepth Nothing
EL.catchJust
(matchStatusCodeException notFound404)
(getPropsM >> ispresent True)
(const $ ispresent False)
-#else
- check = E.catchJust
- (matchStatusCodeException notFound404)
-#if ! MIN_VERSION_DAV(0,4,0)
- (getProps url user pass >> ispresent True)
-#else
- (getProps url user pass Nothing >> ispresent True)
-#endif
- (const $ ispresent False)
-#endif
matchStatusCodeException :: Status -> HttpException -> Maybe ()
-#if MIN_VERSION_DAV(0,6,0)
matchStatusCodeException want (StatusCodeException s _ _)
-#else
-matchStatusCodeException want (StatusCodeException s _)
-#endif
| s == want = Just ()
| otherwise = Nothing
matchStatusCodeException _ _ = Nothing
-#if MIN_VERSION_DAV(0,6,0)
goDAV :: DavUrl -> DavUser -> DavPass -> DAVT IO a -> IO a
goDAV url user pass a = choke $ evalDAVT url $ do
setResponseTimeout Nothing -- disable default (5 second!) timeout
@@ -407,4 +367,3 @@ goDAV url user pass a = choke $ evalDAVT url $ do
case x of
Left e -> error e
Right r -> return r
-#endif