author     Joey Hess <joey@kitenet.net>  2012-03-04 03:17:03 -0400
committer  Joey Hess <joey@kitenet.net>  2012-03-04 03:17:25 -0400
commit     9856c24a5996f2d493c559cd9ea6b27b8127694a (patch)
tree       de50fea60e3e0eeb0cb8303cbf25bb2a0415034b
parent     8fc533643d0acd5cddbdfede1a438a84c57329ba (diff)
Add progress bar display to the directory special remote.
So far I've only written progress bars for sending files, not yet
receiving.
No longer uses external cp at all. ByteString IO is fast enough.
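Not code from this commit, but a minimal sketch of the "ByteString IO is fast enough" point: stream a file's lazy ByteString chunks through a write loop and report progress after each chunk, instead of shelling out to cp. The function copyWithProgress and the file names in main are invented for illustration.

    import qualified Data.ByteString as S
    import qualified Data.ByteString.Lazy as L
    import System.IO

    -- Copy src to dest chunk by chunk, reporting the running byte count
    -- after each strict chunk (lazy reads arrive in roughly 64 kB pieces).
    copyWithProgress :: (Integer -> IO ()) -> FilePath -> FilePath -> IO ()
    copyWithProgress report src dest =
        withFile dest WriteMode $ \h ->
            go h 0 . L.toChunks =<< L.readFile src
      where
        go _ _ [] = return ()
        go h done (c:cs) = do
            S.hPut h c
            let done' = done + toInteger (S.length c)
            report done'
            go h done' cs

    main :: IO ()
    main = copyWithProgress (\n -> putStrLn $ show n ++ " bytes") "in.dat" "out.dat"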
-rw-r--r--  Messages.hs          25
-rw-r--r--  Remote/Directory.hs  91
-rw-r--r--  Remote/S3.hs          4
-rw-r--r--  debian/changelog      1
4 files changed, 86 insertions, 35 deletions
diff --git a/Messages.hs b/Messages.hs
index 1b51cf23e..e8bbfed13 100644
--- a/Messages.hs
+++ b/Messages.hs
@@ -10,6 +10,8 @@ module Messages (
     showNote,
     showAction,
     showProgress,
+    metered,
+    MeterUpdate,
     showSideAction,
     showOutput,
     showLongNote,
@@ -29,9 +31,13 @@ module Messages (
 ) where
 
 import Text.JSON
+import Data.Progress.Meter
+import Data.Progress.Tracker
+import Data.Quantity
 
 import Common
 import Types
+import Types.Key
 import qualified Annex
 import qualified Messages.JSON as JSON
 
@@ -46,10 +52,29 @@ showNote s = handle (JSON.note s) $
 showAction :: String -> Annex ()
 showAction s = showNote $ s ++ "..."
 
+{- Progress dots. -}
 showProgress :: Annex ()
 showProgress = handle q $
     flushed $ putStr "."
 
+{- Shows a progress meter while performing a transfer of a key.
+ - The action is passed a callback to use to update the meter. -}
+type MeterUpdate = Integer -> IO ()
+metered :: Key -> (MeterUpdate -> Annex a) -> Annex a
+metered key a = Annex.getState Annex.output >>= go (keySize key)
+    where
+        go (Just size) Annex.NormalOutput = do
+            progress <- liftIO $ newProgress "" size
+            meter <- liftIO $ newMeter progress "B" 20 (renderNums binaryOpts 1)
+            showOutput
+            liftIO $ displayMeter stdout meter
+            r <- a $ \n -> liftIO $ do
+                incrP progress n
+                displayMeter stdout meter
+            liftIO $ clearMeter stdout meter
+            return r
+        go _ _ = a (const $ return ())
+
 showSideAction :: String -> Annex ()
 showSideAction s = handle q $
     putStrLn $ "(" ++ s ++ "...)"
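For orientation, a self-contained sketch of the callback shape that metered hands to its action: the action reports how many bytes it just handled, and the wrapper accumulates and displays a running total. The real helper drives MissingH's Data.Progress.Meter inside the Annex monad; withSimpleMeter below is an invented plain-IO stand-in.

    import Data.IORef
    import System.IO

    -- Same shape as the MeterUpdate added above: the callback receives the
    -- number of bytes just transferred, not an absolute position.
    type MeterUpdate = Integer -> IO ()

    withSimpleMeter :: Integer -> (MeterUpdate -> IO a) -> IO a
    withSimpleMeter total action = do
        hSetBuffering stdout NoBuffering
        done <- newIORef 0
        r <- action $ \n -> do
            modifyIORef done (+ n)
            d <- readIORef done
            putStr $ "\r" ++ show d ++ " / " ++ show total ++ " bytes"
        putStrLn ""
        return r

    -- e.g. withSimpleMeter 300 $ \update -> mapM_ update [100, 100, 100]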
diff --git a/Remote/Directory.hs b/Remote/Directory.hs
index d5394fe51..ab2a064ec 100644
--- a/Remote/Directory.hs
+++ b/Remote/Directory.hs
@@ -8,7 +8,9 @@
 module Remote.Directory (remote) where
 
 import qualified Data.ByteString.Lazy.Char8 as L
+import qualified Data.ByteString.Char8 as S
 import qualified Data.Map as M
+import Control.Exception (bracket)
 
 import Common.Annex
 import Utility.CopyFile
@@ -129,46 +131,71 @@ withStoredFiles = withCheckedFiles doesFileExist
 
 store :: FilePath -> ChunkSize -> Key -> Annex Bool
 store d chunksize k = do
     src <- inRepo $ gitAnnexLocation k
-    liftIO $ catchBoolIO $ storeHelper d chunksize k $ \dests ->
-        case chunksize of
-            Nothing -> do
-                let dest = Prelude.head dests
-                ok <- copyFileExternal src dest
-                return $ if ok then [dest] else []
-            Just _ -> storeSplit chunksize dests =<< L.readFile src
-
-storeEncrypted :: FilePath -> ChunkSize -> (Cipher, Key) -> Key -> Annex Bool
-storeEncrypted d chunksize (cipher, enck) k = do
-    src <- inRepo $ gitAnnexLocation k
-    liftIO $ catchBoolIO $ storeHelper d chunksize enck $ encrypt src
-    where
-        encrypt src dests = withEncryptedContent cipher (L.readFile src) $ \s ->
+    metered k $ \meterupdate ->
+        liftIO $ catchBoolIO $ storeHelper d chunksize k $ \dests ->
             case chunksize of
                 Nothing -> do
                     let dest = Prelude.head dests
-                    L.writeFile dest s
+                    meteredWriteFile meterupdate dest
+                        =<< L.readFile src
                     return [dest]
-                Just _ -> storeSplit chunksize dests s
+                Just _ ->
+                    storeSplit meterupdate chunksize dests
+                        =<< L.readFile src
 
-{- Splits a ByteString into chunks and writes to dests.
+storeEncrypted :: FilePath -> ChunkSize -> (Cipher, Key) -> Key -> Annex Bool
+storeEncrypted d chunksize (cipher, enck) k = do
+    src <- inRepo $ gitAnnexLocation k
+    metered k $ \meterupdate ->
+        liftIO $ catchBoolIO $ storeHelper d chunksize enck $ \dests ->
+            withEncryptedContent cipher (L.readFile src) $ \s ->
+                case chunksize of
+                    Nothing -> do
+                        let dest = Prelude.head dests
+                        meteredWriteFile meterupdate dest s
+                        return [dest]
+                    Just _ -> storeSplit meterupdate chunksize dests s
+
+{- Splits a ByteString into chunks and writes to dests, obeying configured
+ - chunk size (not to be confused with the L.ByteString chunk size).
  - Note: Must always write at least one file, even for empty ByteString. -}
-storeSplit :: ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
-storeSplit Nothing _ _ = error "bad storeSplit call"
-storeSplit _ [] _ = error "bad storeSplit call"
-storeSplit (Just chunksize) alldests@(firstdest:_) s
-    | L.null s = do
+storeSplit :: MeterUpdate -> ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
+storeSplit _ Nothing _ _ = error "bad storeSplit call"
+storeSplit _ _ [] _ = error "bad storeSplit call"
+storeSplit meterupdate (Just chunksize) alldests@(firstdest:_) b
+    | L.null b = do
         -- must always write at least one file, even for empty
-        L.writeFile firstdest s
+        L.writeFile firstdest b
         return [firstdest]
-    | otherwise = storeSplit' chunksize alldests s []
-storeSplit' :: Int64 -> [FilePath] -> L.ByteString -> [FilePath] -> IO [FilePath]
-storeSplit' _ [] _ _ = error "expected an infinite list"
-storeSplit' chunksize (d:dests) s c
-    | L.null s = return $ reverse c
-    | otherwise = do
-        let (chunk, rest) = L.splitAt chunksize s
-        L.writeFile d chunk
-        storeSplit' chunksize dests rest (d:c)
+    | otherwise = storeSplit' meterupdate chunksize alldests (L.toChunks b) []
+storeSplit' :: MeterUpdate -> Int64 -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
+storeSplit' _ _ [] _ _ = error "ran out of dests"
+storeSplit' _ _ _ [] c = return $ reverse c
+storeSplit' meterupdate chunksize (d:dests) bs c = do
+    bs' <- bracket (openFile d WriteMode) hClose (feed chunksize bs)
+    storeSplit' meterupdate chunksize dests bs' (d:c)
+    where
+        feed _ [] _ = return []
+        feed sz (l:ls) h = do
+            let s = fromIntegral $ S.length l
+            if s <= sz
+                then do
+                    S.hPut h l
+                    meterupdate $ toInteger s
+                    feed (sz - s) ls h
+                else return (l:ls)
+
+{- Write a L.ByteString to a file, updating a progress meter
+ - after each chunk of the L.ByteString, typically every 64 kb or so. -}
+meteredWriteFile :: MeterUpdate -> FilePath -> L.ByteString -> IO ()
+meteredWriteFile meterupdate dest b =
+    bracket (openFile dest WriteMode) hClose (feed $ L.toChunks b)
+    where
+        feed [] _ = return ()
+        feed (l:ls) h = do
+            S.hPut h l
+            meterupdate $ toInteger $ S.length l
+            feed ls h
 
 {- Generates a list of destinations to write to in order to store a key.
  - When chunksize is specified, this list will be a list of chunks.
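The chunking logic can also be read in isolation. Here is a hedged, standalone sketch of the same idea; splitWrite, its numbered file naming, and the plain Integer callback are invented for this example, it reports once per chunk file rather than per strict chunk, and unlike the real storeSplit it writes nothing for empty input.

    import Data.Int (Int64)
    import qualified Data.ByteString.Lazy as L

    -- Cut a lazy ByteString into pieces of at most chunksize bytes, write
    -- each piece to its own numbered file, and report each piece's size
    -- to the progress callback as it lands on disk.
    splitWrite :: (Integer -> IO ()) -> Int64 -> FilePath -> L.ByteString -> IO [FilePath]
    splitWrite report chunksize base = go (1 :: Int) []
      where
        go n written b
            | L.null b = return (reverse written)
            | otherwise = do
                let (piece, rest) = L.splitAt chunksize b
                    dest = base ++ "." ++ show n
                L.writeFile dest piece
                report (toInteger (L.length piece))
                go (n + 1) (dest : written) rest

    -- e.g. splitWrite print (1024 * 1024) "/tmp/chunk" =<< L.readFile "big.iso"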
diff --git a/Remote/S3.hs b/Remote/S3.hs
index 812345b00..523edef65 100644
--- a/Remote/S3.hs
+++ b/Remote/S3.hs
@@ -144,9 +144,7 @@ storeHelper (conn, bucket) r k file = do
             case fromJust $ M.lookup "storageclass" $ fromJust $ config r of
                 "REDUCED_REDUNDANCY" -> REDUCED_REDUNDANCY
                 _ -> STANDARD
-        getsize = do
-            s <- liftIO $ getFileStatus file
-            return $ fileSize s
+        getsize = fileSize <$> (liftIO $ getFileStatus file)
         xheaders = filter isxheader $ M.assocs $ fromJust $ config r
         isxheader (h, _) = "x-amz-" `isPrefixOf` h
 
diff --git a/debian/changelog b/debian/changelog
index d5706d7be..7b9db5418 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -4,6 +4,7 @@ git-annex (3.20120230) UNRELEASED; urgency=low
     which can read better than the old "." (which still works too).
   * Directory special remotes now support chunking files written to them,
     avoiding writing files larger than a specified size.
+  * Add progress bar display to the directory special remote.
 
  -- Joey Hess <joeyh@debian.org>  Thu, 01 Mar 2012 22:34:27 -0400