diff options
40 files changed, 608 insertions, 74 deletions
diff --git a/Annex/LockPool/PosixOrPid.hs b/Annex/LockPool/PosixOrPid.hs index ecf96d51f..c788f6fa0 100644 --- a/Annex/LockPool/PosixOrPid.hs +++ b/Annex/LockPool/PosixOrPid.hs @@ -47,8 +47,14 @@ tryLockExclusive :: Maybe FileMode -> LockFile -> Annex (Maybe LockHandle) tryLockExclusive m f = tryPidLock m f $ Posix.tryLockExclusive m f checkLocked :: LockFile -> Annex (Maybe Bool) -checkLocked f = Posix.checkLocked f - `pidLockCheck` Pid.checkLocked +checkLocked f = Posix.checkLocked f `pidLockCheck` checkpid + where + checkpid pidlock = do + v <- Pid.checkLocked pidlock + case v of + -- Only return true when the posix lock file exists. + Just _ -> Posix.checkLocked f + Nothing -> return Nothing getLockStatus :: LockFile -> Annex LockStatus getLockStatus f = Posix.getLockStatus f @@ -88,6 +94,6 @@ tryPidLock m f posixlock = liftIO . go =<< pidLockFile -- The posix lock file is created even when using pid locks, in order to -- avoid complicating any code that might expect to be able to see that --- lock file. +-- lock file. But, it's not locked. dummyPosixLock :: Maybe FileMode -> LockFile -> IO () dummyPosixLock m f = closeFd =<< openLockFile ReadLock m f diff --git a/Utility/LockFile/PidLock.hs b/Utility/LockFile/PidLock.hs index 53eb5a54f..a21014c0e 100644 --- a/Utility/LockFile/PidLock.hs +++ b/Utility/LockFile/PidLock.hs @@ -201,7 +201,7 @@ checkInsaneLustre dest = do -- -- Uses a 1 second wait-loop. -- --- May wait untie timeout if the lock file is stale and is on a network file +-- May wait until timeout if the lock file is stale and is on a network file -- system, or on a system where the side lock cannot be taken. waitLock :: Seconds -> LockFile -> IO LockHandle waitLock (Seconds timeout) lockfile = go timeout diff --git a/Utility/LockPool.hs b/Utility/LockPool.hs index 2a4ac5101..7dbabb91a 100644 --- a/Utility/LockPool.hs +++ b/Utility/LockPool.hs @@ -7,15 +7,13 @@ - the lock will be released, despite the first thread still having the - lockfile open. 
- - - Or, if a process is already holding an exclusive lock on a file, an + - Or, if a process is already holding an exclusive lock on a file, and - re-opens it and tries to take another exclusive lock, it won't block - on the first lock. - - To avoid these problems, this implements a lock pool. This keeps track - - of which lock files are being used by the process, and avoids - - re-opening them. Instead, if a lockfile is in use by the current - - process, STM is used to handle further concurrent uses of that lock - - file. + - of which lock files are being used by the process, using STM to handle + - inter-process locking. - - Note that, like Utility.LockFile, this does *not* attempt to be a - portability shim; the native locking of the OS is used. diff --git a/Utility/LockPool/LockHandle.hs b/Utility/LockPool/LockHandle.hs index 68c979b5d..34446ff52 100644 --- a/Utility/LockPool/LockHandle.hs +++ b/Utility/LockPool/LockHandle.hs @@ -7,20 +7,24 @@ {-# LANGUAGE CPP #-} -module Utility.LockPool.LockHandle where +module Utility.LockPool.LockHandle ( + LockHandle, + FileLockOps(..), + dropLock, +#ifndef mingw32_HOST_OS + checkSaneLock, +#endif + makeLockHandle, + tryMakeLockHandle, +) where import qualified Utility.LockPool.STM as P -#ifndef mingw32_HOST_OS import Utility.LockPool.STM (LockFile) -#endif import Control.Concurrent.STM import Control.Exception -data LockHandle = LockHandle - { poolHandle :: P.LockHandle - , fileLockOps :: FileLockOps - } +data LockHandle = LockHandle P.LockHandle FileLockOps data FileLockOps = FileLockOps { fDropLock :: IO () @@ -30,7 +34,7 @@ data FileLockOps = FileLockOps } dropLock :: LockHandle -> IO () -dropLock h = P.releaseLock (poolHandle h) (fDropLock (fileLockOps h)) +dropLock (LockHandle ph _) = P.releaseLock ph #ifndef mingw32_HOST_OS checkSaneLock :: LockFile -> LockHandle -> IO Bool @@ -40,26 +44,30 @@ checkSaneLock lockfile (LockHandle _ flo) = fCheckSaneLock flo lockfile -- Take a lock, by first updating the lock pool, 
and then taking the file -- lock. If taking the file lock fails for any reason, take care to -- release the lock in the lock pool. -makeLockHandle :: STM P.LockHandle -> IO FileLockOps -> IO LockHandle -makeLockHandle pa fa = bracketOnError setup cleanup go +makeLockHandle :: P.LockPool -> LockFile -> (P.LockPool -> LockFile -> STM P.LockHandle) -> (LockFile -> IO FileLockOps) -> IO LockHandle +makeLockHandle pool file pa fa = bracketOnError setup cleanup go where - setup = atomically pa - cleanup ph = P.releaseLock ph (return ()) - go ph = do - fo <- fa - return $ LockHandle ph fo + setup = atomically (pa pool file) + cleanup ph = P.releaseLock ph + go ph = mkLockHandle pool file ph =<< fa file -tryMakeLockHandle :: STM (Maybe P.LockHandle) -> IO (Maybe FileLockOps) -> IO (Maybe LockHandle) -tryMakeLockHandle pa fa = bracketOnError setup cleanup go +tryMakeLockHandle :: P.LockPool -> LockFile -> (P.LockPool -> LockFile -> STM (Maybe P.LockHandle)) -> (LockFile -> IO (Maybe FileLockOps)) -> IO (Maybe LockHandle) +tryMakeLockHandle pool file pa fa = bracketOnError setup cleanup go where - setup = atomically pa + setup = atomically (pa pool file) cleanup Nothing = return () - cleanup (Just ph) = P.releaseLock ph (return ()) + cleanup (Just ph) = P.releaseLock ph go Nothing = return Nothing go (Just ph) = do - mfo <- fa + mfo <- fa file case mfo of Nothing -> do cleanup (Just ph) return Nothing - Just fo -> return $ Just $ LockHandle ph fo + Just fo -> Just <$> mkLockHandle pool file ph fo + +mkLockHandle :: P.LockPool -> LockFile -> P.LockHandle -> FileLockOps -> IO LockHandle +mkLockHandle pool file ph fo = do + atomically $ P.registerCloseLockFile pool file (fDropLock fo) + return $ LockHandle ph fo + diff --git a/Utility/LockPool/PidLock.hs b/Utility/LockPool/PidLock.hs index dca353fdf..26ed96f3c 100644 --- a/Utility/LockPool/PidLock.hs +++ b/Utility/LockPool/PidLock.hs @@ -32,15 +32,17 @@ import Prelude -- Takes a pid lock, blocking until the lock is available or 
the timeout. waitLock :: Seconds -> LockFile -> IO LockHandle -waitLock timeout file = makeLockHandle - (P.waitTakeLock P.lockPool file LockExclusive) - (mk <$> F.waitLock timeout file) +waitLock timeout file = makeLockHandle P.lockPool file + -- LockShared for STM lock, because a pid lock can be the top-level + -- lock with various other STM level locks gated behind it. + (\p f -> P.waitTakeLock p f LockShared) + (\f -> mk <$> F.waitLock timeout f) -- Tries to take a pid lock, but does not block. tryLock :: LockFile -> IO (Maybe LockHandle) -tryLock file = tryMakeLockHandle - (P.tryTakeLock P.lockPool file LockShared) - (fmap mk <$> F.tryLock file) +tryLock file = tryMakeLockHandle P.lockPool file + (\p f -> P.tryTakeLock p f LockShared) + (\f -> fmap mk <$> F.tryLock f) checkLocked :: LockFile -> IO (Maybe Bool) checkLocked file = P.getLockStatus P.lockPool file diff --git a/Utility/LockPool/Posix.hs b/Utility/LockPool/Posix.hs index a77ed8f01..2c0b7c78e 100644 --- a/Utility/LockPool/Posix.hs +++ b/Utility/LockPool/Posix.hs @@ -33,27 +33,27 @@ import Prelude -- Takes a shared lock, blocking until the lock is available. lockShared :: Maybe FileMode -> LockFile -> IO LockHandle -lockShared mode file = makeLockHandle - (P.waitTakeLock P.lockPool file LockShared) - (mk <$> F.lockShared mode file) +lockShared mode file = makeLockHandle P.lockPool file + (\p f -> P.waitTakeLock p f LockShared) + (\f -> mk <$> F.lockShared mode f) -- Takes an exclusive lock, blocking until the lock is available. lockExclusive :: Maybe FileMode -> LockFile -> IO LockHandle -lockExclusive mode file = makeLockHandle - (P.waitTakeLock P.lockPool file LockExclusive) - (mk <$> F.lockExclusive mode file) +lockExclusive mode file = makeLockHandle P.lockPool file + (\p f -> P.waitTakeLock p f LockExclusive) + (\f -> mk <$> F.lockExclusive mode f) -- Tries to take a shared lock, but does not block. 
tryLockShared :: Maybe FileMode -> LockFile -> IO (Maybe LockHandle) -tryLockShared mode file = tryMakeLockHandle - (P.tryTakeLock P.lockPool file LockShared) - (fmap mk <$> F.tryLockShared mode file) +tryLockShared mode file = tryMakeLockHandle P.lockPool file + (\p f -> P.tryTakeLock p f LockShared) + (\f -> fmap mk <$> F.tryLockShared mode f) -- Tries to take an exclusive lock, but does not block. tryLockExclusive :: Maybe FileMode -> LockFile -> IO (Maybe LockHandle) -tryLockExclusive mode file = tryMakeLockHandle - (P.tryTakeLock P.lockPool file LockExclusive) - (fmap mk <$> F.tryLockExclusive mode file) +tryLockExclusive mode file = tryMakeLockHandle P.lockPool file + (\p f -> P.tryTakeLock p f LockExclusive) + (\f -> fmap mk <$> F.tryLockExclusive mode f) -- Returns Nothing when the file doesn't exist, for cases where -- that is different from it not being locked. diff --git a/Utility/LockPool/STM.hs b/Utility/LockPool/STM.hs index 1dc30b09b..d1ee0dbaf 100644 --- a/Utility/LockPool/STM.hs +++ b/Utility/LockPool/STM.hs @@ -15,8 +15,12 @@ module Utility.LockPool.STM ( tryTakeLock, getLockStatus, releaseLock, + CloseLockFile, + registerCloseLockFile, ) where +import Utility.Monad + import System.IO.Unsafe (unsafePerformIO) import qualified Data.Map.Strict as M import Control.Concurrent.STM @@ -36,7 +40,9 @@ type LockHandle = TMVar (LockPool, LockFile) type LockCount = Integer -data LockStatus = LockStatus LockMode LockCount +data LockStatus = LockStatus LockMode LockCount CloseLockFile + +type CloseLockFile = IO () -- This TMVar is normally kept full. 
type LockPool = TMVar (M.Map LockFile LockStatus) @@ -59,11 +65,11 @@ waitTakeLock :: LockPool -> LockFile -> LockMode -> STM LockHandle waitTakeLock pool file mode = do m <- takeTMVar pool v <- case M.lookup file m of - Just (LockStatus mode' n) + Just (LockStatus mode' n closelockfile) | mode == LockShared && mode' == LockShared -> - return $ LockStatus mode (succ n) + return $ LockStatus mode (succ n) closelockfile | n > 0 -> retry -- wait for lock - _ -> return $ LockStatus mode 1 + _ -> return $ LockStatus mode 1 noop putTMVar pool (M.insert file v m) newTMVar (pool, file) @@ -74,6 +80,16 @@ tryTakeLock pool file mode = `orElse` return Nothing +-- Call after waitTakeLock or tryTakeLock, to register a CloseLockFile +-- action to run when releasing the lock. +registerCloseLockFile :: LockPool -> LockFile -> CloseLockFile -> STM () +registerCloseLockFile pool file closelockfile = do + m <- takeTMVar pool + putTMVar pool (M.update go file m) + where + go (LockStatus mode n closelockfile') = Just $ + LockStatus mode n (closelockfile' >> closelockfile) + -- Checks if a lock is being held. If it's held by the current process, -- runs the getdefault action; otherwise runs the checker action. -- @@ -87,7 +103,7 @@ getLockStatus pool file getdefault checker = do v <- atomically $ do m <- takeTMVar pool let threadlocked = case M.lookup file m of - Just (LockStatus _ n) | n > 0 -> True + Just (LockStatus _ n _) | n > 0 -> True _ -> False if threadlocked then do @@ -99,25 +115,24 @@ getLockStatus pool file getdefault checker = do Just restore -> bracket_ (return ()) restore checker -- Only runs action to close underlying lock file when this is the last --- user of the lock, and when the handle has not already been closed. +-- user of the lock, and when the lock has not already been closed. 
-- --- Note that the lock pool is left empty while the closelockfile action +-- Note that the lock pool is left empty while the CloseLockFile action -- is run, to avoid race with another thread trying to open the same lock -- file. -releaseLock :: LockHandle -> IO () -> IO () -releaseLock h closelockfile = go =<< atomically (tryTakeTMVar h) +releaseLock :: LockHandle -> IO () +releaseLock h = go =<< atomically (tryTakeTMVar h) where go (Just (pool, file)) = do - (m, unused) <- atomically $ do + (m, closelockfile) <- atomically $ do m <- takeTMVar pool return $ case M.lookup file m of - Just (LockStatus mode n) - | n == 1 -> (M.delete file m, True) + Just (LockStatus mode n closelockfile) + | n == 1 -> (M.delete file m, closelockfile) | otherwise -> - (M.insert file (LockStatus mode (pred n)) m, False) - Nothing -> (m, True) - when unused - closelockfile + (M.insert file (LockStatus mode (pred n) closelockfile) m, noop) + Nothing -> (m, noop) + closelockfile atomically $ putTMVar pool m -- The LockHandle was already closed. go Nothing = return () diff --git a/Utility/LockPool/Windows.hs b/Utility/LockPool/Windows.hs index 2641ac37d..0ca3c8116 100644 --- a/Utility/LockPool/Windows.hs +++ b/Utility/LockPool/Windows.hs @@ -22,9 +22,9 @@ import Utility.LockPool.STM (LockFile, LockMode(..)) {- Tries to lock a file with a shared lock, which allows other processes to - also lock it shared. Fails if the file is exclusively locked. -} lockShared :: LockFile -> IO (Maybe LockHandle) -lockShared file = tryMakeLockHandle - (P.tryTakeLock P.lockPool file LockShared) - (fmap mk <$> F.lockShared file) +lockShared file = tryMakeLockHandle P.lockPool file + (\p f -> P.tryTakeLock p f LockShared) + (\f -> fmap mk <$> F.lockShared f) {- Tries to take an exclusive lock on a file. Fails if another process has - a shared or exclusive lock. @@ -33,9 +33,9 @@ lockShared file = tryMakeLockHandle - read or write by any other process. 
So for advisory locking of a file's - content, a separate LockFile should be used. -} lockExclusive :: LockFile -> IO (Maybe LockHandle) -lockExclusive file = tryMakeLockHandle - (P.tryTakeLock P.lockPool file LockExclusive) - (fmap mk <$> F.lockExclusive file) +lockExclusive file = tryMakeLockHandle P.lockPool file + (\p f -> P.tryTakeLock p f LockExclusive) + (\f -> fmap mk <$> F.lockExclusive f) {- If the initial lock fails, this is a BUSY wait, and does not - guarentee FIFO order of waiters. In other news, Windows is a POS. -} diff --git a/debian/changelog b/debian/changelog index 721c5b3aa..3ea3cc1af 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,11 @@ git-annex (6.20160230) UNRELEASED; urgency=medium * metadata: Added -r to remove all current values of a field. + * Fix data loss that can occur when annex.pidlock is set in a repository. + * Fix bug preventing moving files to/from a repository with annex.pidlock set. + * Fix shared lock file FD leak. + * Fix metadata hook behavior when multiple files are added at once. + Thanks, Klaus Ethgen. -- Joey Hess <id@joeyh.name> Mon, 29 Feb 2016 13:00:30 -0400 diff --git a/doc/bugs/__39__add__39___results_in_max_cpu__44___long_run_and_huge_repo.mdwn b/doc/bugs/__39__add__39___results_in_max_cpu__44___long_run_and_huge_repo.mdwn new file mode 100644 index 000000000..cb3c7ef81 --- /dev/null +++ b/doc/bugs/__39__add__39___results_in_max_cpu__44___long_run_and_huge_repo.mdwn @@ -0,0 +1,40 @@ +### Please describe the problem. +massive repo, max cpu using + + git annex add . + +had to interrupt the job as it was processing 1 small file per 5 seconds after about 3h run. + +I am running it on the root of a large (currently 1TB) exFAT-based drive used for archiving + +The repo grew to 28G. + +Is this a regular issue with exFAT? I've done quite a bit of searching. I'll do more. + +### What steps will reproduce the problem? 
+- install on El Capitan (latest) via homebrew +- create 1TB exFAT file store +- follow walk through to setup annex locally and on external +- add + +### What version of git-annex are you using? On what operating system? +git-annex version: 6.20160126 +build flags: Assistant Webapp Pairing Testsuite S3(multipartupload)(storageclasses) WebDAV FsEvents XMPP ConcurrentOutput TorrentParser Feeds Quvi +key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL +remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external + +El Capitan 10.11.3 + + +### Please provide any additional information below. + +[[!format sh """ +# If you can, paste a complete transcript of the problem occurring here. +# If the problem is with the git-annex assistant, paste in .git/annex/daemon.log + + +# End of transcript or log. +"""]] + +### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders) +I'd love to say I have. You'll hear my shout of joy when I do. diff --git a/doc/bugs/fatal:_Cannot_handle_files_this_big.mdwn b/doc/bugs/fatal:_Cannot_handle_files_this_big.mdwn index f4e8b7f91..7272bfc29 100644 --- a/doc/bugs/fatal:_Cannot_handle_files_this_big.mdwn +++ b/doc/bugs/fatal:_Cannot_handle_files_this_big.mdwn @@ -94,5 +94,3 @@ ok """]] - -> provisionally [[done]]. 
--[[Joey]] diff --git a/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_2_546782c644230741470f9a9de23bd019._comment b/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_2_546782c644230741470f9a9de23bd019._comment new file mode 100644 index 000000000..0ea9dc4d1 --- /dev/null +++ b/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_2_546782c644230741470f9a9de23bd019._comment @@ -0,0 +1,24 @@ +[[!comment format=mdwn + username="bvaa" + subject="similar problem" + date="2016-03-01T08:12:27Z" + content=""" +I have a similar problem on Windows 7 64bit trying to add files that are around 5GB in size. I tried repository version 5 and 6 with same results. + +``` +$ git annex add bigfile +add bigfile ok +(recording state in git...) + +$ git annex status +fatal: Cannot handle files this big +``` +git-annex version: 6.20160229-g37a89cc +build flags: Assistant Webapp Pairing Testsuite S3(multipartupload) WebDAV ConcurrentOutput TorrentParser Feeds Quvi +key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL +remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external +local repository version: 5 +supported repository versions: 5 6 +upgrade supported from repository versions: 2 3 4 5 + +"""]] diff --git a/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_3_151e7cf96c7d168e1397d111aa47f279._comment b/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_3_151e7cf96c7d168e1397d111aa47f279._comment new file mode 100644 index 000000000..e6ad551e4 --- /dev/null +++ b/doc/bugs/fatal:_Cannot_handle_files_this_big/comment_3_151e7cf96c7d168e1397d111aa47f279._comment @@ -0,0 +1,20 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 3""" + date="2016-03-01T14:41:45Z" + content=""" +git (not git-annex) will throw this error if a file size is greater than 
+`size_t`. + +This bug report seemed to originally concern git add being run on such a +file, but I can't see how git-annex would do that, it doesn't add large +files to git. + +I think that in the case of git-annex status, when it runs git status, that +looks at work tree files, and so falls over if they're large, even if +what's checked into git is a nice small git-annex symlink. This would also +probably affect other places where git looks at worktree files, perhaps git +diff (in v6 repo mode). + +Reopening bug report. +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock.mdwn b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock.mdwn new file mode 100644 index 000000000..d3d75e8c8 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock.mdwn @@ -0,0 +1,31 @@ +### Please describe the problem. + +Ideally annex should detect all "paranormal" cases such as running on NFS mounted partition, but according to [https://git-annex.branchable.com/bugs/huge_multiple_copies_of___39__.nfs__42____39___and___39__.panfs__42____39___being_created/](https://git-annex.branchable.com/bugs/huge_multiple_copies_of___39__.nfs__42____39___and___39__.panfs__42____39___being_created/). Happily ignorant we were running annex (5.20151116-g76139a9) on NFS mounted partition until we filled up 2TB of allocated to us space with .nfs* files. Well -- apparently according to above we should have tried pidlock... trying now but doesn't work :-/ + +[[!format sh """ +*$> git clone smaug:/tmp/123 123-clone && cd 123-clone && git config annex.pidlock true && echo 124 > 124.dat && git annex add 124.dat && git commit -m 'added 124' && git annex move --to=origin 124.dat +Initialized empty Git repository in /home/yhalchen/123-clone/.git/ +remote: Counting objects: 22, done. +remote: Compressing objects: 100% (16/16), done. 
+remote: Total 22 (delta 3), reused 0 (delta 0) +Receiving objects: 100% (22/22), done. +Resolving deltas: 100% (3/3), done. +total 1 +1 123.dat@ 1 README.txt +(merging origin/git-annex into git-annex...) +(recording state in git...) +add 124.dat ok +(recording state in git...) +[master 0f1092a] added 124 + 1 files changed, 1 insertions(+), 0 deletions(-) + create mode 120000 124.dat +move 124.dat (checking origin...) git-annex: content is locked + +$> echo $? +1 + +"""]] + +BTW running move in our old now somewhat screwed up annex, results in a differently expressed error: [http://www.onerussian.com/tmp/2016-02-29.png](http://www.onerussian.com/tmp/2016-02-29.png) + +[[!meta author=yoh]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_10_d44de6a250694b25ce9c3169d62db8d1._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_10_d44de6a250694b25ce9c3169d62db8d1._comment new file mode 100644 index 000000000..f2ba4335c --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_10_d44de6a250694b25ce9c3169d62db8d1._comment @@ -0,0 +1,20 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 10""" + date="2016-03-01T20:52:38Z" + content=""" + 2456732 openat(AT_FDCWD, ".git/annex/ssh/", O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC) = -1 ENOENT (No such file or directory) + 2456732 mkdir(".git/annex/ssh", 0777) = 0 + 2456732 open(".git/annex/ssh/smaug.lock", O_RDONLY|O_CREAT, 0666) = 11 + 2456732 fcntl(11, F_GETFD) = 0 + 2456732 fcntl(11, F_SETFD, FD_CLOEXEC) = 0 + 2456732 close(11) = 0 + +Backs up what I thought git-annex should be doing; it's not fcntl locking that file. + +Ah, I'll bet it's not git-annex at all this time. +It runs ssh with -S .git/annex/ssh/smaug, and ssh probably +does its own locking around setting up that control socket. + +If so, disabling annex.sshcaching will avoid the problem. 
+"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_11_56ae0f15bbdea2331df3b261b74d0b0b._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_11_56ae0f15bbdea2331df3b261b74d0b0b._comment new file mode 100644 index 000000000..2ee2e6aa8 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_11_56ae0f15bbdea2331df3b261b74d0b0b._comment @@ -0,0 +1,7 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="comment 11" + date="2016-03-01T22:40:44Z" + content=""" +would then may be annex not to use sshcaching if operating under pidlock, unless some nfs specific flag is used to tease it apart +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_1_a98a54c04fa4e81f35fe958e746d61cb._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_1_a98a54c04fa4e81f35fe958e746d61cb._comment new file mode 100644 index 000000000..21304e2e8 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_1_a98a54c04fa4e81f35fe958e746d61cb._comment @@ -0,0 +1,7 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 1""" + date="2016-03-01T14:36:25Z" + content=""" +FYI, I think you could remove the .nfs files to free up space. 
+"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_2_18169e7bbd2caba5ee4bb0286961ac95._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_2_18169e7bbd2caba5ee4bb0286961ac95._comment new file mode 100644 index 000000000..42b3265bd --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_2_18169e7bbd2caba5ee4bb0286961ac95._comment @@ -0,0 +1,10 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 2""" + date="2016-03-01T15:35:48Z" + content=""" +Oddly, I cannot reproduce this, although I can reproduce the behavior in +<http://git-annex.branchable.com/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/>a + +(smaug:/tmp/123 has permissions that do not let me access it.) +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_3_e3b623ff6714a9fe5fa0d332c72fe32f._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_3_e3b623ff6714a9fe5fa0d332c72fe32f._comment new file mode 100644 index 000000000..51f71c7ac --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_3_e3b623ff6714a9fe5fa0d332c72fe32f._comment @@ -0,0 +1,9 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 3""" + date="2016-03-01T16:52:16Z" + content=""" +I've fixed the STM transaction bug. 
Need either more info to reproduce this +bug, or you could test and see if it still occurs when git-annex is +upgraded to ad888a6b760e8f9d31f8d99c51912bcdaa7fb0c1 +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_4_58eebd8cfd664b32ef6fd0ddc34c5e86._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_4_58eebd8cfd664b32ef6fd0ddc34c5e86._comment new file mode 100644 index 000000000..7a562f7ef --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_4_58eebd8cfd664b32ef6fd0ddc34c5e86._comment @@ -0,0 +1,9 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="more info" + date="2016-03-01T17:31:29Z" + content=""" +If we could remove those .nfs* files, it would indeed be not that bad but we can't + +smaug:/tmp/123 -- sorry about permissions but it is a regular annex nothing special, so the bug should show itself with other repos as well I think. I gave you access to it now and also there is /tmp/123.tar.gz archive of it just in case. 
+"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_5_e5e24428ac02b78d38cd4f197ae3807b._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_5_e5e24428ac02b78d38cd4f197ae3807b._comment new file mode 100644 index 000000000..75a45bdad --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_5_e5e24428ac02b78d38cd4f197ae3807b._comment @@ -0,0 +1,31 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="recent snapshot seems has fixed it" + date="2016-03-01T18:52:27Z" + content=""" +[[!format sh \"\"\" +$> git clone smaug:/tmp/123 123-clone && cd 123-clone && git config annex.pidlock true && echo 124 > 124.dat && git annex add 124.dat && git commit -m 'added 124' && git annex move --to=origin 124.dat +Cloning into '123-clone'... +remote: Counting objects: 22, done. +remote: Compressing objects: 100% (16/16), done. +remote: Total 22 (delta 3), reused 0 (delta 0) +Receiving objects: 100% (22/22), done. +Resolving deltas: 100% (3/3), done. +Checking connectivity... done. +total 1 +1 123.dat@ 1 README.txt +(merging origin/git-annex into git-annex...) +(recording state in git...) +add 124.dat ok +(recording state in git...) +[master 6eca577] added 124 + 1 file changed, 1 insertion(+) + create mode 120000 124.dat +move 124.dat (checking origin...) ok +(recording state in git...) 
+ +$> git annex version +git-annex version: 6.20160301+gitg647fffd-1~ndall+1 + +\"\"\"]] +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_6_01dc7a1ff67783ce672d72cefe7b4bb5._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_6_01dc7a1ff67783ce672d72cefe7b4bb5._comment new file mode 100644 index 000000000..20eca2f62 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_6_01dc7a1ff67783ce672d72cefe7b4bb5._comment @@ -0,0 +1,7 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="comment 6" + date="2016-03-01T18:54:15Z" + content=""" +but then I found ./.git/annex/ssh/.nfs000000000000f41600003608.lock left behind (removable, luckily to me ;) ) +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_7_458518805b8d6613930b38b9ccc3c1bc._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_7_458518805b8d6613930b38b9ccc3c1bc._comment new file mode 100644 index 000000000..756b64250 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_7_458518805b8d6613930b38b9ccc3c1bc._comment @@ -0,0 +1,7 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="comment 7" + date="2016-03-01T18:58:20Z" + content=""" +and those are breeding with next subsequent --move +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_8_853bc273b19bd6d84ca8f5da6c3dfb56._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_8_853bc273b19bd6d84ca8f5da6c3dfb56._comment new file mode 100644 index 000000000..d24d78d51 --- /dev/null +++ 
b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_8_853bc273b19bd6d84ca8f5da6c3dfb56._comment @@ -0,0 +1,20 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 8""" + date="2016-03-01T20:17:35Z" + content=""" +That ssh lock file is created by this code: + + -- The posix lock file is created even when using pid locks, in order to + -- avoid complicating any code that might expect to be able to see that + -- lock file. But, it's not locked. + dummyPosixLock :: Maybe FileMode -> LockFile -> IO () + dummyPosixLock m f = closeFd =<< openLockFile ReadLock m f + +But, that does not ever actually take a lock on the file, so +NFS should not make its .nfs thing in this case. Unless NFS does it when a +FD is simply opened with close-on-exec set. + +Can you get a strace of the creation of files under .git/annex/ssh/ +that result in these .nfs things? +"""]] diff --git a/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_9_86656a409ab25c7fa24de8ac3e68b254._comment b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_9_86656a409ab25c7fa24de8ac3e68b254._comment new file mode 100644 index 000000000..b862174b1 --- /dev/null +++ b/doc/bugs/git-annex:_content_is_locked__while_trying_to_move_under_NFS_and_pidlock/comment_9_86656a409ab25c7fa24de8ac3e68b254._comment @@ -0,0 +1,7 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4" + subject="comment 9" + date="2016-03-01T20:43:17Z" + content=""" +ok -- see on smaug /mnt/nfs/scrap/datalad/test_nfs/123-clone-move.strace . 
Now you can experiment there as well -- the entire /mnt/btrfs/scrap is mounted also via nfs (under /mnt/nfs/scrap) +"""]] diff --git a/doc/bugs/git-annex_confuses_Git_with_nested_submodules.mdwn b/doc/bugs/git-annex_confuses_Git_with_nested_submodules.mdwn new file mode 100644 index 000000000..83d74f79f --- /dev/null +++ b/doc/bugs/git-annex_confuses_Git_with_nested_submodules.mdwn @@ -0,0 +1,37 @@ +### Please describe the problem. +The way git-annex deals with submodules (replacing the .git file in the submodule, with a link to the corresponding gitdir of the submodule) seems to confuse Git when creating another submodule in an annex-init'ed submodule. + +### What steps will reproduce the problem? + % mkdir some ; cd some; git init + Initialized empty Git repository in /tmp/some/.git/ + % git submodule add /src/somegitrepo sub_lvl1 + Cloning into 'sub_lvl1'... + done. + % cd sub_lvl1 + % git annex init + init (merging origin/git-annex into git-annex...) + (recording state in git...) + ok + (recording state in git...) + % git submodule add /src/somegitrepo sub_lvl2 + Cloning into 'sub_lvl2'... + done. + fatal: Could not chdir to '../../../sub_lvl2': No such file or directory + Unable to checkout submodule 'sub_lvl2' + +### What version of git-annex are you using? On what operating system? + % apt-cache policy git-annex-standalone + git-annex-standalone: + Installed: 6.20160213+gitg9597a21-1~ndall+1 + +Debian stretch, git-annex from NeuroDebian. + +### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders) + +Yes, lots! Using it for some of its original use cases for more than five years now -- I was actually surprised to learn, just now, that the oldest commit in my music repository is exactly 5 years and 6 days old. Thanks for longevity and reliability! + +More recently I am exploring the use of git annex for managing datasets and their dependencies, i.e. 
going from raw to some processed state over multiple levels, where each level is a useful starting point for some analysis, and each previous level is a dependency (input) to the next. With just one level above "raw" this has massively improved collaboration workflows in student/teacher settings for me. Deeper nesting levels would allow for even more interesting applications, but see above ;-) I think Git seems needlessly confused, but I don't fully grasp what is happening yet. I'd appreciate any insight you may have. Although it is Git that shows the undesired behavior, it seems it is git-annex that ultimately confused it. Hence I came here first. + +BTW: What a nice idea to ask for something like this in a bug report. + + diff --git a/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_1_fb01d4b5af500affc08a5c3b3b1849dd._comment b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_1_fb01d4b5af500affc08a5c3b3b1849dd._comment new file mode 100644 index 000000000..41068b5a9 --- /dev/null +++ b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_1_fb01d4b5af500affc08a5c3b3b1849dd._comment @@ -0,0 +1,16 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 1""" + date="2016-03-01T20:25:13Z" + content=""" +Reproduced this. + +This really does feel like a git bug. git is supposed to treat "gitlink" +files and .git symlinks the same. While modern versions of git set up +gitlink files for submodules, older versions of git used .git symlinks, and +git should still support that. + +Looks like the problem can be worked around, by setting +`GIT_DIR`. 
In your example, `GIT_DIR=../.git/modules/sub_lvl1/ git +submodule add /src/somegitrepo sub_lvl2` +"""]] diff --git a/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_2_094baf6c3738691879fd907dd1729c56._comment b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_2_094baf6c3738691879fd907dd1729c56._comment new file mode 100644 index 000000000..50d2f709a --- /dev/null +++ b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_2_094baf6c3738691879fd907dd1729c56._comment @@ -0,0 +1,17 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 2""" + date="2016-03-01T20:36:43Z" + content=""" +Here's a more minimal test case, not involving git-annex at all: + + git init gitdir + mkdir worktree + cd worktree + ln -s ../gitdir/.git .git + git submodule add /any/git/repo sub + + fatal: Could not chdir to '../../../sub': No such file or directory + +I have forwarded that test case to the git ML. +"""]] diff --git a/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_3_e1bc8eb7f6ce0d6f2d2f2b0ea6f20862._comment b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_3_e1bc8eb7f6ce0d6f2d2f2b0ea6f20862._comment new file mode 100644 index 000000000..49e883c7c --- /dev/null +++ b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_3_e1bc8eb7f6ce0d6f2d2f2b0ea6f20862._comment @@ -0,0 +1,29 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 3""" + date="2016-03-02T16:48:24Z" + content=""" +[git bug report](http://news.gmane.org/find-root.php?message_id=20160301204218.GA4083%40kitenet.net) + +So far, the git devs admit this is a problem, but don't seem too keen on fixing +it, even though it breaks backwards compatibility with repositories git +submodule add created (circa 2012). 
+ +It might be that git-annex init could work around git's bugginess by, +instead of making submodule/.git a symlink to ../.git/modules/dir, making +submodule/.git be the git directory, and converting ../.git/modules/dir +to a symlink. In very limited testing, that setup seems to work. + +I don't know if all the submodule stuff would work, perhaps it would break moving +submodules etc. And, since git likes to chdir around (not the best idea), +if it expected to be able to chdir from .git/modules to dir and chdir .. to +get back, changing that to a symlink would defeat it. + +BTW, I found another way, unrelated to git-annex or symlinks at all, +that git submodule add's broken path handling makes it fall over with +nested submodules. +<http://news.gmane.org/find-root.php?message_id=20160302165240.GA17654%40kitenet.net>. + +(It's almost like myrepos was a better idea than this submodule stuff, or +something...) +"""]] diff --git a/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_4_4bcd571dcd6c1e709e83e519135519b3._comment b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_4_4bcd571dcd6c1e709e83e519135519b3._comment new file mode 100644 index 000000000..d663f3370 --- /dev/null +++ b/doc/bugs/git-annex_confuses_Git_with_nested_submodules/comment_4_4bcd571dcd6c1e709e83e519135519b3._comment @@ -0,0 +1,9 @@ +[[!comment format=mdwn + username="mih" + subject="Thanks" + date="2016-03-02T19:30:49Z" + content=""" +Thanks for investigating this further. + +One aspect that may make switching the location of the .git directory into the worktree of the submodule less desirable is this: With the actual .git in ../.git/modules/... one can easily rm -rf the submodule, deinit it, and re-init/update from the (still present) ../.git/modules/... at a later point in time. Especially, when a submodule is a more complicated beast (e.g. with multiple configured remotes) the required steps to regenerate the same setup get more complex. 
+"""]] diff --git a/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone.mdwn b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone.mdwn new file mode 100644 index 000000000..eaf79a862 --- /dev/null +++ b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone.mdwn @@ -0,0 +1,40 @@ +relates to having pidlock true + +[[!format sh """ +$> mkdir 123; cd 123; git init; git annex init; git config annex.pidlock true && echo "123" > 123.dat; git annex add 123.dat; git commit -m 'added'; +W: git-annex repositories not (yet) supported in the prompt +Initialized empty Git repository in /tmp/123/.git/ +init ok +(recording state in git...) +add 123.dat ok +(recording state in git...) +[master (root-commit) 9449f1b] added + 1 file changed, 1 insertion(+) + create mode 120000 123.dat + +$> git clone . ../123-clone && git remote add clone ../123-clone && git fetch clone && cd ../123-clone && git config annex.pidlock true && cd - && git annex move --to=clone . +Cloning into '../123-clone'... +done. +From ../123-clone + * [new branch] master -> clone/master +move 123.dat git-annex: thread blocked indefinitely in an STM transaction + +$> echo $? 
+1 + +$> git annex version +git-annex version: 6.20160226+gitg01f1de0-1~ndall+1 +build flags: Assistant Webapp Pairing Testsuite S3(multipartupload) WebDAV Inotify DBus DesktopNotify XMPP ConcurrentOutput TorrentParser MagicMime Feeds Quvi +key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL +remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external +local repository version: 5 +supported repository versions: 5 6 +upgrade supported from repository versions: 0 1 2 4 5 + +"""]] + +and it works ok without pidlock enabled + +[[!meta author=yoh]] + +> [[fixed|done]] --[[Joey]] diff --git a/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_1_0cb6b5d69cc47bfbab8fb5e87e6e2bad._comment b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_1_0cb6b5d69cc47bfbab8fb5e87e6e2bad._comment new file mode 100644 index 000000000..7afc0eb30 --- /dev/null +++ b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_1_0cb6b5d69cc47bfbab8fb5e87e6e2bad._comment @@ -0,0 +1,8 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 1""" + date="2016-03-01T15:40:12Z" + content=""" +I can reproduce this. But, when I change the origin remote to use ssh, it +works around the problem. 
+"""]] diff --git a/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_2_aa8c82f27965df44e69fd06b34be0ece._comment b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_2_aa8c82f27965df44e69fd06b34be0ece._comment new file mode 100644 index 000000000..2c06811d8 --- /dev/null +++ b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_2_aa8c82f27965df44e69fd06b34be0ece._comment @@ -0,0 +1,11 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 2""" + date="2016-03-01T16:11:37Z" + content=""" +A worse problem with annex.pidlock is that it completely broke checking +whether a key is present in the repository. That could lead to data loss +when eg, moving --to a repo with annex.pidlock set. + +I've fixed that related bug. +"""]] diff --git a/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_3_7e6b3ab0beaca49d7d68c9e610c1d147._comment b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_3_7e6b3ab0beaca49d7d68c9e610c1d147._comment new file mode 100644 index 000000000..becf5a1b3 --- /dev/null +++ b/doc/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/comment_3_7e6b3ab0beaca49d7d68c9e610c1d147._comment @@ -0,0 +1,17 @@ +[[!comment format=mdwn + username="joey" + subject="""comment 3""" + date="2016-03-01T16:21:31Z" + content=""" +Analysis: What's crashing is Utility.LockPool.PidLock.waitLock after a call +to Utility.LockPool.PidLock.tryLock. The former takes an exclusive STM lock +of the pid lock file; the latter takes a shared STM lock. + +Since the pid lock stands in for multiple more fine-grained locks, waitLock +will be called while a lock from tryLock (or a previous waitLock perhaps) +is still open. 
+ +The fix seems as simple as making waitLock take a shared STM lock of the +pid lock file, leaving the exclusive lock for the later, more fine-grained +STM lock checking that's done after taking the pid lock. +"""]] diff --git a/doc/devblog/day_368__leap.mdwn b/doc/devblog/day_368__leap.mdwn new file mode 100644 index 000000000..69766b5e3 --- /dev/null +++ b/doc/devblog/day_368__leap.mdwn @@ -0,0 +1,9 @@ +Pushed out a release today, could not resist the leap day in the version +number, and also there were enough bug fixes accumulated to make it worth +doing. + +I now have `git-annex sync` working inside adjusted branches, so pulls +get adjusted appropriately before being merged into the adjusted branch. +Seems to mostly work well, I did just find one bug in it though. Only +propagating adjusted commits remains to be done to finish my adjusted +branches prototype. diff --git a/doc/forum/How_to_shrink_transfer_repo__63__.mdwn b/doc/forum/How_to_shrink_transfer_repo__63__.mdwn new file mode 100644 index 000000000..8e368087f --- /dev/null +++ b/doc/forum/How_to_shrink_transfer_repo__63__.mdwn @@ -0,0 +1,27 @@ +Hello, + +I have two repositories (Asaru and Horus) that are both ```group=client``` and ```wanted=standard```. The other one, Astarte is ```group=transfer``` and ```wanted=standard```. Pretty standard I think. + +``` +repository mode: direct +trusted repositories: 0 +semitrusted repositories: 5 + 00000000-0000-0000-0000-000000000001 -- web + 00000000-0000-0000-0000-000000000002 -- bittorrent + 58001764-966d-4076-ae99-4ef6de25df39 -- Asaru [here] + 8165bdf1-907e-4bbe-9c35-22fbf6f8cb00 -- Astarte [astarte] + cca0c3c8-593a-4395-936c-1093f0f762e8 -- Horus +untrusted repositories: 0 +``` + +I always sync on the two client repos like that ```git annex add . && git annex sync --content```. The transfer repo is growing larger and larger. ```git annex dropunused N``` says, that it ```could only verify the existence of 0 out of 1 necessary copies```. 
+ +What is the best way to clean up the transfer repo? + +1. Make the two client repos trusted? The three repos have been created manually, not through the assistant. Is that what the assistant does, too? +2. Try to get the two client repos into touch with each other and try to use ```dropunused --from=astarte```? + +What is the recommended way for that? + +Thanks, +Florian diff --git a/doc/forum/Multiple_remotes_with_the_same_path/comment_2_8cd3edf2e71e904f1b651abdfd7a4499._comment b/doc/forum/Multiple_remotes_with_the_same_path/comment_2_8cd3edf2e71e904f1b651abdfd7a4499._comment new file mode 100644 index 000000000..f18404a6d --- /dev/null +++ b/doc/forum/Multiple_remotes_with_the_same_path/comment_2_8cd3edf2e71e904f1b651abdfd7a4499._comment @@ -0,0 +1,8 @@ +[[!comment format=mdwn + username="grawity@2ea26be48562f66fcb9b66307da72b1e2e37453f" + nickname="grawity" + subject="comment 2" + date="2016-02-29T17:25:17Z" + content=""" +Hmm, I still think that avoiding duplicating uuids would be smarter behavior, but the host symlinks will do just fine. Thanks for the suggestion. +"""]] diff --git a/doc/forum/Undo_git_merge_git-annex.mdwn b/doc/forum/Undo_git_merge_git-annex.mdwn new file mode 100644 index 000000000..bc299174c --- /dev/null +++ b/doc/forum/Undo_git_merge_git-annex.mdwn @@ -0,0 +1,3 @@ +After accidentally typing git merge git-annex, I am now wondering how to clean up the resulting chaos... + +Any tips? diff --git a/doc/tips/automatically_adding_metadata/pre-commit-annex b/doc/tips/automatically_adding_metadata/pre-commit-annex index a77f7a8e3..2e07e3bf4 100755 --- a/doc/tips/automatically_adding_metadata/pre-commit-annex +++ b/doc/tips/automatically_adding_metadata/pre-commit-annex @@ -1,4 +1,4 @@ -#! 
/bin/sh +#!/bin/sh # # Copyright (C) 2014 Joey Hess <id@joeyh.name> # Copyright (C) 2016 Klaus Ethgen <Klaus@Ethgen.ch> @@ -112,7 +112,7 @@ if [ -n "$*" ]; then process "$f" done else - for f in "$(git diff-index --name-only --cached $against)"; do + git diff-index --name-only --cached $against | while read f; do process "$f" done fi diff --git a/doc/todo/import_--reinject/comment_2_04074324f866420ebf0d39ddfae85ff7._comment b/doc/todo/import_--reinject/comment_2_04074324f866420ebf0d39ddfae85ff7._comment new file mode 100644 index 000000000..eed8434b5 --- /dev/null +++ b/doc/todo/import_--reinject/comment_2_04074324f866420ebf0d39ddfae85ff7._comment @@ -0,0 +1,22 @@ +[[!comment format=mdwn + username="grawity@2ea26be48562f66fcb9b66307da72b1e2e37453f" + nickname="grawity" + subject="comment 2" + date="2016-03-01T07:10:55Z" + content=""" +Thanks, but you missed my point entirely... I wasn't asking for a mode that would delete data without checking. I was asking for the complete opposite – a mode that would _inject an extra copy_ of the data without checking. + +Yeah, I guess I could `annex add` the files, then un-annex them, and _then_ `annex import --clean-duplicates`, but that's a somewhat long-winded approach, needing twice the space and twice the time. + +(...speaking of losing data, it seems that `git annex reinject` is perfectly happy to delete files if I accidentally give it the wrong target. I.e. after failing content verification, it still throws away the source.) + +--- + +It doesn't have to be part of git-annex; I could _script_ this feature myself, though there aren't nearly enough plumbing commands either. (For example, a command to hash a file and give its key (like `git hash-object`), or a command to find all paths for a key.) + +Having an equivalent of `git hash-object -w` (inject an arbitrary object) would make it even easier, but I couldn't find anything like that either. + +--- + +Anyway, let's cancel this todo, I'll find other ways. +"""]] |