aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/main/java/com/google/devtools/build/lib/actions
diff options
context:
space:
mode:
authorGravatar Googler <noreply@google.com>2018-07-02 01:00:16 -0700
committerGravatar Copybara-Service <copybara-piper@google.com>2018-07-02 01:01:25 -0700
commit0e3df78b0b4f5af22a5c38980465fde87cc929ad (patch)
treed66d2f22e4a8973351b39e9879ba0f6afb926eee /src/main/java/com/google/devtools/build/lib/actions
parent417260d2d3d5f8d264473c19f0c7c798f71adffe (diff)
Relax the threshold for calling getDigestInExclusiveMode().
If the policy goal is to minimize disk seeks, this should be equally good. Second attempt, with test update. RELNOTES: None. PiperOrigin-RevId: 202907857
Diffstat (limited to 'src/main/java/com/google/devtools/build/lib/actions')
-rw-r--r--src/main/java/com/google/devtools/build/lib/actions/cache/DigestUtils.java18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/src/main/java/com/google/devtools/build/lib/actions/cache/DigestUtils.java b/src/main/java/com/google/devtools/build/lib/actions/cache/DigestUtils.java
index 5e0f198273..37759f9ead 100644
--- a/src/main/java/com/google/devtools/build/lib/actions/cache/DigestUtils.java
+++ b/src/main/java/com/google/devtools/build/lib/actions/cache/DigestUtils.java
@@ -13,6 +13,7 @@
// limitations under the License.
package com.google.devtools.build.lib.actions.cache;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
@@ -59,6 +60,11 @@ public class DigestUtils {
private static final Object DIGEST_LOCK = new Object();
private static final AtomicBoolean MULTI_THREADED_DIGEST = new AtomicBoolean(false);
+ // Files of this size or less are assumed to be readable in one seek.
+ // (This is the default readahead window on Linux.)
+ @VisibleForTesting // the unittest is in a different package!
+ public static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024;
+
// The time that a digest computation has to take at least in order to be considered a slow-read.
private static final long SLOW_READ_MILLIS = 5000L;
@@ -238,13 +244,11 @@ public class DigestUtils {
// All right, we have neither a fast nor a cached digest. Let's go through the costly process of
// computing it from the file contents.
- if (fileSize > 4096 && !MULTI_THREADED_DIGEST.get()) {
- // We'll have to read file content in order to calculate the digest. In that case
- // it would be beneficial to serialize those calculations since there is a high
- // probability that MD5 will be requested for multiple output files simultaneously.
- // Exception is made for small (<=4K) files since they will not likely to introduce
- // significant delays (at worst they will result in two extra disk seeks by
- // interrupting other reads).
+ if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE && !MULTI_THREADED_DIGEST.get()) {
+ // We'll have to read file content in order to calculate the digest.
+ // We avoid overlapping this process for multiple large files, as
+ // seeking back and forth between them will result in an overall loss of
+ // throughput.
digest = getDigestInExclusiveMode(path);
} else {
digest = getDigestInternal(path);