aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/test/java/com/google/devtools
diff options
context:
space:
mode:
authorGravatar laurentlb <laurentlb@google.com>2018-05-22 09:43:18 -0700
committerGravatar Copybara-Service <copybara-piper@google.com>2018-05-22 09:45:30 -0700
commit98ad85c78cdefd100f03946b6fb0d1109995a2c5 (patch)
tree9a04255587421b819229effc0d4dc77a5273a8ba /src/test/java/com/google/devtools
parentfd0aec8adae7785ca5aecd21886dacd5d9e5091e (diff)
Skylark: do not eagerly scan the whole file
With this change, the parser explicitly asks the lexer to give the next token. To avoid changing the lexer too much, the tokenize() method populates a queue (it may add multiple tokens at the same time). While this reduces the peak memory usage, further work is needed to actually improve the performance. RELNOTES: None. PiperOrigin-RevId: 197576326
Diffstat (limited to 'src/test/java/com/google/devtools')
-rw-r--r--src/test/java/com/google/devtools/build/lib/syntax/LexerTest.java21
1 file changed, 18 insertions, 3 deletions
diff --git a/src/test/java/com/google/devtools/build/lib/syntax/LexerTest.java b/src/test/java/com/google/devtools/build/lib/syntax/LexerTest.java
index 9d2df743b5..81212e6fdb 100644
--- a/src/test/java/com/google/devtools/build/lib/syntax/LexerTest.java
+++ b/src/test/java/com/google/devtools/build/lib/syntax/LexerTest.java
@@ -23,6 +23,7 @@ import com.google.devtools.build.lib.events.Location;
import com.google.devtools.build.lib.events.Reporter;
import com.google.devtools.build.lib.skyframe.serialization.testutils.SerializationTester;
import com.google.devtools.build.lib.vfs.PathFragment;
+import java.util.ArrayList;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -57,8 +58,19 @@ public class LexerTest {
return new Lexer(inputSource, reporter);
}
- public Token[] tokens(String input) {
- return createLexer(input).getTokens().toArray(new Token[0]);
+ private ArrayList<Token> allTokens(Lexer lexer) {
+ ArrayList<Token> result = new ArrayList<>();
+ Token tok;
+ do {
+ tok = lexer.nextToken();
+ result.add(tok);
+ } while (tok.kind != TokenKind.EOF);
+ return result;
+ }
+
+ private Token[] tokens(String input) {
+ ArrayList<Token> result = allTokens(createLexer(input));
+ return result.toArray(new Token[0]);
}
/**
@@ -68,7 +80,7 @@ public class LexerTest {
private String linenums(String input) {
Lexer lexer = createLexer(input);
StringBuilder buf = new StringBuilder();
- for (Token tok : lexer.getTokens()) {
+ for (Token tok : allTokens(lexer)) {
if (buf.length() > 0) {
buf.append(' ');
}
@@ -477,13 +489,16 @@ public class LexerTest {
@Test
public void testContainsErrors() throws Exception {
Lexer lexerSuccess = createLexer("foo");
+ allTokens(lexerSuccess); // ensure the file has been completely scanned
assertThat(lexerSuccess.containsErrors()).isFalse();
Lexer lexerFail = createLexer("f$o");
+ allTokens(lexerFail);
assertThat(lexerFail.containsErrors()).isTrue();
String s = "'unterminated";
lexerFail = createLexer(s);
+ allTokens(lexerFail);
assertThat(lexerFail.containsErrors()).isTrue();
assertThat(lastErrorLocation.getStartOffset()).isEqualTo(0);
assertThat(lastErrorLocation.getEndOffset()).isEqualTo(s.length());