about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
authorOmer Strulovich <ostrulovich@fb.com>2022-04-13 03:44:22 -0700
committerFacebook GitHub Bot <facebook-github-bot@users.noreply.github.com>2022-04-13 03:44:22 -0700
commitd82fa634cc397c64247593a7a488f40442c06840 (patch)
tree60a9e6f79b0723635f4de6e00da8a15ad4f858b0 /core
parent07d2ac0e51df170b34cd32e91a5e13a3ed4c66d6 (diff)
downloadktfmt-d82fa634cc397c64247593a7a488f40442c06840.tar.gz
Removing old prints from TokenizerTest
Summary: Small cleanup to make GitHub Actions errors less spammy. Reviewed By: cgrushko. Differential Revision: D35601992. fbshipit-source-id: 2670fa5e573aa8ef39401c2ab5b244e9cad25cba
Diffstat (limited to 'core')
-rw-r--r-- core/src/test/java/com/facebook/ktfmt/format/TokenizerTest.kt | 13
1 file changed, 0 insertions, 13 deletions
diff --git a/core/src/test/java/com/facebook/ktfmt/format/TokenizerTest.kt b/core/src/test/java/com/facebook/ktfmt/format/TokenizerTest.kt
index bfad5b6..3f8fe71 100644
--- a/core/src/test/java/com/facebook/ktfmt/format/TokenizerTest.kt
+++ b/core/src/test/java/com/facebook/ktfmt/format/TokenizerTest.kt
@@ -16,7 +16,6 @@
package com.facebook.ktfmt.format
-import com.facebook.ktfmt.debughelpers.PrintAstVisitor
import com.google.common.truth.Truth.assertThat
import org.junit.Test
import org.junit.runner.RunWith
@@ -35,8 +34,6 @@ class TokenizerTest {
.joinToString("\n")
val file = Parser.parse(code)
- println("# Parse tree of input: ")
- println("#".repeat(20))
val tokenizer = Tokenizer(code, file)
file.accept(tokenizer)
@@ -63,15 +60,9 @@ class TokenizerTest {
.joinToString("\n")
val file = Parser.parse(code)
- println("# Parse tree of input: ")
- println("#".repeat(20))
- file.accept(PrintAstVisitor())
-
val tokenizer = Tokenizer(code, file)
file.accept(tokenizer)
- print(tokenizer.toks.joinToString(", ") { "\"${it.originalText}\"" })
-
assertThat(tokenizer.toks.map { it.originalText })
.containsExactly(
"val",
@@ -109,13 +100,9 @@ class TokenizerTest {
|""".trimMargin().trimMargin()
val file = Parser.parse(code)
- println("# Parse tree of input: ")
- println("#".repeat(20))
val tokenizer = Tokenizer(code, file)
file.accept(tokenizer)
- print(tokenizer.toks.joinToString(",\n") { "\"${it}\"" })
-
assertThat(tokenizer.toks.map { it.originalText })
.containsExactly("val", " ", "b", "=", "\"a\"", "\n", "val", " ", "a", "=", "5")
.inOrder()