// Forked from scalameta/scalameta — LazyTokenIterator.scala (67 lines, 1.64 KB)
package scala.meta.internal.parsers
import scala.meta.classifiers._
import scala.meta.tokens.Token
/** Factory for [[LazyTokenIterator]], positioned at the first scanner token. */
object LazyTokenIterator {
  def apply(st: ScannerTokens): LazyTokenIterator = {
    // Seed the iterator at token 0; a sentinel "previous" ref (no token,
    // position -1) precedes it and already links forward to the head.
    val head = TokenRef(Nil, st.tokens(0), 0, null)
    val sentinel = TokenRef(Nil, null, -1, head)
    new LazyTokenIterator(st, sentinel, head)
  }
}
/**
 * Token iterator that materializes [[TokenRef]]s on demand via the scanner's
 * `nextToken`, tracking a previous/current pair so callers can look one token
 * back as well as peek one ahead without advancing.
 */
private[parsers] class LazyTokenIterator private (
    private val scannerTokens: ScannerTokens,
    private var prev: TokenRef,
    private var curr: TokenRef
) extends TokenIterator {
  import scannerTokens._

  /** Computes (lazily, via the scanner) the ref following `curr`. */
  @inline
  private def getNextTokenRef(): TokenRef = nextToken(curr)

  /** Advances one token: `curr` becomes `prev`, the next ref becomes `curr`. */
  override def next(): Unit = {
    prev = curr
    // nextToken also links the new ref behind us, in case this iterator was forked
    curr = getNextTokenRef()
  }

  // Re-point the iterator at `ref`, splicing it in directly after `prev`.
  // NOTE(review): no caller is visible in this file — presumably used by a
  // sibling source; confirm before removing.
  private def resetCurr(ref: TokenRef): Unit = {
    prev.next = ref
    curr = ref
  }

  /** True when sitting on an EOL that increases indentation relative to the enclosing region. */
  override def indenting: Boolean =
    if (!curr.token.is[Token.EOL]) false
    else
      curr.regions match {
        case (line: RegionLine) :: outerRegions =>
          // An empty tail of regions means we are at the top level.
          outerRegions.headOption.forall { outer =>
            0 <= outer.indent && outer.indent < line.indent
          }
        case _ => false
      }

  /** Indentation of the region relevant to the current token (0 at top level). */
  def previousIndentation: Int = curr.regions match {
    // On a non-EOL token inside a line region, that region's own indent applies.
    case (line: RegionLine) :: _ if !curr.token.is[Token.EOL] => line.indent
    // Otherwise fall back to the enclosing (second) region, if there is one.
    case _ :: enclosing :: _ => enclosing.indent
    case _ => 0
  }

  override def prevTokenPos: Int = prev.pointPos
  override def prevToken: Token = prev.token
  override def tokenPos: Int = curr.pointPos
  override def token: Token = curr.token

  /** Independent iterator sharing the current position; safe because refs are linked forward. */
  override def fork: TokenIterator = new LazyTokenIterator(scannerTokens, prev, curr)

  /** Next token without advancing. */
  override def peekToken: Token = nextToken(curr).token

  /** Position of the next token without advancing. */
  override def peekIndex: Int = nextToken(curr).pointPos
}