use {super::*, CompileErrorKind::*, TokenKind::*};

/// Just language lexer
///
/// The lexer proceeds character-by-character, as opposed to using regular
/// expressions to lex tokens or semi-tokens at a time. As a result, it is
/// verbose and straightforward. Just used to have a regex-based lexer, which
/// was slower and generally godawful. However, this should not be taken as a
/// slight against regular expressions, the lexer was just idiosyncratically
/// bad.
pub(crate) struct Lexer<'src> {
  /// Char iterator
  chars: Chars<'src>,
  /// Indentation stack
  indentation: Vec<&'src str>,
  /// Interpolation token start stack
  interpolation_stack: Vec<Token<'src>>,
  /// Next character to be lexed
  next: Option<char>,
  /// Current open delimiters
  open_delimiters: Vec<(Delimiter, usize)>,
  /// Path to source file
  path: &'src Path,
  /// Inside recipe body
  recipe_body: bool,
  /// Next indent will start a recipe body
  recipe_body_pending: bool,
  /// Source text
  src: &'src str,
  /// Tokens
  tokens: Vec<Token<'src>>,
  /// Current token end
  token_end: Position,
  /// Current token start
  token_start: Position,
}

impl<'src> Lexer<'src> {
  /// Lex `src`
  pub(crate) fn lex(path: &'src Path, src: &'src str) -> CompileResult<'src, Vec<Token<'src>>> {
    Self::new(path, src).tokenize()
  }

  #[cfg(test)]
  pub(crate) fn test_lex(src: &'src str) -> CompileResult<'src, Vec<Token<'src>>> {
    Self::new("justfile".as_ref(), src).tokenize()
  }

  /// Create a new Lexer to lex `src`
  fn new(path: &'src Path, src: &'src str) -> Self {
    let mut chars = src.chars();
    let next = chars.next();

    let start = Position {
      offset: 0,
      column: 0,
      line: 0,
    };

    Self {
      indentation: vec![""],
      tokens: Vec::new(),
      token_start: start,
      token_end: start,
      recipe_body_pending: false,
      recipe_body: false,
      interpolation_stack: Vec::new(),
      open_delimiters: Vec::new(),
      chars,
      next,
      src,
      path,
    }
  }

  /// Advance over the character in `self.next`, updating `self.token_end`
  /// accordingly.
  fn advance(&mut self) -> CompileResult<'src> {
    match self.next {
      Some(c) => {
        let len_utf8 = c.len_utf8();

        self.token_end.offset += len_utf8;
        self.token_end.column += len_utf8;

        if c == '\n' {
          self.token_end.column = 0;
          self.token_end.line += 1;
        }

        self.next = self.chars.next();

        Ok(())
      }
      None => Err(self.internal_error("Lexer advanced past end of text")),
    }
  }
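
  // Illustrative sketch (not part of the lexer proper): offsets and columns
  // are tracked in bytes, so advancing over the two-character source "é\n"
  // updates `token_end` as follows ('é' is two bytes in UTF-8):
  //
  //   after advancing over | offset | line | column
  //   ---------------------+--------+------+-------
  //   (start)              |   0    |  0   |   0
  //   'é'                  |   2    |  0   |   2
  //   '\n'                 |   3    |  1   |   0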

  /// Advance over N characters.
  fn skip(&mut self, n: usize) -> CompileResult<'src> {
    for _ in 0..n {
      self.advance()?;
    }

    Ok(())
  }

  /// Lexeme of in-progress token
  fn lexeme(&self) -> &'src str {
    &self.src[self.token_start.offset..self.token_end.offset]
  }

  /// Length of current token
  fn current_token_length(&self) -> usize {
    self.token_end.offset - self.token_start.offset
  }

  /// Advance over `c` if it is the next character, returning whether it was
  /// accepted
  fn accepted(&mut self, c: char) -> CompileResult<'src, bool> {
    if self.next_is(c) {
      self.advance()?;
      Ok(true)
    } else {
      Ok(false)
    }
  }

  /// Advance over `c`, returning an internal error if it is not the next
  /// character
  fn presume(&mut self, c: char) -> CompileResult<'src> {
    if !self.next_is(c) {
      return Err(self.internal_error(format!("Lexer presumed character `{c}`")));
    }

    self.advance()?;

    Ok(())
  }

  /// Advance over each character of `s`, returning an internal error on a
  /// mismatch
  fn presume_str(&mut self, s: &str) -> CompileResult<'src> {
    for c in s.chars() {
      self.presume(c)?;
    }

    Ok(())
  }

  /// Is next character `c`?
  fn next_is(&self, c: char) -> bool {
    self.next == Some(c)
  }

  /// Is next character ' ' or '\t'?
  fn next_is_whitespace(&self) -> bool {
    self.next_is(' ') || self.next_is('\t')
  }

  /// Un-lexed text
  fn rest(&self) -> &'src str {
    &self.src[self.token_end.offset..]
  }

  /// Check if unlexed text begins with prefix
  fn rest_starts_with(&self, prefix: &str) -> bool {
    self.rest().starts_with(prefix)
  }

  /// Does rest start with "\n" or "\r\n"?
  fn at_eol(&self) -> bool {
    self.next_is('\n') || self.rest_starts_with("\r\n")
  }

  /// Are we at end-of-file?
  fn at_eof(&self) -> bool {
    self.rest().is_empty()
  }

  /// Are we at end-of-line or end-of-file?
  fn at_eol_or_eof(&self) -> bool {
    self.at_eol() || self.at_eof()
  }

  /// Get current indentation
  fn indentation(&self) -> &'src str {
    self.indentation.last().unwrap()
  }

  /// Are we currently indented?
  fn indented(&self) -> bool {
    !self.indentation().is_empty()
  }

  /// Create a new token with `kind` whose lexeme is between `self.token_start`
  /// and `self.token_end`
  fn token(&mut self, kind: TokenKind) {
    self.tokens.push(Token {
      offset: self.token_start.offset,
      column: self.token_start.column,
      line: self.token_start.line,
      src: self.src,
      length: self.token_end.offset - self.token_start.offset,
      kind,
      path: self.path,
    });

    // Set `token_start` to point after the lexed token
    self.token_start = self.token_end;
  }
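
  // Illustrative sketch (not part of the lexer proper): tokens are half-open
  // byte ranges over `src`. After advancing over "foo" in the source "foo:",
  // `token_start.offset` is 0 and `token_end.offset` is 3, so
  // `token(Identifier)` records the lexeme "foo" and moves `token_start` up
  // to offset 3, ready to lex the `:`.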

  /// Create an internal error with `message`
  fn internal_error(&self, message: impl Into<String>) -> CompileError<'src> {
    // Use `self.token_end` as the location of the error
    let token = Token {
      src: self.src,
      offset: self.token_end.offset,
      line: self.token_end.line,
      column: self.token_end.column,
      length: 0,
      kind: Unspecified,
      path: self.path,
    };
    CompileError::new(
      token,
      Internal {
        message: message.into(),
      },
    )
  }

  /// Create a compilation error with `kind`
  fn error(&self, kind: CompileErrorKind<'src>) -> CompileError<'src> {
    // Use the in-progress token span as the location of the error.

    // The width of the error site to highlight depends on the kind of error:
    let length = match kind {
      UnterminatedString | UnterminatedBacktick => {
        let Some(kind) = StringKind::from_token_start(self.lexeme()) else {
          return self.internal_error("Lexer::error: expected string or backtick token start");
        };
        kind.delimiter().len()
      }
      // highlight the full token
      _ => self.lexeme().len(),
    };

    let token = Token {
      kind: Unspecified,
      src: self.src,
      offset: self.token_start.offset,
      line: self.token_start.line,
      column: self.token_start.column,
      length,
      path: self.path,
    };

    CompileError::new(token, kind)
  }

  /// Create an error for an interpolation opened by `interpolation_start`
  /// but never closed
  fn unterminated_interpolation_error(interpolation_start: Token<'src>) -> CompileError<'src> {
    CompileError::new(interpolation_start, UnterminatedInterpolation)
  }

  /// True if `text` could be an identifier
  pub(crate) fn is_identifier(text: &str) -> bool {
    if !text.chars().next().map_or(false, Self::is_identifier_start) {
      return false;
    }

    for c in text.chars().skip(1) {
      if !Self::is_identifier_continue(c) {
        return false;
      }
    }

    true
  }
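
  // Illustrative sketch (not part of the lexer proper): identifiers must
  // start with a letter or underscore, while later characters may also be
  // digits or hyphens, so "foo", "bar-bob", and "test123" are identifiers
  // (see the `tokenize_names` test below), while "-foo" and "123" are not.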

  /// True if `c` can be the first character of an identifier
  pub(crate) fn is_identifier_start(c: char) -> bool {
    matches!(c, 'a'..='z' | 'A'..='Z' | '_')
  }

  /// True if `c` can be a continuation character of an identifier
  pub(crate) fn is_identifier_continue(c: char) -> bool {
    Self::is_identifier_start(c) || matches!(c, '0'..='9' | '-')
  }

  /// Consume the text and produce a series of tokens
  fn tokenize(mut self) -> CompileResult<'src, Vec<Token<'src>>> {
    loop {
      if self.token_start.column == 0 {
        self.lex_line_start()?;
      }

      match self.next {
        Some(first) => {
          if let Some(&interpolation_start) = self.interpolation_stack.last() {
            self.lex_interpolation(interpolation_start, first)?;
          } else if self.recipe_body {
            self.lex_body()?;
          } else {
            self.lex_normal(first)?;
          };
        }
        None => break,
      }
    }

    if let Some(&interpolation_start) = self.interpolation_stack.last() {
      return Err(Self::unterminated_interpolation_error(interpolation_start));
    }

    while self.indented() {
      self.lex_dedent();
    }

    self.token(Eof);

    assert_eq!(self.token_start.offset, self.token_end.offset);
    assert_eq!(self.token_start.offset, self.src.len());
    assert_eq!(self.indentation.len(), 1);

    Ok(self.tokens)
  }
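
  // Illustrative sketch (taken from the `indented_line` test below): lexing
  // the source "foo:\n a" produces the token stream
  //
  //   Identifier:"foo", Colon, Eol, Indent:" ", Text:"a", Dedent, Eof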

  /// Handle blank lines and indentation
  fn lex_line_start(&mut self) -> CompileResult<'src> {
    enum Indentation<'src> {
      // Line only contains whitespace
      Blank,
      // Indentation continues
      Continue,
      // Indentation decreases
      Decrease,
      // Indentation isn't consistent
      Inconsistent,
      // Indentation increases
      Increase,
      // Indentation mixes spaces and tabs
      Mixed { whitespace: &'src str },
    }

    use Indentation::*;

    let nonblank_index = self
      .rest()
      .char_indices()
      .skip_while(|&(_, c)| c == ' ' || c == '\t')
      .map(|(i, _)| i)
      .next()
      .unwrap_or_else(|| self.rest().len());

    let rest = &self.rest()[nonblank_index..];

    let whitespace = &self.rest()[..nonblank_index];

    let body_whitespace = &whitespace[..whitespace
      .char_indices()
      .take(self.indentation().chars().count())
      .map(|(i, _c)| i)
      .next()
      .unwrap_or(0)];

    let spaces = whitespace.chars().any(|c| c == ' ');
    let tabs = whitespace.chars().any(|c| c == '\t');

    let body_spaces = body_whitespace.chars().any(|c| c == ' ');
    let body_tabs = body_whitespace.chars().any(|c| c == '\t');

    #[allow(clippy::if_same_then_else)]
    let indentation = if rest.starts_with('\n') || rest.starts_with("\r\n") || rest.is_empty() {
      Blank
    } else if whitespace == self.indentation() {
      Continue
    } else if self.indentation.contains(&whitespace) {
      Decrease
    } else if self.recipe_body && whitespace.starts_with(self.indentation()) {
      Continue
    } else if self.recipe_body && body_spaces && body_tabs {
      Mixed {
        whitespace: body_whitespace,
      }
    } else if !self.recipe_body && spaces && tabs {
      Mixed { whitespace }
    } else if whitespace.len() < self.indentation().len() {
      Inconsistent
    } else if self.recipe_body
      && body_whitespace.len() >= self.indentation().len()
      && !body_whitespace.starts_with(self.indentation())
    {
      Inconsistent
    } else if whitespace.len() >= self.indentation().len()
      && !whitespace.starts_with(self.indentation())
    {
      Inconsistent
    } else {
      Increase
    };

    match indentation {
      Blank => {
        if !whitespace.is_empty() {
          while self.next_is_whitespace() {
            self.advance()?;
          }

          self.token(Whitespace);
        };

        Ok(())
      }
      Continue => {
        if !self.indentation().is_empty() {
          for _ in self.indentation().chars() {
            self.advance()?;
          }

          self.token(Whitespace);
        }

        Ok(())
      }
      Decrease => {
        while self.indentation() != whitespace {
          self.lex_dedent();
        }

        if !whitespace.is_empty() {
          while self.next_is_whitespace() {
            self.advance()?;
          }

          self.token(Whitespace);
        }

        Ok(())
      }
      Mixed { whitespace } => {
        for _ in whitespace.chars() {
          self.advance()?;
        }

        Err(self.error(MixedLeadingWhitespace { whitespace }))
      }
      Inconsistent => {
        for _ in whitespace.chars() {
          self.advance()?;
        }

        Err(self.error(InconsistentLeadingWhitespace {
          expected: self.indentation(),
          found: whitespace,
        }))
      }
      Increase => {
        while self.next_is_whitespace() {
          self.advance()?;
        }

        if self.open_delimiters() {
          self.token(Whitespace);
        } else {
          let indentation = self.lexeme();
          self.indentation.push(indentation);
          self.token(Indent);
          if self.recipe_body_pending {
            self.recipe_body = true;
          }
        }

        Ok(())
      }
    }
  }
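
  // Illustrative sketch (not part of the lexer proper): outside a recipe
  // body, with the indentation stack ["", "  "], a line's leading whitespace
  // classifies as
  //
  //   leading whitespace  | classification
  //   --------------------+-----------------------------------------
  //   "  " (stack top)    | Continue
  //   ""   (on the stack) | Decrease
  //   "    "              | Increase
  //   " "                 | Inconsistent (shorter, not on the stack)
  //   " \t"               | Mixed (spaces and tabs)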

  /// Lex token beginning with `start` outside of a recipe body
  fn lex_normal(&mut self, start: char) -> CompileResult<'src> {
    match start {
      ' ' | '\t' => self.lex_whitespace(),
      '!' if self.rest().starts_with("!include") => Err(self.error(Include)),
      '!' => self.lex_digraph('!', '=', BangEquals),
      '#' => self.lex_comment(),
      '$' => self.lex_single(Dollar),
      '&' => self.lex_digraph('&', '&', AmpersandAmpersand),
      '(' => self.lex_delimiter(ParenL),
      ')' => self.lex_delimiter(ParenR),
      '*' => self.lex_single(Asterisk),
      '+' => self.lex_single(Plus),
      ',' => self.lex_single(Comma),
      '/' => self.lex_single(Slash),
      ':' => self.lex_colon(),
      '=' => self.lex_choices('=', &[('=', EqualsEquals), ('~', EqualsTilde)], Equals),
      '?' => self.lex_single(QuestionMark),
      '@' => self.lex_single(At),
      '[' => self.lex_delimiter(BracketL),
      '\\' => self.lex_escape(),
      '\n' | '\r' => self.lex_eol(),
      '\u{feff}' => self.lex_single(ByteOrderMark),
      ']' => self.lex_delimiter(BracketR),
      '`' | '"' | '\'' => self.lex_string(),
      '{' => self.lex_delimiter(BraceL),
      '}' => self.lex_delimiter(BraceR),
      _ if Self::is_identifier_start(start) => self.lex_identifier(),
      _ => {
        self.advance()?;
        Err(self.error(UnknownStartOfToken))
      }
    }
  }

  /// Lex token beginning with `start` inside an interpolation
  fn lex_interpolation(
    &mut self,
    interpolation_start: Token<'src>,
    start: char,
  ) -> CompileResult<'src> {
    if self.rest_starts_with("}}") {
      // end current interpolation
      if self.interpolation_stack.pop().is_none() {
        self.advance()?;
        self.advance()?;
        return Err(self.internal_error(
          "Lexer::lex_interpolation found `}}` but was called with empty interpolation stack.",
        ));
      }
      // Emit interpolation end token
      self.lex_double(InterpolationEnd)
    } else if self.at_eol_or_eof() {
      // Return unterminated interpolation error that highlights the opening
      // {{
      Err(Self::unterminated_interpolation_error(interpolation_start))
    } else {
      // Otherwise lex as per normal
      self.lex_normal(start)
    }
  }
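
  // Illustrative sketch (taken from the `interpolation_expression` test
  // below): in the recipe line " echo {{`echo hello` + `echo goodbye`}}",
  // everything between "{{" and "}}" is lexed as ordinary tokens:
  //
  //   Text:"echo ", InterpolationStart, Backtick:"`echo hello`", Whitespace,
  //   Plus, Whitespace, Backtick:"`echo goodbye`", InterpolationEnd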

  /// Lex token while in recipe body
  fn lex_body(&mut self) -> CompileResult<'src> {
    enum Terminator {
      Newline,
      NewlineCarriageReturn,
      Interpolation,
      EndOfFile,
    }

    use Terminator::*;

    let terminator = loop {
      if self.rest_starts_with("{{{{") {
        self.skip(4)?;
        continue;
      }

      if self.rest_starts_with("\n") {
        break Newline;
      }

      if self.rest_starts_with("\r\n") {
        break NewlineCarriageReturn;
      }

      if self.rest_starts_with("{{") {
        break Interpolation;
      }

      if self.at_eof() {
        break EndOfFile;
      }

      self.advance()?;
    };

    // emit text token containing text so far
    if self.current_token_length() > 0 {
      self.token(Text);
    }

    match terminator {
      Newline => self.lex_single(Eol),
      NewlineCarriageReturn => self.lex_double(Eol),
      Interpolation => {
        self.lex_double(InterpolationStart)?;
        self
          .interpolation_stack
          .push(self.tokens[self.tokens.len() - 1]);
        Ok(())
      }
      EndOfFile => Ok(()),
    }
  }
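
  // Illustrative sketch (taken from the `brace_escape` test below): "{{{{"
  // in a recipe body is skipped over rather than treated as an interpolation
  // start, so a body line containing only "{{{{" lexes as the single token
  // Text:"{{{{".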

  /// Emit a dedent token and pop the current indentation from the stack
  fn lex_dedent(&mut self) {
    assert_eq!(self.current_token_length(), 0);
    self.token(Dedent);
    self.indentation.pop();
    self.recipe_body_pending = false;
    self.recipe_body = false;
  }

  /// Lex a single-character token
  fn lex_single(&mut self, kind: TokenKind) -> CompileResult<'src> {
    self.advance()?;
    self.token(kind);
    Ok(())
  }

  /// Lex a double-character token
  fn lex_double(&mut self, kind: TokenKind) -> CompileResult<'src> {
    self.advance()?;
    self.advance()?;
    self.token(kind);
    Ok(())
  }

  /// Lex a double-character token of kind `then` if the second character
  /// matches the corresponding `second` in `choices`, otherwise lex a
  /// single-character token of kind `otherwise`
  fn lex_choices(
    &mut self,
    first: char,
    choices: &[(char, TokenKind)],
    otherwise: TokenKind,
  ) -> CompileResult<'src> {
    self.presume(first)?;

    for (second, then) in choices {
      if self.accepted(*second)? {
        self.token(*then);
        return Ok(());
      }
    }

    self.token(otherwise);

    Ok(())
  }
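
  // Illustrative sketch (see the '=' arm of `lex_normal` above): with
  // choices &[('=', EqualsEquals), ('~', EqualsTilde)] and fallback Equals,
  // "==" lexes as EqualsEquals, "=~" as EqualsTilde, and a lone "=" as
  // Equals.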

  /// Lex an opening or closing delimiter
  fn lex_delimiter(&mut self, kind: TokenKind) -> CompileResult<'src> {
    use Delimiter::*;

    match kind {
      BraceL => self.open_delimiter(Brace),
      BraceR => self.close_delimiter(Brace)?,
      BracketL => self.open_delimiter(Bracket),
      BracketR => self.close_delimiter(Bracket)?,
      ParenL => self.open_delimiter(Paren),
      ParenR => self.close_delimiter(Paren)?,
      _ => {
        return Err(self.internal_error(format!(
          "Lexer::lex_delimiter called with non-delimiter token: `{kind}`",
        )))
      }
    }

    // Emit the delimiter token
    self.lex_single(kind)
  }

  /// Push a delimiter onto the open delimiter stack
  fn open_delimiter(&mut self, delimiter: Delimiter) {
    self
      .open_delimiters
      .push((delimiter, self.token_start.line));
  }

  /// Pop a delimiter from the open delimiter stack and error if incorrect type
  fn close_delimiter(&mut self, close: Delimiter) -> CompileResult<'src> {
    match self.open_delimiters.pop() {
      Some((open, _)) if open == close => Ok(()),
      Some((open, open_line)) => Err(self.error(MismatchedClosingDelimiter {
        open,
        close,
        open_line,
      })),
      None => Err(self.error(UnexpectedClosingDelimiter { close })),
    }
  }

  /// Return true if there are any unclosed delimiters
  fn open_delimiters(&self) -> bool {
    !self.open_delimiters.is_empty()
  }
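
  // Illustrative sketch (not part of the lexer proper): lexing "([)" pushes
  // Paren and then Bracket onto the stack; the ')' then pops Bracket, which
  // doesn't match, producing a MismatchedClosingDelimiter error that points
  // back at the line where the '[' was opened.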

  /// Lex a two-character digraph
  fn lex_digraph(&mut self, left: char, right: char, token: TokenKind) -> CompileResult<'src> {
    self.presume(left)?;

    if self.accepted(right)? {
      self.token(token);
      Ok(())
    } else {
      // Emit an unspecified token to consume the current character,
      self.token(Unspecified);

      if self.at_eof() {
        return Err(self.error(UnexpectedEndOfToken { expected: right }));
      }

      // …and advance past another character,
      self.advance()?;

      // …so that the error we produce highlights the unexpected character.
      Err(self.error(UnexpectedCharacter { expected: right }))
    }
  }

  /// Lex a token starting with ':'
  fn lex_colon(&mut self) -> CompileResult<'src> {
    self.presume(':')?;

    if self.accepted('=')? {
      self.token(ColonEquals);
    } else {
      self.token(Colon);
      self.recipe_body_pending = true;
    }

    Ok(())
  }

  /// Lex a token starting with a '\' escape
  fn lex_escape(&mut self) -> CompileResult<'src> {
    self.presume('\\')?;

    // Treat newline escaped with \ as whitespace
    if self.accepted('\n')? {
      while self.next_is_whitespace() {
        self.advance()?;
      }
      self.token(Whitespace);
    } else if self.accepted('\r')? {
      if !self.accepted('\n')? {
        return Err(self.error(UnpairedCarriageReturn));
      }
      while self.next_is_whitespace() {
        self.advance()?;
      }
      self.token(Whitespace);
    } else if let Some(character) = self.next {
      return Err(self.error(InvalidEscapeSequence { character }));
    }

    Ok(())
  }

  /// Lex a carriage return and line feed
  fn lex_eol(&mut self) -> CompileResult<'src> {
    if self.accepted('\r')? {
      if !self.accepted('\n')? {
        return Err(self.error(UnpairedCarriageReturn));
      }
    } else {
      self.presume('\n')?;
    }

    // Emit an eol if there are no open delimiters, otherwise emit a whitespace
    // token.
    if self.open_delimiters() {
      self.token(Whitespace);
    } else {
      self.token(Eol);
    }

    Ok(())
  }
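
  // Illustrative sketch (not part of the lexer proper): inside an unclosed
  // "(", a newline is emitted as Whitespace instead of Eol, which is what
  // lets an expression span multiple lines between delimiters.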

  /// Lex name: [a-zA-Z_][a-zA-Z0-9_-]*
  fn lex_identifier(&mut self) -> CompileResult<'src> {
    self.advance()?;

    while let Some(c) = self.next {
      if !Self::is_identifier_continue(c) {
        break;
      }

      self.advance()?;
    }

    self.token(Identifier);

    Ok(())
  }

  /// Lex comment: #[^\r\n]*
  fn lex_comment(&mut self) -> CompileResult<'src> {
    self.presume('#')?;

    while !self.at_eol_or_eof() {
      self.advance()?;
    }

    self.token(Comment);

    Ok(())
  }

  /// Lex whitespace: [ \t]+
  fn lex_whitespace(&mut self) -> CompileResult<'src> {
    while self.next_is_whitespace() {
      self.advance()?;
    }

    self.token(Whitespace);

    Ok(())
  }

  /// Lex a backtick, cooked string, or raw string.
  ///
  /// Backtick: ``[^`]*``
  /// Cooked string: "[^"]*" # also processes escape sequences
  /// Raw string: '[^']*'
  fn lex_string(&mut self) -> CompileResult<'src> {
    let Some(kind) = StringKind::from_token_start(self.rest()) else {
      self.advance()?;
      return Err(self.internal_error("Lexer::lex_string: invalid string start"));
    };

    self.presume_str(kind.delimiter())?;

    let mut escape = false;

    loop {
      if self.next.is_none() {
        return Err(self.error(kind.unterminated_error_kind()));
      } else if kind.processes_escape_sequences() && self.next_is('\\') && !escape {
        escape = true;
      } else if self.rest_starts_with(kind.delimiter()) && !escape {
        break;
      } else {
        escape = false;
      }

      self.advance()?;
    }

    self.presume_str(kind.delimiter())?;
    self.token(kind.token_kind());

    Ok(())
  }
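
  // Illustrative sketch (not part of the lexer proper): the `escape` flag
  // lets a cooked string contain its own delimiter. In "a\"b", the backslash
  // sets `escape`, so the inner '"' is consumed as string contents, and only
  // the final unescaped '"' terminates the token.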
}

#[cfg(test)]
mod tests {
  use super::*;

  use pretty_assertions::assert_eq;

  macro_rules! test {
    {
      name: $name:ident,
      text: $text:expr,
      tokens: ($($kind:ident $(: $lexeme:literal)?),* $(,)?)$(,)?
    } => {
      #[test]
      fn $name() {
        let kinds: &[TokenKind] = &[$($kind,)* Eof];

        let lexemes: &[&str] = &[$(lexeme!($kind $(, $lexeme)?),)* ""];

        test($text, true, kinds, lexemes);
      }
    };
    {
      name: $name:ident,
      text: $text:expr,
      tokens: ($($kind:ident $(: $lexeme:literal)?),* $(,)?)$(,)?
      unindent: $unindent:expr,
    } => {
      #[test]
      fn $name() {
        let kinds: &[TokenKind] = &[$($kind,)* Eof];

        let lexemes: &[&str] = &[$(lexeme!($kind $(, $lexeme)?),)* ""];

        test($text, $unindent, kinds, lexemes);
      }
    }
  }

  macro_rules! lexeme {
    {
      $kind:ident, $lexeme:literal
    } => {
      $lexeme
    };
    {
      $kind:ident
    } => {
      default_lexeme($kind)
    }
  }

  fn test(text: &str, unindent_text: bool, want_kinds: &[TokenKind], want_lexemes: &[&str]) {
    let text = if unindent_text {
      unindent(text)
    } else {
      text.to_owned()
    };

    let have = Lexer::test_lex(&text).unwrap();

    let have_kinds = have
      .iter()
      .map(|token| token.kind)
      .collect::<Vec<TokenKind>>();

    let have_lexemes = have.iter().map(Token::lexeme).collect::<Vec<&str>>();

    assert_eq!(have_kinds, want_kinds, "Token kind mismatch");
    assert_eq!(have_lexemes, want_lexemes, "Token lexeme mismatch");

    let mut roundtrip = String::new();

    for lexeme in have_lexemes {
      roundtrip.push_str(lexeme);
    }

    assert_eq!(roundtrip, text, "Roundtrip mismatch");

    let mut offset = 0;
    let mut line = 0;
    let mut column = 0;

    for token in have {
      assert_eq!(token.offset, offset);
      assert_eq!(token.line, line);
      assert_eq!(token.lexeme().len(), token.length);
      assert_eq!(token.column, column);

      for c in token.lexeme().chars() {
        if c == '\n' {
          line += 1;
          column = 0;
        } else {
          column += c.len_utf8();
        }
      }

      offset += token.length;
    }
  }
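
  // A minimal hand-written example of driving the `test` helper directly,
  // rather than through the `test!` macro. The name `example_colon_equals`
  // is hypothetical and not part of the original suite:
  #[test]
  fn example_colon_equals() {
    // ":=" lexes as a single two-character ColonEquals token followed by Eof.
    test(":=", false, &[ColonEquals, Eof], &[":=", ""]);
  }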

  fn default_lexeme(kind: TokenKind) -> &'static str {
    match kind {
      // Fixed lexemes
      AmpersandAmpersand => "&&",
      Asterisk => "*",
      At => "@",
      BangEquals => "!=",
      BraceL => "{",
      BraceR => "}",
      BracketL => "[",
      BracketR => "]",
      ByteOrderMark => "\u{feff}",
      Colon => ":",
      ColonEquals => ":=",
      Comma => ",",
      Dollar => "$",
      Eol => "\n",
      Equals => "=",
      EqualsEquals => "==",
      EqualsTilde => "=~",
      Indent => "  ",
      InterpolationEnd => "}}",
      InterpolationStart => "{{",
      ParenL => "(",
      ParenR => ")",
      Plus => "+",
      QuestionMark => "?",
      Slash => "/",
      Whitespace => " ",

      // Empty lexemes
      Dedent | Eof => "",

      // Variable lexemes
      Text | StringToken | Backtick | Identifier | Comment | Unspecified => {
        panic!("Token {kind:?} has no default lexeme")
      }
    }
  }

  macro_rules! error {
    (
      name: $name:ident,
      input: $input:expr,
      offset: $offset:expr,
      line: $line:expr,
      column: $column:expr,
      width: $width:expr,
      kind: $kind:expr,
    ) => {
      #[test]
      fn $name() {
        error($input, $offset, $line, $column, $width, $kind);
      }
    };
  }

  fn error(
    src: &str,
    offset: usize,
    line: usize,
    column: usize,
    length: usize,
    kind: CompileErrorKind,
  ) {
    match Lexer::test_lex(src) {
      Ok(_) => panic!("Lexing succeeded but an error was expected"),
      Err(have) => {
        let want = CompileError {
          token: Token {
            kind: have.token.kind,
            src,
            offset,
            line,
            column,
            length,
            path: "justfile".as_ref(),
          },
          kind: kind.into(),
        };
        assert_eq!(have, want);
      }
    }
  }
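
  // A hand-written example of the `error!` macro; the name
  // `example_unpaired_carriage_return` is hypothetical and not from the
  // original suite. A bare carriage return with no following linefeed is an
  // error, and the highlighted span is the single '\r' character:
  error! {
    name: example_unpaired_carriage_return,
    input: "\r",
    offset: 0,
    line: 0,
    column: 0,
    width: 1,
    kind: UnpairedCarriageReturn,
  }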

  test! {
    name: name_new,
    text: "foo",
    tokens: (Identifier:"foo"),
  }

  test! {
    name: comment,
    text: "# hello",
    tokens: (Comment:"# hello"),
  }

  test! {
    name: backtick,
    text: "`echo`",
    tokens: (Backtick:"`echo`"),
  }

  test! {
    name: backtick_multi_line,
    text: "`echo\necho`",
    tokens: (Backtick:"`echo\necho`"),
  }

  test! {
    name: raw_string,
    text: "'hello'",
    tokens: (StringToken:"'hello'"),
  }

  test! {
    name: raw_string_multi_line,
    text: "'hello\ngoodbye'",
    tokens: (StringToken:"'hello\ngoodbye'"),
  }

  test! {
    name: cooked_string,
    text: "\"hello\"",
    tokens: (StringToken:"\"hello\""),
  }

  test! {
    name: cooked_string_multi_line,
    text: "\"hello\ngoodbye\"",
    tokens: (StringToken:"\"hello\ngoodbye\""),
  }

  test! {
    name: cooked_multiline_string,
    text: "\"\"\"hello\ngoodbye\"\"\"",
    tokens: (StringToken:"\"\"\"hello\ngoodbye\"\"\""),
  }

  test! {
    name: ampersand_ampersand,
    text: "&&",
    tokens: (AmpersandAmpersand),
  }

  test! {
    name: equals,
    text: "=",
    tokens: (Equals),
  }

  test! {
    name: equals_equals,
    text: "==",
    tokens: (EqualsEquals),
  }

  test! {
    name: bang_equals,
    text: "!=",
    tokens: (BangEquals),
  }

  test! {
    name: brace_l,
    text: "{",
    tokens: (BraceL),
  }

  test! {
    name: brace_r,
    text: "{}",
    tokens: (BraceL, BraceR),
  }

  test! {
    name: brace_lll,
    text: "{{{",
    tokens: (BraceL, BraceL, BraceL),
  }

  test! {
    name: brace_rrr,
    text: "{{{}}}",
    tokens: (BraceL, BraceL, BraceL, BraceR, BraceR, BraceR),
  }

  test! {
    name: dollar,
    text: "$",
    tokens: (Dollar),
  }

  test! {
    name: export_concatenation,
    text: "export foo = 'foo' + 'bar'",
    tokens: (
      Identifier:"export",
      Whitespace,
      Identifier:"foo",
      Whitespace,
      Equals,
      Whitespace,
      StringToken:"'foo'",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"'bar'",
    )
  }

  test! {
    name: export_complex,
    text: "export foo = ('foo' + 'bar') + `baz`",
    tokens: (
      Identifier:"export",
      Whitespace,
      Identifier:"foo",
      Whitespace,
      Equals,
      Whitespace,
      ParenL,
      StringToken:"'foo'",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"'bar'",
      ParenR,
      Whitespace,
      Plus,
      Whitespace,
      Backtick:"`baz`",
    ),
  }

  test! {
    name: eol_linefeed,
    text: "\n",
    tokens: (Eol),
    unindent: false,
  }

  test! {
    name: eol_carriage_return_linefeed,
    text: "\r\n",
    tokens: (Eol:"\r\n"),
    unindent: false,
  }

  test! {
    name: indented_line,
    text: "foo:\n a",
    tokens: (Identifier:"foo", Colon, Eol, Indent:" ", Text:"a", Dedent),
  }

  test! {
    name: indented_normal,
    text: "
      a
        b
        c
    ",
    tokens: (
      Identifier:"a",
      Eol,
      Indent:"  ",
      Identifier:"b",
      Eol,
      Whitespace:"  ",
      Identifier:"c",
      Eol,
      Dedent,
    ),
  }

  test! {
    name: indented_normal_nonempty_blank,
    text: "a\n  b\n\t\t\n  c\n",
    tokens: (
      Identifier:"a",
      Eol,
      Indent:"  ",
      Identifier:"b",
      Eol,
      Whitespace:"\t\t",
      Eol,
      Whitespace:"  ",
      Identifier:"c",
      Eol,
      Dedent,
    ),
    unindent: false,
  }

  test! {
    name: indented_normal_multiple,
    text: "
      a
        b
          c
    ",
    tokens: (
      Identifier:"a",
      Eol,
      Indent:"  ",
      Identifier:"b",
      Eol,
      Indent:"    ",
      Identifier:"c",
      Eol,
      Dedent,
      Dedent,
    ),
  }

  test! {
    name: indent_indent_dedent_indent,
    text: "
      a
        b
          c
        d
          e
    ",
    tokens: (
      Identifier:"a",
      Eol,
      Indent:"  ",
      Identifier:"b",
      Eol,
      Indent:"    ",
      Identifier:"c",
      Eol,
      Dedent,
      Whitespace:"  ",
      Identifier:"d",
      Eol,
      Indent:"    ",
      Identifier:"e",
      Eol,
      Dedent,
      Dedent,
    ),
  }

  test! {
    name: indent_recipe_dedent_indent,
    text: "
      a
        b:
          c
        d
          e
    ",
    tokens: (
      Identifier:"a",
      Eol,
      Indent:"  ",
      Identifier:"b",
      Colon,
      Eol,
      Indent:"    ",
      Text:"c",
      Eol,
      Dedent,
      Whitespace:"  ",
      Identifier:"d",
      Eol,
      Indent:"    ",
      Identifier:"e",
      Eol,
      Dedent,
      Dedent,
    ),
  }

  test! {
    name: indented_block,
    text: "
      foo:
        a
        b
        c
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent,
      Text:"a",
      Eol,
      Whitespace:"  ",
      Text:"b",
      Eol,
      Whitespace:"  ",
      Text:"c",
      Eol,
      Dedent,
    )
  }

  test! {
    name: brace_escape,
    text: "
      foo:
        {{{{
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent,
      Text:"{{{{",
      Eol,
      Dedent,
    )
  }

  test! {
    name: indented_block_followed_by_item,
    text: "
      foo:
        a
      b:
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent,
      Text:"a",
      Eol,
      Dedent,
      Identifier:"b",
      Colon,
      Eol,
    )
  }

  test! {
    name: indented_block_followed_by_blank,
    text: "
      foo:
        a

      b:
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent:"  ",
      Text:"a",
      Eol,
      Eol,
      Dedent,
      Identifier:"b",
      Colon,
      Eol,
    ),
  }
|
|
|
|
|
|
|
|
test! {
|
|
|
|
name: indented_line_containing_unpaired_carriage_return,
|
|
|
|
text: "foo:\n \r \n",
|
|
|
|
tokens: (
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"foo",
|
2019-10-17 20:04:54 -07:00
|
|
|
Colon,
|
|
|
|
Eol,
|
|
|
|
Indent:" ",
|
|
|
|
Text:"\r ",
|
|
|
|
Eol,
|
|
|
|
Dedent,
|
|
|
|
),
|
2021-04-05 21:28:37 -07:00
|
|
|
unindent: false,
|
2019-10-17 20:04:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
test! {
|
|
|
|
name: indented_blocks,
|
|
|
|
text: "
|
|
|
|
b: a
|
|
|
|
@mv a b
|
|
|
|
|
|
|
|
a:
|
|
|
|
@touch F
|
|
|
|
@touch a
|
|
|
|
|
|
|
|
d: c
|
|
|
|
@rm c
|
|
|
|
|
|
|
|
c: b
|
|
|
|
@mv b c
|
|
|
|
",
|
|
|
|
tokens: (
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"b",
|
2019-10-17 20:04:54 -07:00
|
|
|
Colon,
|
|
|
|
Whitespace,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"a",
|
2019-10-17 20:04:54 -07:00
|
|
|
Eol,
|
|
|
|
Indent,
|
|
|
|
Text:"@mv a b",
|
|
|
|
Eol,
|
|
|
|
Eol,
|
|
|
|
Dedent,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"a",
|
2019-10-17 20:04:54 -07:00
|
|
|
Colon,
|
|
|
|
Eol,
|
|
|
|
Indent,
|
|
|
|
Text:"@touch F",
|
|
|
|
Eol,
|
|
|
|
Whitespace:" ",
|
|
|
|
Text:"@touch a",
|
|
|
|
Eol,
|
|
|
|
Eol,
|
|
|
|
Dedent,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"d",
|
2019-10-17 20:04:54 -07:00
|
|
|
Colon,
|
|
|
|
Whitespace,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"c",
|
2019-10-17 20:04:54 -07:00
|
|
|
Eol,
|
|
|
|
Indent,
|
|
|
|
Text:"@rm c",
|
|
|
|
Eol,
|
|
|
|
Eol,
|
|
|
|
Dedent,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"c",
|
2019-10-17 20:04:54 -07:00
|
|
|
Colon,
|
|
|
|
Whitespace,
|
2019-11-07 10:55:15 -08:00
|
|
|
Identifier:"b",
|
2019-10-17 20:04:54 -07:00
|
|
|
Eol,
|
|
|
|
Indent,
|
|
|
|
Text:"@mv b c",
|
|
|
|
Eol,
|
|
|
|
Dedent
|
|
|
|
),
|
|
|
|
}

  test! {
    name: interpolation_empty,
    text: "hello:\n echo {{}}",
    tokens: (
      Identifier:"hello",
      Colon,
      Eol,
      Indent:" ",
      Text:"echo ",
      InterpolationStart,
      InterpolationEnd,
      Dedent,
    ),
  }

  test! {
    name: interpolation_expression,
    text: "hello:\n echo {{`echo hello` + `echo goodbye`}}",
    tokens: (
      Identifier:"hello",
      Colon,
      Eol,
      Indent:" ",
      Text:"echo ",
      InterpolationStart,
      Backtick:"`echo hello`",
      Whitespace,
      Plus,
      Whitespace,
      Backtick:"`echo goodbye`",
      InterpolationEnd,
      Dedent,
    ),
  }

  test! {
    name: interpolation_raw_multiline_string,
    text: "hello:\n echo {{'\n'}}",
    tokens: (
      Identifier:"hello",
      Colon,
      Eol,
      Indent:" ",
      Text:"echo ",
      InterpolationStart,
      StringToken:"'\n'",
      InterpolationEnd,
      Dedent,
    ),
  }
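
  // Within a recipe body, `{{` switches the lexer out of raw-text mode: the
  // tokens between InterpolationStart and InterpolationEnd are ordinary
  // expression tokens (identifiers, strings, backticks, operators).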

  test! {
    name: tokenize_names,
    text: "
      foo
      bar-bob
      b-bob_asdfAAAA
      test123
    ",
    tokens: (
      Identifier:"foo",
      Eol,
      Identifier:"bar-bob",
      Eol,
      Identifier:"b-bob_asdfAAAA",
      Eol,
      Identifier:"test123",
      Eol,
    ),
  }

  test! {
    name: tokenize_indented_line,
    text: "foo:\n a",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent:" ",
      Text:"a",
      Dedent,
    ),
  }

  test! {
    name: tokenize_indented_block,
    text: "
      foo:
        a
        b
        c
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Eol,
      Indent,
      Text:"a",
      Eol,
      Whitespace:"  ",
      Text:"b",
      Eol,
      Whitespace:"  ",
      Text:"c",
      Eol,
      Dedent,
    ),
  }

  test! {
    name: tokenize_strings,
    text: r#"a = "'a'" + '"b"' + "'c'" + '"d"'#echo hello"#,
    tokens: (
      Identifier:"a",
      Whitespace,
      Equals,
      Whitespace,
      StringToken:"\"'a'\"",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"'\"b\"'",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"\"'c'\"",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"'\"d\"'",
      Comment:"#echo hello",
    )
  }
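
  // StringToken lexemes include their surrounding quotes, so a later stage
  // can distinguish cooked ("...") from raw ('...') strings by inspecting the
  // token's text.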

  test! {
    name: tokenize_recipe_interpolation_eol,
    text: "
      foo: # some comment
        {{hello}}
    ",
    tokens: (
      Identifier:"foo",
      Colon,
      Whitespace,
      Comment:"# some comment",
      Eol,
      Indent:"  ",
      InterpolationStart,
      Identifier:"hello",
      InterpolationEnd,
      Eol,
      Dedent
    ),
  }

  test! {
    name: tokenize_recipe_interpolation_eof,
    text: "foo: # more comments
 {{hello}}
# another comment
",
    tokens: (
      Identifier:"foo",
      Colon,
      Whitespace,
      Comment:"# more comments",
      Eol,
      Indent:" ",
      InterpolationStart,
      Identifier:"hello",
      InterpolationEnd,
      Eol,
      Dedent,
      Comment:"# another comment",
      Eol,
    ),
  }

  test! {
    name: tokenize_recipe_complex_interpolation_expression,
    text: "foo: #lol\n {{a + b + \"z\" + blarg}}",
    tokens: (
      Identifier:"foo",
      Colon,
      Whitespace:" ",
      Comment:"#lol",
      Eol,
      Indent:" ",
      InterpolationStart,
      Identifier:"a",
      Whitespace,
      Plus,
      Whitespace,
      Identifier:"b",
      Whitespace,
      Plus,
      Whitespace,
      StringToken:"\"z\"",
      Whitespace,
      Plus,
      Whitespace,
      Identifier:"blarg",
      InterpolationEnd,
      Dedent,
    ),
  }

  test! {
    name: tokenize_recipe_multiple_interpolations,
    text: "foo:,#ok\n {{a}}0{{b}}1{{c}}",
    tokens: (
      Identifier:"foo",
      Colon,
      Comma,
      Comment:"#ok",
      Eol,
      Indent:" ",
      InterpolationStart,
      Identifier:"a",
      InterpolationEnd,
      Text:"0",
      InterpolationStart,
      Identifier:"b",
      InterpolationEnd,
      Text:"1",
      InterpolationStart,
      Identifier:"c",
      InterpolationEnd,
      Dedent,
    ),
  }

  test! {
    name: tokenize_junk,
    text: "
      bob

      hello blah blah blah : a b c #whatever
    ",
    tokens: (
      Identifier:"bob",
      Eol,
      Eol,
      Identifier:"hello",
      Whitespace,
      Identifier:"blah",
      Whitespace,
      Identifier:"blah",
      Whitespace,
      Identifier:"blah",
      Whitespace,
      Colon,
      Whitespace,
      Identifier:"a",
      Whitespace,
      Identifier:"b",
      Whitespace,
      Identifier:"c",
      Whitespace,
      Comment:"#whatever",
      Eol,
    )
  }

  test! {
    name: tokenize_empty_lines,
    text: "

      # this does something
      hello:
        asdf
        bsdf

        csdf

        dsdf # whatever

      # yolo
    ",
    tokens: (
      Eol,
      Comment:"# this does something",
      Eol,
      Identifier:"hello",
      Colon,
      Eol,
      Indent,
      Text:"asdf",
      Eol,
      Whitespace:"  ",
      Text:"bsdf",
      Eol,
      Eol,
      Whitespace:"  ",
      Text:"csdf",
      Eol,
      Eol,
      Whitespace:"  ",
      Text:"dsdf # whatever",
      Eol,
      Eol,
      Dedent,
      Comment:"# yolo",
      Eol,
    ),
  }

  test! {
    name: tokenize_comment_before_variable,
    text: "
      #
      A='1'
      echo:
        echo {{A}}
    ",
    tokens: (
      Comment:"#",
      Eol,
      Identifier:"A",
      Equals,
      StringToken:"'1'",
      Eol,
      Identifier:"echo",
      Colon,
      Eol,
      Indent,
      Text:"echo ",
      InterpolationStart,
      Identifier:"A",
      InterpolationEnd,
      Eol,
      Dedent,
    ),
  }

  test! {
    name: tokenize_interpolation_backticks,
    text: "hello:\n echo {{`echo hello` + `echo goodbye`}}",
    tokens: (
      Identifier:"hello",
      Colon,
      Eol,
      Indent:" ",
      Text:"echo ",
      InterpolationStart,
      Backtick:"`echo hello`",
      Whitespace,
      Plus,
      Whitespace,
      Backtick:"`echo goodbye`",
      InterpolationEnd,
      Dedent
    ),
  }

  test! {
    name: tokenize_empty_interpolation,
    text: "hello:\n echo {{}}",
    tokens: (
      Identifier:"hello",
      Colon,
      Eol,
      Indent:" ",
      Text:"echo ",
      InterpolationStart,
      InterpolationEnd,
      Dedent,
    ),
  }

  test! {
    name: tokenize_assignment_backticks,
    text: "a = `echo hello` + `echo goodbye`",
    tokens: (
      Identifier:"a",
      Whitespace,
      Equals,
      Whitespace,
      Backtick:"`echo hello`",
      Whitespace,
      Plus,
      Whitespace,
      Backtick:"`echo goodbye`",
    ),
  }

  test! {
    name: tokenize_multiple,
    text: "

      hello:
        a
        b

        c

        d

      # hello
      bob:
        frank
      \t
    ",
    tokens: (
      Eol,
      Identifier:"hello",
      Colon,
      Eol,
      Indent,
      Text:"a",
      Eol,
      Whitespace:"  ",
      Text:"b",
      Eol,
      Eol,
      Whitespace:"  ",
      Text:"c",
      Eol,
      Eol,
      Whitespace:"  ",
      Text:"d",
      Eol,
      Eol,
      Dedent,
      Comment:"# hello",
      Eol,
      Identifier:"bob",
      Colon,
      Eol,
      Indent:"  ",
      Text:"frank",
      Eol,
      Eol,
      Dedent,
    ),
  }

  test! {
    name: tokenize_comment,
    text: "a:=#",
    tokens: (
      Identifier:"a",
      ColonEquals,
      Comment:"#",
    ),
  }

  test! {
    name: tokenize_comment_with_bang,
    text: "a:=#foo!",
    tokens: (
      Identifier:"a",
      ColonEquals,
      Comment:"#foo!",
    ),
  }

  test! {
    name: tokenize_order,
    text: "
      b: a
        @mv a b

      a:
        @touch F
        @touch a

      d: c
        @rm c

      c: b
        @mv b c
    ",
    tokens: (
      Identifier:"b",
      Colon,
      Whitespace,
      Identifier:"a",
      Eol,
      Indent,
      Text:"@mv a b",
      Eol,
      Eol,
      Dedent,
      Identifier:"a",
      Colon,
      Eol,
      Indent,
      Text:"@touch F",
      Eol,
      Whitespace:"  ",
      Text:"@touch a",
      Eol,
      Eol,
      Dedent,
      Identifier:"d",
      Colon,
      Whitespace,
      Identifier:"c",
      Eol,
      Indent,
      Text:"@rm c",
      Eol,
      Eol,
      Dedent,
      Identifier:"c",
      Colon,
      Whitespace,
      Identifier:"b",
      Eol,
      Indent,
      Text:"@mv b c",
      Eol,
      Dedent,
    ),
  }

  test! {
    name: tokenize_parens,
    text: "((())) ()abc(+",
    tokens: (
      ParenL,
      ParenL,
      ParenL,
      ParenR,
      ParenR,
      ParenR,
      Whitespace,
      ParenL,
      ParenR,
      Identifier:"abc",
      ParenL,
      Plus,
    ),
  }

  test! {
    name: crlf_newline,
    text: "#\r\n#asdf\r\n",
    tokens: (
      Comment:"#",
      Eol:"\r\n",
      Comment:"#asdf",
      Eol:"\r\n",
    ),
  }

  test! {
    name: multiple_recipes,
    text: "a:\n foo\nb:",
    tokens: (
      Identifier:"a",
      Colon,
      Eol,
      Indent:" ",
      Text:"foo",
      Eol,
      Dedent,
      Identifier:"b",
      Colon,
    ),
  }

  test! {
    name: brackets,
    text: "[][]",
    tokens: (BracketL, BracketR, BracketL, BracketR),
  }

  test! {
    name: open_delimiter_eol,
    text: "[\n](\n){\n}",
    tokens: (
      BracketL, Whitespace:"\n", BracketR,
      ParenL, Whitespace:"\n", ParenR,
      BraceL, Whitespace:"\n", BraceR
    ),
  }
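
  // `open_delimiter_eol` shows that a newline inside an open delimiter pair
  // lexes as ordinary whitespace instead of Eol, which is what lets
  // expressions span multiple lines inside (), [], and {}.

  // The `error!` cases below assert both the position of the offending token
  // (offset, line, column, and width) and the CompileErrorKind produced.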

  error! {
    name: tokenize_space_then_tab,
    input: "a:
 0
 1
\t2
",
    offset: 9,
    line: 3,
    column: 0,
    width: 1,
    kind: InconsistentLeadingWhitespace{expected: " ", found: "\t"},
  }

  error! {
    name: tokenize_tabs_then_tab_space,
    input: "a:
\t\t0
\t\t 1
\t  2
",
    offset: 12,
    line: 3,
    column: 0,
    width: 3,
    kind: InconsistentLeadingWhitespace{expected: "\t\t", found: "\t  "},
  }

  error! {
    name: tokenize_unknown,
    input: "%",
    offset: 0,
    line: 0,
    column: 0,
    width: 1,
    kind: UnknownStartOfToken,
  }

  error! {
    name: unterminated_string_with_escapes,
    input: r#"a = "\n\t\r\"\\"#,
    offset: 4,
    line: 0,
    column: 4,
    width: 1,
    kind: UnterminatedString,
  }

  error! {
    name: unterminated_raw_string,
    input: "r a='asdf",
    offset: 4,
    line: 0,
    column: 4,
    width: 1,
    kind: UnterminatedString,
  }

  error! {
    name: unterminated_interpolation,
    input: "foo:\n echo {{
",
    offset: 11,
    line: 1,
    column: 6,
    width: 2,
    kind: UnterminatedInterpolation,
  }

  error! {
    name: unterminated_backtick,
    input: "`echo",
    offset: 0,
    line: 0,
    column: 0,
    width: 1,
    kind: UnterminatedBacktick,
  }

  error! {
    name: unpaired_carriage_return,
    input: "foo\rbar",
    offset: 3,
    line: 0,
    column: 3,
    width: 1,
    kind: UnpairedCarriageReturn,
  }

  error! {
    name: invalid_name_start_dash,
    input: "-foo",
    offset: 0,
    line: 0,
    column: 0,
    width: 1,
    kind: UnknownStartOfToken,
  }

  error! {
    name: invalid_name_start_digit,
    input: "0foo",
    offset: 0,
    line: 0,
    column: 0,
    width: 1,
    kind: UnknownStartOfToken,
  }

  error! {
    name: unterminated_string,
    input: r#"a = ""#,
    offset: 4,
    line: 0,
    column: 4,
    width: 1,
    kind: UnterminatedString,
  }

  error! {
    name: mixed_leading_whitespace_recipe,
    input: "a:\n\t echo hello",
    offset: 3,
    line: 1,
    column: 0,
    width: 2,
    kind: MixedLeadingWhitespace{whitespace: "\t "},
  }

  error! {
    name: mixed_leading_whitespace_normal,
    input: "a\n\t echo hello",
    offset: 2,
    line: 1,
    column: 0,
    width: 2,
    kind: MixedLeadingWhitespace{whitespace: "\t "},
  }

  error! {
    name: mixed_leading_whitespace_indent,
    input: "a\n foo\n \tbar",
    offset: 7,
    line: 2,
    column: 0,
    width: 2,
    kind: MixedLeadingWhitespace{whitespace: " \t"},
  }

  error! {
    name: bad_dedent,
    input: "a\n foo\n   bar\n  baz",
    offset: 14,
    line: 3,
    column: 0,
    width: 2,
    kind: InconsistentLeadingWhitespace{expected: " ", found: "  "},
  }
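
  // In `bad_dedent`, the final line's two-space indentation matches neither
  // the current level ("   ") nor the enclosing level (" ") on the
  // indentation stack, so the lexer reports the dedent as inconsistent
  // leading whitespace.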

  error! {
    name: unclosed_interpolation_delimiter,
    input: "a:\n echo {{ foo",
    offset: 9,
    line: 1,
    column: 6,
    width: 2,
    kind: UnterminatedInterpolation,
  }

  error! {
    name: unexpected_character_after_at,
    input: "@%",
    offset: 1,
    line: 0,
    column: 1,
    width: 1,
    kind: UnknownStartOfToken,
  }

  error! {
    name: mismatched_closing_brace,
    input: "(]",
    offset: 1,
    line: 0,
    column: 1,
    width: 0,
    kind: MismatchedClosingDelimiter {
      open: Delimiter::Paren,
      close: Delimiter::Bracket,
      open_line: 0,
    },
  }

  error! {
    name: ampersand_eof,
    input: "&",
    offset: 1,
    line: 0,
    column: 1,
    width: 0,
    kind: UnexpectedEndOfToken {
      expected: '&',
    },
  }

  error! {
    name: ampersand_unexpected,
    input: "&%",
    offset: 1,
    line: 0,
    column: 1,
    width: 1,
    kind: UnexpectedCharacter {
      expected: '&',
    },
  }
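
  // `presume` is an internal lexer helper that advances past a character the
  // caller believes is next; presuming the wrong character is an internal
  // error, and the test below checks both the error token and its rendering.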

  #[test]
  fn presume_error() {
    let compile_error = Lexer::new("justfile".as_ref(), "!")
      .presume('-')
      .unwrap_err();
    assert_matches!(
      compile_error.token,
      Token {
        offset: 0,
        line: 0,
        column: 0,
        length: 0,
        src: "!",
        kind: Unspecified,
        path: _,
      }
    );
    assert_matches!(&*compile_error.kind,
      Internal { ref message }
      if message == "Lexer presumed character `-`"
    );

    assert_eq!(
      Error::Compile { compile_error }
        .color_display(Color::never())
        .to_string(),
      "error: Internal error, this may indicate a bug in just: Lexer presumed character `-`
consider filing an issue: https://github.com/casey/just/issues/new
 ——▶ justfile:1:1
  │
1 │ !
  │ ^"
    );
  }
}