|
|
@@ -3,6 +3,9 @@ use logos::Logos;
 #[derive(Logos, Debug)]
 #[logos(skip r"[ \t\n\r]")]
 enum Token<'a> {
+    #[regex(r#"\/\*[^\/\*]*\*\/"#, |lex| lex.slice())]
+    MultilineComment(&'a str),
+
     #[regex(r#""[^"]+""#, |lex| lex.slice().trim_start_matches('"').trim_end_matches('"'))]
     #[regex("[a-zA-Z0-9]+", |lex| lex.slice())]
     Keyword(&'a str),
@@ -48,4 +51,8 @@ fn main() {
     let lex = Token::lexer(&std::str::from_utf8(&source).unwrap());
     let content = lex.collect::<Vec<_>>();
     println!("{content:#?}");
+
+    let lex = Token::lexer("/* multiline\ncomment*/var value;");
+    let content = lex.collect::<Vec<_>>();
+    println!("{content:#?}");
 }
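For reference, a minimal, self-contained sketch of how the new test input should tokenize, assuming logos 0.13 or later (where the lexer iterator yields `Result` items and the default error type is `()`). The patterns and token names are copied from the diff, trimmed to the two variants the snippet exercises; the assertions reflect the expected behavior rather than verified output.

```rust
use logos::Logos;

#[derive(Logos, Debug, PartialEq)]
#[logos(skip r"[ \t\n\r]")]
enum Token<'a> {
    // Same pattern as the diff: a /* ... */ block whose body contains no '*' or '/'.
    #[regex(r#"\/\*[^\/\*]*\*\/"#, |lex| lex.slice())]
    MultilineComment(&'a str),

    #[regex("[a-zA-Z0-9]+", |lex| lex.slice())]
    Keyword(&'a str),
}

fn main() {
    let mut lex = Token::lexer("/* multiline\ncomment*/var value;");
    assert_eq!(lex.next(), Some(Ok(Token::MultilineComment("/* multiline\ncomment*/"))));
    assert_eq!(lex.next(), Some(Ok(Token::Keyword("var"))));
    assert_eq!(lex.next(), Some(Ok(Token::Keyword("value"))));
    // The trailing ';' matches no pattern, so it surfaces as the default error type `()`.
    assert_eq!(lex.next(), Some(Err(())));
    assert_eq!(lex.next(), None);
}
```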
|
|
|