mirror of https://github.com/Noratrieb/dilaria.git
synced 2026-01-16 10:25:02 +01:00

docs

commit 9eff0f0188
parent e66cd52861

7 changed files with 31 additions and 13 deletions
@@ -2,5 +2,4 @@ disallowed-types = [
     { path = "std::collections::HashMap", reason = "may be fxhash or siphash, depending on the feature, stay flexible" },
     { path = "std::collections::HashSet", reason = "may be fxhash or siphash, depending on the feature, stay flexible" },
     { path = "std::collections::Vec", reason = "we generally want to use bumpalos collections" },
-    { path = "std::boxed::Box", reason = "we generally want to use bumpalos allocation" },
 ]

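Note: the `std::boxed::Box` entry is dropped here, presumably because this same commit starts wrapping the lexer's error token in `Box::new` (see the src/lex.rs hunks below); the remaining entries keep pushing code toward bumpalo's arena-backed types. A minimal, self-contained sketch of that replacement, assuming bumpalo with the "boxed" and "collections" features enabled (the `Expr` type is invented for illustration):

use bumpalo::Bump;
use bumpalo::boxed::Box;
use bumpalo::collections::Vec;

// Invented expression type, only to show arena-backed Box and Vec.
#[derive(Debug)]
enum Expr<'bump> {
    Number(f64),
    Neg(Box<'bump, Expr<'bump>>),
    Call(Vec<'bump, Expr<'bump>>),
}

fn main() {
    let arena = Bump::new();
    // Nodes are allocated in the arena and freed together when it is dropped.
    let one = Box::new_in(Expr::Number(1.0), &arena);
    let mut args = Vec::new_in(&arena);
    args.push(Expr::Neg(one));
    println!("{:?}", Expr::Call(args));
}
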
@@ -1,5 +1,7 @@
 //!
 //! The AST module contains all structs and enums for the abstract syntax tree generated by the parser
+//!
+//! All AST nodes are bump allocated into the lifetime `'ast`
 
 use crate::errors::Span;
 use crate::value::Symbol;

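The new doc line says all AST nodes are bump allocated into the lifetime `'ast`. A rough sketch of that pattern, where a `Bump` arena hands out `&'ast` references; the node type below is a stand-in, not the crate's actual `Expr` (the real nodes also carry `Span`s and `Symbol`s):

use bumpalo::Bump;

// Stand-in AST node type, just to show the `'ast` arena lifetime.
#[derive(Debug)]
enum Expr<'ast> {
    Number(f64),
    Add(&'ast Expr<'ast>, &'ast Expr<'ast>),
}

// `Bump::alloc` returns a reference that lives as long as the arena,
// which is exactly what the `'ast` lifetime expresses.
fn build<'ast>(bump: &'ast Bump) -> &'ast Expr<'ast> {
    let lhs = bump.alloc(Expr::Number(1.0));
    let rhs = bump.alloc(Expr::Number(2.0));
    bump.alloc(Expr::Add(lhs, rhs))
}

fn main() {
    let bump = Bump::new();
    println!("{:?}", build(&bump));
}
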
@@ -1,3 +1,5 @@
+//! The bytecode that is executed in the vm
+
 use crate::errors::Span;
 use crate::value::{HashMap, Symbol};
 use bumpalo::boxed::Box;

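`HashMap` is imported from `crate::value` rather than `std`, matching the clippy.toml note that the hasher may be fxhash or siphash depending on a feature. One plausible way such an alias could be defined; this is a guess, not shown in the diff (`rustc_hash::FxHashMap` is a real type, the feature name is invented):

// Hypothetical alias in a `value` module; the real definition is not in this diff.
#[cfg(feature = "fxhash")]
pub type HashMap<K, V> = rustc_hash::FxHashMap<K, V>;
#[cfg(not(feature = "fxhash"))]
pub type HashMap<K, V> = std::collections::HashMap<K, V>;
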
@@ -1,3 +1,5 @@
+//! The compiler that compiles the AST down to bytecode
+
 use crate::ast::{
     Assignment, BinaryOp, BinaryOpKind, Block, Call, Declaration, Expr, FnDecl, Ident, IfStmt,
     Literal, Program, Stmt, UnaryOp, WhileStmt,

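The new doc line states the module's job: compile the AST down to bytecode. Purely as an illustration of that kind of pass, here is a generic stack-machine lowering; neither `Instr` nor this `Expr` come from the diff, and the real crate allocates with bumpalo rather than std `Box`/`Vec`:

// Purely illustrative tree-walking lowering, not dilaria's instruction set.
#[derive(Debug)]
enum Expr {
    Number(f64),
    Add(Box<Expr>, Box<Expr>),
}

#[derive(Debug)]
enum Instr {
    PushNum(f64),
    Add,
}

fn compile(expr: &Expr, out: &mut Vec<Instr>) {
    match expr {
        Expr::Number(n) => out.push(Instr::PushNum(*n)),
        Expr::Add(lhs, rhs) => {
            // Post-order: operands first, then the operator.
            compile(lhs, out);
            compile(rhs, out);
            out.push(Instr::Add);
        }
    }
}

fn main() {
    let ast = Expr::Add(Box::new(Expr::Number(1.0)), Box::new(Expr::Number(2.0)));
    let mut code = Vec::new();
    compile(&ast, &mut code);
    println!("{code:?}"); // [PushNum(1.0), PushNum(2.0), Add]
}
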
@@ -1,5 +1,10 @@
 //!
 //! This module handles error reporting in the interpreter
+//!
+//! The `span` submodule handles Spans, which are used for tracking locations in the source code.
+//!
+//! There is a single type `CompilerError` that can be created from anywhere, and reported using
+//! functions from here.
 
 use std::fmt::Debug;
 

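The new docs introduce `Span` and `CompilerError`. Their definitions are not part of this diff, but the call sites below (`Span::single(start)`, `CompilerError::with_note(span, message, note)`) suggest roughly this shape; treat it as a guess:

// A guess at the shapes implied by the call sites in src/lex.rs below;
// the real definitions are not part of this diff.
#[derive(Debug, Clone, Copy)]
pub struct Span {
    pub start: usize,
    pub end: usize,
}

impl Span {
    // `Span::single(start)` is used in the lexer for one-character spans.
    pub fn single(start: usize) -> Self {
        Span { start, end: start + 1 }
    }
}

#[derive(Debug, Clone)]
pub struct CompilerError {
    pub span: Span,
    pub message: String,
    pub note: Option<String>,
}

impl CompilerError {
    // Matches the `with_note(span, message, note)` calls in the lexer.
    pub fn with_note(span: Span, message: String, note: String) -> Self {
        CompilerError { span, message, note: Some(note) }
    }
}

fn main() {
    let err = CompilerError::with_note(
        Span::single(3),
        "Expected '=' after '!'".to_string(),
        "If you meant to use it for negation, use `not`".to_string(),
    );
    println!("{err:?}");
}
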
src/lex.rs (27 changes)

@@ -1,5 +1,8 @@
 //!
 //! The lex module lexes the source code into Tokens
+//!
+//! For error handling, there is a single `Error` token, which contains the error. The lexer
+//! is an iterator, and can therefore be used without any allocations
 
 use crate::errors::{CompilerError, Span};
 use std::iter::Peekable;

@@ -96,8 +99,8 @@ pub enum TokenKind<'code> {
     /// <=
     LessEqual,
 
-    /// An error occurred
-    Error(CompilerError),
+    /// An error occurred. It's boxed to save space, since `CompilerError` is > 6 `usize` big
+    Error(Box<CompilerError>),
 }
 
 #[derive(Debug, Clone)]

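The new doc comment carries the rationale: with `CompilerError` stored inline, every `TokenKind` is at least as large as the error, which the commit says is more than 6 `usize` wide. A standalone demonstration of the effect with an invented error type; only the boxing trick itself mirrors the change above:

use std::mem::size_of;

// Invented multi-word error type, standing in for `CompilerError`.
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct MyError {
    span: (usize, usize),
    message: String,
    note: Option<String>,
}

// Inline variant: every token pays for the largest variant.
#[allow(dead_code)]
enum TokenInline {
    Number(f64),
    Error(MyError),
}

// Boxed variant: the error lives on the heap, the token stays small.
#[allow(dead_code)]
enum TokenBoxed {
    Number(f64),
    Error(Box<MyError>),
}

fn main() {
    println!("inline: {} bytes", size_of::<TokenInline>());
    println!("boxed:  {} bytes", size_of::<TokenBoxed>());
}
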
@@ -199,11 +202,11 @@ impl<'code> Iterator for Lexer<'code> {
                 } else {
                     Token::new(
                         Span::single(start),
-                        TokenKind::Error(CompilerError::with_note(
+                        TokenKind::Error(Box::new(CompilerError::with_note(
                             Span::single(start),
                             "Expected '=' after '!'".to_string(),
                             "If you meant to use it for negation, use `not`".to_string(),
-                        )),
+                        ))),
                     )
                 };
             }

@@ -232,11 +235,11 @@ impl<'code> Iterator for Lexer<'code> {
                 None => {
                     return Some(Token::new(
                         Span::single(start),
-                        TokenKind::Error(CompilerError::with_note(
+                        TokenKind::Error(Box::new(CompilerError::with_note(
                             Span::single(start), // no not show the whole literal, this does not make sense
                             "String literal not closed".to_string(),
                             "Close the literal using '\"'".to_string(),
-                        )),
+                        ))),
                     ));
                 }
             }

@@ -265,19 +268,19 @@ impl<'code> Iterator for Lexer<'code> {
                 let number = number_str.parse::<f64>();
                 break match number {
                     Ok(number) if number.is_infinite() => {
-                        Token::new(span, TokenKind::Error(CompilerError::with_note(
+                        Token::new(span, TokenKind::Error(Box::new(CompilerError::with_note(
                             span,
                             "Number literal too long".to_string(),
                             "A number literal cannot be larger than a 64 bit float can represent"
                                 .to_string(),
-                        )))
+                        ))))
                     }
                     Ok(number) => Token::new(span, TokenKind::Number(number)),
-                    Err(err) => Token::new(span, TokenKind::Error(CompilerError::with_note(
+                    Err(err) => Token::new(span, TokenKind::Error(Box::new(CompilerError::with_note(
                         span,
                         "Invalid number".to_string(),
                         err.to_string(),
-                    ))),
+                    )))),
                 };
             } else if is_valid_ident_start(char) {
                 // it must be an identifier

@@ -297,12 +300,12 @@ impl<'code> Iterator for Lexer<'code> {
             } else {
                 break Token::new(
                     Span::single(start),
-                    TokenKind::Error(CompilerError::with_note(
+                    TokenKind::Error(Box::new(CompilerError::with_note(
                         Span::single(start),
                         format!("Unexpected character: '{}'", char),
                         "Character is not allowed outside of string literals and comments"
                             .to_string(),
-                    )),
+                    ))),
                 );
             }
         }

@@ -1,3 +1,8 @@
+//! The parser implementation.
+//!
+//! It's a handwritten recursive descent parser. It has an internal peekable iterator from where
+//! it gets its next tokens. Only a lookahead of one is required.
+
 #[cfg(test)]
 mod test;
 

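The added docs describe the parser: handwritten recursive descent over a peekable iterator, with one token of lookahead. A self-contained sketch of that technique over plain characters, not dilaria's grammar or token types:

use std::iter::Peekable;
use std::str::Chars;

// Toy grammar for the sketch:  expr := digit ('+' digit)*
// One item of lookahead (here: one char) is enough to decide whether to
// keep consuming '+' terms, which is the "lookahead of one" the docs mention.
fn parse_expr(it: &mut Peekable<Chars>) -> Result<u32, String> {
    let mut value = parse_digit(it)?;
    // peek() looks at the next character without consuming it.
    while it.peek() == Some(&'+') {
        it.next(); // consume '+'
        value += parse_digit(it)?;
    }
    Ok(value)
}

fn parse_digit(it: &mut Peekable<Chars>) -> Result<u32, String> {
    match it.next() {
        Some(c) if c.is_ascii_digit() => Ok(c.to_digit(10).unwrap()),
        other => Err(format!("expected digit, found {other:?}")),
    }
}

fn main() {
    let mut input = "1+2+3".chars().peekable();
    println!("{:?}", parse_expr(&mut input)); // Ok(6)
}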