summaryrefslogtreecommitdiff
path: root/src/tokenizer.cc
diff options
context:
space:
mode:
Diffstat (limited to 'src/tokenizer.cc')
-rw-r--r--src/tokenizer.cc24
1 file changed, 13 insertions, 11 deletions
diff --git a/src/tokenizer.cc b/src/tokenizer.cc
index a1a365c..cf8a5fa 100644
--- a/src/tokenizer.cc
+++ b/src/tokenizer.cc
@@ -3,8 +3,8 @@
#include "common.cc"
#include "source.cc"
-#include "utf8.cc"
#include "token.cc"
+#include "utf8.cc"
struct Tokenizer {
Buffer* buffer;
@@ -16,27 +16,29 @@ struct Tokenizer {
static inline Buffer* tokenizer_get_buffer(Tokenizer* tokenizer) {
assert_neq(tokenizer, nullptr);
- if(tokenizer->buffer != nullptr) return tokenizer->buffer;
+ if (tokenizer->buffer != nullptr) return tokenizer->buffer;
Buffer* buffer = nullptr;
- if(!buffer_stack_pop(tokenizer->stack, &buffer)) return nullptr;
+ if (!buffer_stack_pop(tokenizer->stack, &buffer)) return nullptr;
tokenizer->buffer = buffer;
return buffer;
}
-static inline char tokenizer_advance(const Tokenizer* tokenizer, usize* offset) {
+static inline char tokenizer_advance(const Tokenizer* tokenizer,
+ usize* offset) {
const String text = tokenizer->buffer->content;
- unsigned char c = *text[*offset];
+ unsigned char c = *text[*offset];
u8 nobytes = utf8_nobytes(c);
- if(nobytes > 1) panic("no support for multi-byte chars: %c:%d", c, nobytes);
+ if (nobytes > 1) panic("no support for multi-byte chars: %c:%d", c, nobytes);
- offset += nobytes;
+ *offset += nobytes;
return c;
}
-static inline String tokenizer_make_lexeme(const Tokenizer* tokenizer, usize start, usize end) {
+static inline String tokenizer_make_lexeme(const Tokenizer* tokenizer,
+ usize start, usize end) {
assert_neq(tokenizer, nullptr);
Buffer* buffer = tokenizer->buffer;
@@ -49,14 +51,14 @@ bool tokenizer_next(Tokenizer* tokenizer, Token* out) {
again:
Buffer* buffer = tokenizer_get_buffer(tokenizer);
- if(buffer == nullptr) return false;
+ if (buffer == nullptr) return false;
usize start = buffer->cursor;
- if(start == buffer->content.length) {
+ if (start == buffer->content.length) {
tokenizer->buffer = nullptr;
goto again;
}
-
+
usize offset = start;
tokenizer_advance(tokenizer, &offset);