summaryrefslogtreecommitdiff
path: root/src/tokenizer.cc
blob: 274f572a49e2157014523fc72ed03b4508af9170 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
#ifndef TOKENIZER_CC
#define TOKENIZER_CC

#include "common.cc"
#include "source.cc"
#include "utf8.cc"
#include "token.cc"

// Pull-based tokenizer over a stack of input buffers.
//
// `buffer` caches the currently-active buffer (nullptr until one is popped
// from `stack`, and reset to nullptr once that buffer is exhausted).
// Ownership: the tokenizer does not free buffers; `stack` is borrowed from
// the caller — TODO confirm against Buffer_Stack's contract.
struct Tokenizer {
  const Buffer* buffer;
  Buffer_Stack* stack;

  Tokenizer(Buffer_Stack* stack) : buffer(nullptr), stack(stack) {}
};

// Returns the tokenizer's active buffer, lazily popping the next one off the
// stack when none is cached. Returns nullptr when the stack is exhausted.
static inline const Buffer* tokenizer_get_buffer(Tokenizer* tokenizer) {
  assert_neq(tokenizer, nullptr);

  // No active buffer cached yet — try to pull the next one from the stack.
  if(tokenizer->buffer == nullptr) {
    Buffer* popped = nullptr;
    if(!buffer_stack_pop(tokenizer->stack, &popped)) return nullptr;
    tokenizer->buffer = popped;
  }

  return tokenizer->buffer;
}

// Reads the byte at *offset in the current buffer and advances *offset past
// it. Only single-byte characters are supported: a multi-byte UTF-8 lead
// byte (utf8_nobytes > 1) triggers a panic before any advancement.
//
// Fix: the original did `offset += nobytes;`, which advanced the local
// pointer variable rather than the caller's offset value, so the caller's
// position never moved.
static inline char tokenizer_advance(const Tokenizer* tokenizer, usize* offset) {
  const String text = tokenizer->buffer->content;

  unsigned char c = *text[*offset];  
  u8 nobytes = utf8_nobytes(c);
  if(nobytes > 1) panic("no support for multi-byte chars: %c:%d", c, nobytes);

  // NOTE(review): assumes utf8_nobytes never returns 0 for a valid byte —
  // confirm; a 0 here would leave the caller's offset stuck.
  *offset += nobytes;  // advance the caller's offset, not the local pointer
  return c;
}

// Produces the next token, popping a fresh buffer from the stack whenever the
// current one is fully consumed. Returns true and fills `out`, or false when
// no buffers remain.
//
// NOTE(review): `buffer->cursor` is read but never written here, and the
// cached buffer is const — verify where the cursor is advanced, otherwise
// repeated calls re-tokenize the same offset.
bool tokenizer_next(Tokenizer* tokenizer, Token* out) {
  assert_neq(tokenizer, nullptr);
  assert_neq(out, nullptr);

again:
  // Acquire (or lazily pop) the active buffer; nullptr means stack exhausted.
  const Buffer* buffer = tokenizer_get_buffer(tokenizer);
  if(buffer == nullptr) return false;

  usize offset = buffer->cursor;
  if(offset == buffer->content.length) {
    // Current buffer consumed: drop it and retry with the next one.
    tokenizer->buffer = nullptr;
    goto again;
  }

  // NOTE(review): the character returned by tokenizer_advance is discarded,
  // and `str` below is taken at `offset` after the advance call — confirm
  // which byte the token text is meant to cover.
  tokenizer_advance(tokenizer, &offset);

  const unsigned char* str = buffer->content[offset];  
  // Placeholder token: always Eof kind, one-byte text, zero-length span —
  // presumably to be replaced by real token classification.
  *out = Token(Token_Kind_Eof, String(str, 1), Span(buffer->file, 0, 0));
  return true;
}

#endif