diff --git a/src/langs/jai.jai b/src/langs/jai.jai
index 1c7a42b41..7563c8731 100644
--- a/src/langs/jai.jai
+++ b/src/langs/jai.jai
@@ -19,7 +19,7 @@ highlight_jai_syntax :: (using buffer: *Buffer) {
         // Maybe retroactively highlight a function
         if token.type == .punctuation && token.punctuation == .l_paren {
             // Handle "func :: ("
-            before_prev, prev := last_tokens[0], last_tokens[1];
+            before_prev, prev := last_tokens[2], last_tokens[3];
             if prev.type == .identifier {
                 memset(colors.data + prev.start, xx Code_Color.FUNCTION, prev.len);
             } else if before_prev.type == .identifier && prev.type == .operation && prev.operation == .double_colon {
@@ -27,15 +27,25 @@ highlight_jai_syntax :: (using buffer: *Buffer) {
             }
         } else if token.type == .keyword && token.keyword == .kw_inline {
             // Handle "func :: inline"
-            before_prev, prev := last_tokens[0], last_tokens[1];
+            before_prev, prev := last_tokens[2], last_tokens[3];
             if before_prev.type == .identifier && prev.type == .operation && prev.operation == .double_colon {
                 memset(colors.data + before_prev.start, xx Code_Color.FUNCTION, before_prev.len);
             }
+        } else if token.type == .operation && token.operation == .colon {
+            // Handle ") -> named: s64 {"
+            before_prev, prev := last_tokens[2], last_tokens[3];
+            if before_prev.type == .operation && before_prev.operation == .arrow && prev.type == .type_keyword {
+                memset(colors.data + prev.start, xx COLOR_MAP[Token.Type.identifier], prev.len);
+            }
+        } else if token.type == .identifier && is_type_def(*tokenizer, token) {
+            token.type = .type_keyword;
         }
 
-        // Remember last 2 tokens
+        // Remember last 4 tokens
         last_tokens[0] = last_tokens[1];
-        last_tokens[1] = token;
+        last_tokens[1] = last_tokens[2];
+        last_tokens[2] = last_tokens[3];
+        last_tokens[3] = token;
 
         color := COLOR_MAP[token.type];
         memset(colors.data + token.start, xx color, token.len);
@@ -80,10 +90,10 @@ get_next_token :: (using tokenizer: *Tokenizer) -> Token {
         case #char "%";  parse_percent           (tokenizer, *token);
         case #char "@";  parse_note              (tokenizer, *token);
         case #char "^";  parse_caret             (tokenizer, *token);
+        case #char ".";  parse_period            (tokenizer, *token);
 
         case #char ";";  token.type = .punctuation; token.punctuation = .semicolon;  t += 1;
         case #char ",";  token.type = .punctuation; token.punctuation = .comma;      t += 1;
-        case #char ".";  token.type = .punctuation; token.punctuation = .period;     t += 1;
         case #char "{";  token.type = .punctuation; token.punctuation = .l_brace;    t += 1;
         case #char "}";  token.type = .punctuation; token.punctuation = .r_brace;    t += 1;
         case #char "(";  token.type = .punctuation; token.punctuation = .l_paren;    t += 1;
@@ -103,6 +113,37 @@ get_next_token :: (using tokenizer: *Tokenizer) -> Token {
     return token;
 }
 
+is_type_def :: (using tokenizer: *Tokenizer, token: Token) -> bool #expand {
+    // Handle these scenarios:
+    // ": T", ": *T", "-> T", "-> *T", "] *T"
+    // "-> thing: T", "-> thing: *T"
+    // ignore "for *thing: things {"
+
+    is_for_identifier := (last_tokens[0].type == .keyword && last_tokens[0].keyword == .kw_for) || (last_tokens[1].type == .keyword && last_tokens[1].keyword == .kw_for);
+    if is_for_identifier return false;
+
+    before_prev, prev := last_tokens[2], last_tokens[3];
+    if prev.type == .operation && prev.operation == .asterisk {
+        return (before_prev.type == .operation   && before_prev.operation   == .colon) ||
+               (before_prev.type == .operation   && before_prev.operation   == .arrow) ||
+               (before_prev.type == .punctuation && before_prev.punctuation == .r_bracket);
+    }
+    if prev.type == .operation && prev.operation == .colon {
+        // thing: T
+        return before_prev.type == .identifier;
+    }
+    if prev.type == .operation && prev.operation == .arrow {
+        // ) -> T
+        return before_prev.type == .punctuation && before_prev.punctuation == .r_paren;
+    }
+    if prev.type == .punctuation && prev.punctuation == .r_bracket {
+        // [..] T
+        // [N]  T
+        return before_prev.type == .number || (before_prev.type == .operation && before_prev.operation == .period_range);
+    }
+    return false;
+}
+
 parse_identifier :: (using tokenizer: *Tokenizer, token: *Token) {
     token.type = .identifier;
 
@@ -304,6 +345,21 @@ parse_caret :: (using tokenizer: *Tokenizer, token: *Token) {
     }
 }
 
+parse_period :: (using tokenizer: *Tokenizer, token: *Token) {
+    token.type = .punctuation;
+    token.punctuation = .period;
+
+    t += 1;
+    if t >= max_t return;
+
+    if << t == {
+        case #char ".";
+            token.type = .operation;
+            token.operation = .period_range;
+            t += 1;
+    }
+}
+
 parse_note :: (using tokenizer: *Tokenizer, token: *Token) {
     token.type = .punctuation;
     token.punctuation = .note;
@@ -463,7 +519,7 @@ Tokenizer :: struct {
     start_t: *u8;  // cursor when starting parsing new token
     t:       *u8;  // cursor
 
-    last_tokens: [2] Token;  // to retroactively highlight functions
+    last_tokens: [4] Token;  // to retroactively highlight functions
 }
 
 Token :: struct {
@@ -527,7 +583,7 @@ OPERATIONS :: string.[
     "percent", "percent_equal", "less_than", "double_less_than", "less_than_equal", "greater_than", "greater_than_equal",
     "minus", "minus_equal", "triple_dash", "asterisk", "asterisk_equal", "colon", "colon_equal", "double_colon", "slash",
     "plus", "plus_equal", "slash_equal", "ampersand", "double_ampersand", "ampersand_equal", "tilde", "unknown",
-    "caret", "caret_equal", "period_range",
+    "caret", "caret_equal", "period_range",
 ];
 
 KEYWORDS :: string.[
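
For reference, here is a minimal Jai sketch (not part of the patch) of source exercising the scenarios listed in the is_type_def comments above; every name in it (Widget, make_widgets, first, rest, out, and so on) is hypothetical:

    make_widgets :: (first: Widget, rest: [..] Widget) -> out: s64 {
        copy:  Widget;       // "thing: T"  (prev is ":", before_prev an identifier)
        ptr:   *Widget;      // ": *T"      (prev is "*", before_prev ":")
        fixed: [4] Widget;   // "[N] T"     (prev is "]", before_prev a number)

        for *w: rest { }     // skipped: kw_for is still in last_tokens, so rest stays an identifier

        return 0;
    }

In the signature, Widget after "first:" and after "[..]" is reclassified as a type, while the named return value "out" is first taken for a return type by the "-> T" rule and then re-colored as an identifier by the new .colon branch once the ":" that follows it is tokenized.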