LibJS: Port the JS lexer and parser to UTF-16

This ports the lexer to UTF-16 and deals with the immediate fallout up
to the AST. The AST will be dealt with in upcoming commits.

The lexer will still accept UTF-8 strings as input, and will transcode
them to UTF-16 for lexing. This doesn't actually incur a new allocation,
as we were already converting the input StringView to a ByteString for
each lexer.
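
As a rough illustration (not part of the diff; "example.js" and the helper name are placeholders), the two constructor overloads touched below in Lexer.h/Lexer.cpp can be exercised like this, with the UTF-8 overload transcoding once and delegating to the UTF-16 one:

    #include <AK/StringView.h>
    #include <AK/Utf16String.h>
    #include <LibJS/Lexer.h>

    static void lex_both_ways()
    {
        // Existing callers keep passing UTF-8 source; the lexer transcodes it once up front.
        JS::Lexer from_utf8("let x = 1;"sv, "example.js"sv);

        // Callers that already hold UTF-16 text can hand it over directly.
        JS::Lexer from_utf16(Utf16String::from_utf8("let x = 1;"sv), "example.js"sv);

        // Both lexers now walk the same UTF-16 code units.
        (void)from_utf8.next();
        (void)from_utf16.next();
    }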

One immediate benefit is that we no longer need to know offhand how many
UTF-8 bytes certain special code points occupy. They all happen to fit in
a single UTF-16 code unit, so instead of advancing the lexer by 3
positions in some cases, we can always advance by 1.
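
For a concrete sense of that last point, the LS/PS line separators (U+2028/U+2029) are three UTF-8 bytes but exactly one UTF-16 code unit each; a couple of static_asserts (illustrative only, not part of the commit) spell that out:

    // U+2028 LINE SEPARATOR and U+2029 PARAGRAPH SEPARATOR: three UTF-8 bytes each...
    static_assert(sizeof(u8"\u2028") - 1 == 3, "LS is three UTF-8 code units");
    static_assert(sizeof(u8"\u2029") - 1 == 3, "PS is three UTF-8 code units");
    // ...but a single UTF-16 code unit each, so the lexer can always step by one.
    static_assert(sizeof(u"\u2028") / sizeof(char16_t) - 1 == 1, "LS is one UTF-16 code unit");
    static_assert(sizeof(u"\u2029") / sizeof(char16_t) - 1 == 1, "PS is one UTF-16 code unit");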
Authored by Timothy Flynn on 2025-08-06 07:18:45 -04:00, committed by Tim Flynn
commit 00182a2405
14 changed files with 467 additions and 474 deletions

Lexer.cpp

@ -5,28 +5,28 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "Lexer.h"
#include <AK/CharacterTypes.h>
#include <AK/Debug.h>
#include <AK/GenericLexer.h>
#include <AK/HashMap.h>
#include <AK/Utf8View.h>
#include <AK/Utf16FlyString.h>
#include <LibJS/Lexer.h>
#include <LibUnicode/CharacterTypes.h>
#include <stdio.h>
namespace JS {
HashMap<FlyString, TokenType> Lexer::s_keywords;
HashMap<Utf16FlyString, TokenType> Lexer::s_keywords;
static constexpr TokenType parse_two_char_token(StringView view)
static constexpr TokenType parse_two_char_token(Utf16View const& view)
{
if (view.length() != 2)
if (view.length_in_code_units() != 2)
return TokenType::Invalid;
auto const* bytes = view.bytes().data();
switch (bytes[0]) {
auto ch0 = view.code_unit_at(0);
auto ch1 = view.code_unit_at(1);
switch (ch0) {
case '=':
switch (bytes[1]) {
switch (ch1) {
case '>':
return TokenType::Arrow;
case '=':
@ -35,7 +35,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '+':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::PlusEquals;
case '+':
@ -44,7 +44,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '-':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::MinusEquals;
case '-':
@ -53,7 +53,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '*':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::AsteriskEquals;
case '*':
@ -62,21 +62,21 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '/':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::SlashEquals;
default:
return TokenType::Invalid;
}
case '%':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::PercentEquals;
default:
return TokenType::Invalid;
}
case '&':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::AmpersandEquals;
case '&':
@ -85,7 +85,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '|':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::PipeEquals;
case '|':
@ -94,14 +94,14 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '^':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::CaretEquals;
default:
return TokenType::Invalid;
}
case '<':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::LessThanEquals;
case '<':
@ -110,7 +110,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '>':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::GreaterThanEquals;
case '>':
@ -119,7 +119,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '?':
switch (bytes[1]) {
switch (ch1) {
case '?':
return TokenType::DoubleQuestionMark;
case '.':
@ -128,7 +128,7 @@ static constexpr TokenType parse_two_char_token(StringView view)
return TokenType::Invalid;
}
case '!':
switch (bytes[1]) {
switch (ch1) {
case '=':
return TokenType::ExclamationMarkEquals;
default:
@ -139,49 +139,52 @@ static constexpr TokenType parse_two_char_token(StringView view)
}
}
static constexpr TokenType parse_three_char_token(StringView view)
static constexpr TokenType parse_three_char_token(Utf16View const& view)
{
if (view.length() != 3)
if (view.length_in_code_units() != 3)
return TokenType::Invalid;
auto const* bytes = view.bytes().data();
switch (bytes[0]) {
auto ch0 = view.code_unit_at(0);
auto ch1 = view.code_unit_at(1);
auto ch2 = view.code_unit_at(2);
switch (ch0) {
case '<':
if (bytes[1] == '<' && bytes[2] == '=')
if (ch1 == '<' && ch2 == '=')
return TokenType::ShiftLeftEquals;
return TokenType::Invalid;
case '>':
if (bytes[1] == '>' && bytes[2] == '=')
if (ch1 == '>' && ch2 == '=')
return TokenType::ShiftRightEquals;
if (bytes[1] == '>' && bytes[2] == '>')
if (ch1 == '>' && ch2 == '>')
return TokenType::UnsignedShiftRight;
return TokenType::Invalid;
case '=':
if (bytes[1] == '=' && bytes[2] == '=')
if (ch1 == '=' && ch2 == '=')
return TokenType::EqualsEqualsEquals;
return TokenType::Invalid;
case '!':
if (bytes[1] == '=' && bytes[2] == '=')
if (ch1 == '=' && ch2 == '=')
return TokenType::ExclamationMarkEqualsEquals;
return TokenType::Invalid;
case '.':
if (bytes[1] == '.' && bytes[2] == '.')
if (ch1 == '.' && ch2 == '.')
return TokenType::TripleDot;
return TokenType::Invalid;
case '*':
if (bytes[1] == '*' && bytes[2] == '=')
if (ch1 == '*' && ch2 == '=')
return TokenType::DoubleAsteriskEquals;
return TokenType::Invalid;
case '&':
if (bytes[1] == '&' && bytes[2] == '=')
if (ch1 == '&' && ch2 == '=')
return TokenType::DoubleAmpersandEquals;
return TokenType::Invalid;
case '|':
if (bytes[1] == '|' && bytes[2] == '=')
if (ch1 == '|' && ch2 == '=')
return TokenType::DoublePipeEquals;
return TokenType::Invalid;
case '?':
if (bytes[1] == '?' && bytes[2] == '=')
if (ch1 == '?' && ch2 == '=')
return TokenType::DoubleQuestionMarkEquals;
return TokenType::Invalid;
default:
@ -222,8 +225,31 @@ static consteval Array<TokenType, 256> make_single_char_tokens_array()
static constexpr auto s_single_char_tokens = make_single_char_tokens_array();
static Utf16String create_utf16_string_from_possibly_invalid_utf8_string(StringView source)
{
Utf8View utf8_source { source };
if (utf8_source.validate()) [[likely]]
return Utf16String::from_utf8_without_validation(source);
StringBuilder builder(StringBuilder::Mode::UTF16);
for (auto code_point : utf8_source) {
builder.append_code_point(code_point);
if (code_point == AK::UnicodeUtils::REPLACEMENT_CODE_POINT)
break;
}
return builder.to_utf16_string();
}
Lexer::Lexer(StringView source, StringView filename, size_t line_number, size_t line_column)
: m_source(source)
: Lexer(create_utf16_string_from_possibly_invalid_utf8_string(source), filename, line_number, line_column)
{
// FIXME: Remove this API once all callers are ported to UTF-16.
}
Lexer::Lexer(Utf16String source, StringView filename, size_t line_number, size_t line_column)
: m_source(move(source))
, m_current_token(TokenType::Eof, {}, {}, {}, 0, 0, 0)
, m_filename(String::from_utf8(filename).release_value_but_fixme_should_propagate_errors())
, m_line_number(line_number)
@ -231,46 +257,46 @@ Lexer::Lexer(StringView source, StringView filename, size_t line_number, size_t
, m_parsed_identifiers(adopt_ref(*new ParsedIdentifiers))
{
if (s_keywords.is_empty()) {
s_keywords.set("async"_fly_string, TokenType::Async);
s_keywords.set("await"_fly_string, TokenType::Await);
s_keywords.set("break"_fly_string, TokenType::Break);
s_keywords.set("case"_fly_string, TokenType::Case);
s_keywords.set("catch"_fly_string, TokenType::Catch);
s_keywords.set("class"_fly_string, TokenType::Class);
s_keywords.set("const"_fly_string, TokenType::Const);
s_keywords.set("continue"_fly_string, TokenType::Continue);
s_keywords.set("debugger"_fly_string, TokenType::Debugger);
s_keywords.set("default"_fly_string, TokenType::Default);
s_keywords.set("delete"_fly_string, TokenType::Delete);
s_keywords.set("do"_fly_string, TokenType::Do);
s_keywords.set("else"_fly_string, TokenType::Else);
s_keywords.set("enum"_fly_string, TokenType::Enum);
s_keywords.set("export"_fly_string, TokenType::Export);
s_keywords.set("extends"_fly_string, TokenType::Extends);
s_keywords.set("false"_fly_string, TokenType::BoolLiteral);
s_keywords.set("finally"_fly_string, TokenType::Finally);
s_keywords.set("for"_fly_string, TokenType::For);
s_keywords.set("function"_fly_string, TokenType::Function);
s_keywords.set("if"_fly_string, TokenType::If);
s_keywords.set("import"_fly_string, TokenType::Import);
s_keywords.set("in"_fly_string, TokenType::In);
s_keywords.set("instanceof"_fly_string, TokenType::Instanceof);
s_keywords.set("let"_fly_string, TokenType::Let);
s_keywords.set("new"_fly_string, TokenType::New);
s_keywords.set("null"_fly_string, TokenType::NullLiteral);
s_keywords.set("return"_fly_string, TokenType::Return);
s_keywords.set("super"_fly_string, TokenType::Super);
s_keywords.set("switch"_fly_string, TokenType::Switch);
s_keywords.set("this"_fly_string, TokenType::This);
s_keywords.set("throw"_fly_string, TokenType::Throw);
s_keywords.set("true"_fly_string, TokenType::BoolLiteral);
s_keywords.set("try"_fly_string, TokenType::Try);
s_keywords.set("typeof"_fly_string, TokenType::Typeof);
s_keywords.set("var"_fly_string, TokenType::Var);
s_keywords.set("void"_fly_string, TokenType::Void);
s_keywords.set("while"_fly_string, TokenType::While);
s_keywords.set("with"_fly_string, TokenType::With);
s_keywords.set("yield"_fly_string, TokenType::Yield);
s_keywords.set("async"_utf16_fly_string, TokenType::Async);
s_keywords.set("await"_utf16_fly_string, TokenType::Await);
s_keywords.set("break"_utf16_fly_string, TokenType::Break);
s_keywords.set("case"_utf16_fly_string, TokenType::Case);
s_keywords.set("catch"_utf16_fly_string, TokenType::Catch);
s_keywords.set("class"_utf16_fly_string, TokenType::Class);
s_keywords.set("const"_utf16_fly_string, TokenType::Const);
s_keywords.set("continue"_utf16_fly_string, TokenType::Continue);
s_keywords.set("debugger"_utf16_fly_string, TokenType::Debugger);
s_keywords.set("default"_utf16_fly_string, TokenType::Default);
s_keywords.set("delete"_utf16_fly_string, TokenType::Delete);
s_keywords.set("do"_utf16_fly_string, TokenType::Do);
s_keywords.set("else"_utf16_fly_string, TokenType::Else);
s_keywords.set("enum"_utf16_fly_string, TokenType::Enum);
s_keywords.set("export"_utf16_fly_string, TokenType::Export);
s_keywords.set("extends"_utf16_fly_string, TokenType::Extends);
s_keywords.set("false"_utf16_fly_string, TokenType::BoolLiteral);
s_keywords.set("finally"_utf16_fly_string, TokenType::Finally);
s_keywords.set("for"_utf16_fly_string, TokenType::For);
s_keywords.set("function"_utf16_fly_string, TokenType::Function);
s_keywords.set("if"_utf16_fly_string, TokenType::If);
s_keywords.set("import"_utf16_fly_string, TokenType::Import);
s_keywords.set("in"_utf16_fly_string, TokenType::In);
s_keywords.set("instanceof"_utf16_fly_string, TokenType::Instanceof);
s_keywords.set("let"_utf16_fly_string, TokenType::Let);
s_keywords.set("new"_utf16_fly_string, TokenType::New);
s_keywords.set("null"_utf16_fly_string, TokenType::NullLiteral);
s_keywords.set("return"_utf16_fly_string, TokenType::Return);
s_keywords.set("super"_utf16_fly_string, TokenType::Super);
s_keywords.set("switch"_utf16_fly_string, TokenType::Switch);
s_keywords.set("this"_utf16_fly_string, TokenType::This);
s_keywords.set("throw"_utf16_fly_string, TokenType::Throw);
s_keywords.set("true"_utf16_fly_string, TokenType::BoolLiteral);
s_keywords.set("try"_utf16_fly_string, TokenType::Try);
s_keywords.set("typeof"_utf16_fly_string, TokenType::Typeof);
s_keywords.set("var"_utf16_fly_string, TokenType::Var);
s_keywords.set("void"_utf16_fly_string, TokenType::Void);
s_keywords.set("while"_utf16_fly_string, TokenType::While);
s_keywords.set("with"_utf16_fly_string, TokenType::With);
s_keywords.set("yield"_utf16_fly_string, TokenType::Yield);
}
consume();
@ -279,16 +305,16 @@ Lexer::Lexer(StringView source, StringView filename, size_t line_number, size_t
void Lexer::consume()
{
auto did_reach_eof = [this] {
if (m_position < m_source.length())
if (m_position < m_source.length_in_code_units())
return false;
m_eof = true;
m_current_char = '\0';
m_position = m_source.length() + 1;
m_current_code_unit = '\0';
m_position = m_source.length_in_code_units() + 1;
m_line_column++;
return true;
};
if (m_position > m_source.length())
if (m_position > m_source.length_in_code_units())
return;
if (did_reach_eof())
@ -296,30 +322,23 @@ void Lexer::consume()
if (is_line_terminator()) {
if constexpr (LEXER_DEBUG) {
ByteString type;
if (m_current_char == '\n')
type = "LINE FEED";
else if (m_current_char == '\r')
type = "CARRIAGE RETURN";
else if (m_source[m_position + 1] == (char)0xa8)
type = "LINE SEPARATOR";
StringView type;
if (m_current_code_unit == '\n')
type = "LINE FEED"sv;
else if (m_current_code_unit == '\r')
type = "CARRIAGE RETURN"sv;
else if (m_source.code_unit_at(m_position + 1) == LINE_SEPARATOR)
type = "LINE SEPARATOR"sv;
else
type = "PARAGRAPH SEPARATOR";
type = "PARAGRAPH SEPARATOR"sv;
dbgln("Found a line terminator: {}", type);
}
// This is a three-char line terminator, we need to increase m_position some more.
// We might reach EOF and need to check again.
if (m_current_char != '\n' && m_current_char != '\r') {
m_position += 2;
if (did_reach_eof())
return;
}
// If the previous character is \r and the current one \n we already updated line number
// and column - don't do it again. From https://tc39.es/ecma262/#sec-line-terminators:
// The sequence <CR><LF> is commonly used as a line terminator.
// It should be considered a single SourceCharacter for the purpose of reporting line numbers.
auto second_char_of_crlf = m_position > 1 && m_source[m_position - 2] == '\r' && m_current_char == '\n';
auto second_char_of_crlf = m_position > 1 && m_source.code_unit_at(m_position - 2) == '\r' && m_current_code_unit == '\n';
if (!second_char_of_crlf) {
m_line_number++;
@ -328,50 +347,28 @@ void Lexer::consume()
} else {
dbgln_if(LEXER_DEBUG, "Previous was CR, this is LF - not incrementing line number again.");
}
} else if (is_unicode_character()) {
size_t char_size = 1;
if ((m_current_char & 64) == 0) {
m_hit_invalid_unicode = m_position;
} else if ((m_current_char & 32) == 0) {
char_size = 2;
} else if ((m_current_char & 16) == 0) {
char_size = 3;
} else if ((m_current_char & 8) == 0) {
char_size = 4;
}
} else {
if (AK::UnicodeUtils::is_utf16_high_surrogate(m_current_code_unit) && m_position < m_source.length_in_code_units()) {
if (AK::UnicodeUtils::is_utf16_low_surrogate(m_source.code_unit_at(m_position))) {
++m_position;
VERIFY(char_size >= 1);
--char_size;
for (size_t i = m_position; i < m_position + char_size; i++) {
if (i >= m_source.length() || (m_source[i] & 0b11000000) != 0b10000000) {
m_hit_invalid_unicode = m_position;
break;
if (did_reach_eof())
return;
}
}
if (m_hit_invalid_unicode.has_value())
m_position = m_source.length();
else
m_position += char_size;
if (did_reach_eof())
return;
m_line_column++;
} else {
m_line_column++;
++m_line_column;
}
m_current_char = m_source[m_position++];
m_current_code_unit = m_source.code_unit_at(m_position++);
}
bool Lexer::consume_decimal_number()
{
if (!is_ascii_digit(m_current_char))
if (!is_ascii_digit(m_current_code_unit))
return false;
while (is_ascii_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_ascii_digit)) {
while (is_ascii_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_ascii_digit)) {
consume();
}
return true;
@ -380,16 +377,16 @@ bool Lexer::consume_decimal_number()
bool Lexer::consume_exponent()
{
consume();
if (m_current_char == '-' || m_current_char == '+')
if (m_current_code_unit == '-' || m_current_code_unit == '+')
consume();
if (!is_ascii_digit(m_current_char))
if (!is_ascii_digit(m_current_code_unit))
return false;
return consume_decimal_number();
}
static constexpr bool is_octal_digit(char ch)
static constexpr bool is_octal_digit(char16_t ch)
{
return ch >= '0' && ch <= '7';
}
@ -397,10 +394,10 @@ static constexpr bool is_octal_digit(char ch)
bool Lexer::consume_octal_number()
{
consume();
if (!is_octal_digit(m_current_char))
if (!is_octal_digit(m_current_code_unit))
return false;
while (is_octal_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_octal_digit))
while (is_octal_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_octal_digit))
consume();
return true;
@ -409,16 +406,16 @@ bool Lexer::consume_octal_number()
bool Lexer::consume_hexadecimal_number()
{
consume();
if (!is_ascii_hex_digit(m_current_char))
if (!is_ascii_hex_digit(m_current_code_unit))
return false;
while (is_ascii_hex_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_ascii_hex_digit))
while (is_ascii_hex_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_ascii_hex_digit))
consume();
return true;
}
static constexpr bool is_binary_digit(char ch)
static constexpr bool is_binary_digit(char16_t ch)
{
return ch == '0' || ch == '1';
}
@ -426,10 +423,10 @@ static constexpr bool is_binary_digit(char ch)
bool Lexer::consume_binary_number()
{
consume();
if (!is_binary_digit(m_current_char))
if (!is_binary_digit(m_current_code_unit))
return false;
while (is_binary_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_binary_digit))
while (is_binary_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_binary_digit))
consume();
return true;
@ -438,40 +435,40 @@ bool Lexer::consume_binary_number()
template<typename Callback>
bool Lexer::match_numeric_literal_separator_followed_by(Callback callback) const
{
if (m_position >= m_source.length())
if (m_position >= m_source.length_in_code_units())
return false;
return m_current_char == '_'
&& callback(m_source[m_position]);
return m_current_code_unit == '_'
&& callback(m_source.code_unit_at(m_position));
}
bool Lexer::match(char a, char b) const
bool Lexer::match(char16_t a, char16_t b) const
{
if (m_position >= m_source.length())
if (m_position >= m_source.length_in_code_units())
return false;
return m_current_char == a
&& m_source[m_position] == b;
return m_current_code_unit == a
&& m_source.code_unit_at(m_position) == b;
}
bool Lexer::match(char a, char b, char c) const
bool Lexer::match(char16_t a, char16_t b, char16_t c) const
{
if (m_position + 1 >= m_source.length())
if (m_position + 1 >= m_source.length_in_code_units())
return false;
return m_current_char == a
&& m_source[m_position] == b
&& m_source[m_position + 1] == c;
return m_current_code_unit == a
&& m_source.code_unit_at(m_position) == b
&& m_source.code_unit_at(m_position + 1) == c;
}
bool Lexer::match(char a, char b, char c, char d) const
bool Lexer::match(char16_t a, char16_t b, char16_t c, char16_t d) const
{
if (m_position + 2 >= m_source.length())
if (m_position + 2 >= m_source.length_in_code_units())
return false;
return m_current_char == a
&& m_source[m_position] == b
&& m_source[m_position + 1] == c
&& m_source[m_position + 2] == d;
return m_current_code_unit == a
&& m_source.code_unit_at(m_position) == b
&& m_source.code_unit_at(m_position + 1) == c
&& m_source.code_unit_at(m_position + 2) == d;
}
bool Lexer::is_eof() const
@ -482,39 +479,32 @@ bool Lexer::is_eof() const
ALWAYS_INLINE bool Lexer::is_line_terminator() const
{
// OPTIMIZATION: Fast-path for ASCII characters.
if (m_current_char == '\n' || m_current_char == '\r')
if (m_current_code_unit == '\n' || m_current_code_unit == '\r')
return true;
if (!is_unicode_character())
if (is_ascii(m_current_code_unit))
return false;
return JS::is_line_terminator(current_code_point());
}
ALWAYS_INLINE bool Lexer::is_unicode_character() const
{
return (m_current_char & 128) != 0;
}
ALWAYS_INLINE u32 Lexer::current_code_point() const
{
static constexpr u32 const REPLACEMENT_CHARACTER = 0xFFFD;
if (m_position == 0)
return REPLACEMENT_CHARACTER;
return AK::UnicodeUtils::REPLACEMENT_CODE_POINT;
auto substring = m_source.substring_view(m_position - 1);
if (substring.is_empty())
return REPLACEMENT_CHARACTER;
if (is_ascii(substring[0]))
return substring[0];
Utf8View utf_8_view { substring };
return *utf_8_view.begin();
return AK::UnicodeUtils::REPLACEMENT_CODE_POINT;
return *substring.begin();
}
bool Lexer::is_whitespace() const
{
// OPTIMIZATION: Fast-path for ASCII characters.
if (is_ascii_space(m_current_char))
if (is_ascii_space(m_current_code_unit))
return true;
if (!is_unicode_character())
if (is_ascii(m_current_code_unit))
return false;
return JS::is_whitespace(current_code_point());
@ -525,7 +515,7 @@ bool Lexer::is_whitespace() const
// u{ CodePoint }
Optional<u32> Lexer::is_identifier_unicode_escape(size_t& identifier_length) const
{
GenericLexer lexer(source().substring_view(m_position - 1));
Utf16GenericLexer lexer(source().substring_view(m_position - 1));
if (auto code_point_or_error = lexer.consume_escaped_code_point(false); !code_point_or_error.is_error()) {
identifier_length = lexer.tell();
@ -624,7 +614,7 @@ bool Lexer::is_block_comment_end() const
bool Lexer::is_numeric_literal_start() const
{
return is_ascii_digit(m_current_char) || (m_current_char == '.' && m_position < m_source.length() && is_ascii_digit(m_source[m_position]));
return is_ascii_digit(m_current_code_unit) || (m_current_code_unit == '.' && m_position < m_source.length_in_code_units() && is_ascii_digit(m_source.code_unit_at(m_position)));
}
bool Lexer::slash_means_division() const
@ -646,7 +636,7 @@ bool Lexer::slash_means_division() const
Token Lexer::next()
{
size_t trivia_start = m_position;
auto trivia_start = m_position;
auto in_template = !m_template_states.is_empty();
bool line_has_token_yet = m_line_column > 1;
bool unterminated_comment = false;
@ -699,14 +689,14 @@ Token Lexer::next()
// bunch of Invalid* tokens (bad numeric literals, unterminated comments etc.)
StringView token_message;
Optional<FlyString> identifier;
Optional<Utf16FlyString> identifier;
size_t identifier_length = 0;
if (m_current_token.type() == TokenType::RegexLiteral && !is_eof() && is_ascii_alpha(m_current_char) && !did_consume_whitespace_or_comments) {
if (m_current_token.type() == TokenType::RegexLiteral && !is_eof() && is_ascii_alpha(m_current_code_unit) && !did_consume_whitespace_or_comments) {
token_type = TokenType::RegexFlags;
while (!is_eof() && is_ascii_alpha(m_current_char))
while (!is_eof() && is_ascii_alpha(m_current_code_unit))
consume();
} else if (m_current_char == '`') {
} else if (m_current_code_unit == '`') {
consume();
if (!in_template) {
@ -721,7 +711,7 @@ Token Lexer::next()
token_type = TokenType::TemplateLiteralEnd;
}
}
} else if (in_template && m_template_states.last().in_expr && m_template_states.last().open_bracket_count == 0 && m_current_char == '}') {
} else if (in_template && m_template_states.last().in_expr && m_template_states.last().open_bracket_count == 0 && m_current_code_unit == '}') {
consume();
token_type = TokenType::TemplateLiteralExprEnd;
m_template_states.last().in_expr = false;
@ -742,7 +732,7 @@ Token Lexer::next()
// LineContinuation
// LineTerminatorSequence
// SourceCharacter but not one of ` or \ or $ or LineTerminator
while (!match('$', '{') && m_current_char != '`' && !is_eof()) {
while (!match('$', '{') && m_current_code_unit != '`' && !is_eof()) {
if (match('\\', '$') || match('\\', '`') || match('\\', '\\'))
consume();
consume();
@ -752,12 +742,12 @@ Token Lexer::next()
else
token_type = TokenType::TemplateLiteralString;
}
} else if (m_current_char == '#') {
} else if (m_current_code_unit == '#') {
// Note: This has some duplicated code with the identifier lexing below
consume();
auto code_point = is_identifier_start(identifier_length);
if (code_point.has_value()) {
StringBuilder builder;
StringBuilder builder(StringBuilder::Mode::UTF16);
builder.append_code_point('#');
do {
builder.append_code_point(*code_point);
@ -767,7 +757,7 @@ Token Lexer::next()
code_point = is_identifier_middle(identifier_length);
} while (code_point.has_value());
identifier = builder.to_string_without_validation();
identifier = builder.to_utf16_string();
token_type = TokenType::PrivateIdentifier;
m_parsed_identifiers->identifiers.set(*identifier);
@ -778,7 +768,7 @@ Token Lexer::next()
} else if (auto code_point = is_identifier_start(identifier_length); code_point.has_value()) {
bool has_escaped_character = false;
// identifier or keyword
StringBuilder builder;
StringBuilder builder(StringBuilder::Mode::UTF16);
do {
builder.append_code_point(*code_point);
for (size_t i = 0; i < identifier_length; ++i)
@ -789,7 +779,7 @@ Token Lexer::next()
code_point = is_identifier_middle(identifier_length);
} while (code_point.has_value());
identifier = builder.to_string_without_validation();
identifier = builder.to_utf16_string();
m_parsed_identifiers->identifiers.set(*identifier);
auto it = s_keywords.find(identifier->hash(), [&](auto& entry) { return entry.key == identifier; });
@ -800,65 +790,65 @@ Token Lexer::next()
} else if (is_numeric_literal_start()) {
token_type = TokenType::NumericLiteral;
bool is_invalid_numeric_literal = false;
if (m_current_char == '0') {
if (m_current_code_unit == '0') {
consume();
if (m_current_char == '.') {
if (m_current_code_unit == '.') {
// decimal
consume();
while (is_ascii_digit(m_current_char))
while (is_ascii_digit(m_current_code_unit))
consume();
if (m_current_char == 'e' || m_current_char == 'E')
if (m_current_code_unit == 'e' || m_current_code_unit == 'E')
is_invalid_numeric_literal = !consume_exponent();
} else if (m_current_char == 'e' || m_current_char == 'E') {
} else if (m_current_code_unit == 'e' || m_current_code_unit == 'E') {
is_invalid_numeric_literal = !consume_exponent();
} else if (m_current_char == 'o' || m_current_char == 'O') {
} else if (m_current_code_unit == 'o' || m_current_code_unit == 'O') {
// octal
is_invalid_numeric_literal = !consume_octal_number();
if (m_current_char == 'n') {
if (m_current_code_unit == 'n') {
consume();
token_type = TokenType::BigIntLiteral;
}
} else if (m_current_char == 'b' || m_current_char == 'B') {
} else if (m_current_code_unit == 'b' || m_current_code_unit == 'B') {
// binary
is_invalid_numeric_literal = !consume_binary_number();
if (m_current_char == 'n') {
if (m_current_code_unit == 'n') {
consume();
token_type = TokenType::BigIntLiteral;
}
} else if (m_current_char == 'x' || m_current_char == 'X') {
} else if (m_current_code_unit == 'x' || m_current_code_unit == 'X') {
// hexadecimal
is_invalid_numeric_literal = !consume_hexadecimal_number();
if (m_current_char == 'n') {
if (m_current_code_unit == 'n') {
consume();
token_type = TokenType::BigIntLiteral;
}
} else if (m_current_char == 'n') {
} else if (m_current_code_unit == 'n') {
consume();
token_type = TokenType::BigIntLiteral;
} else if (is_ascii_digit(m_current_char)) {
} else if (is_ascii_digit(m_current_code_unit)) {
// octal without '0o' prefix. Forbidden in 'strict mode'
do {
consume();
} while (is_ascii_digit(m_current_char));
} while (is_ascii_digit(m_current_code_unit));
}
} else {
// 1...9 or period
while (is_ascii_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_ascii_digit))
while (is_ascii_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_ascii_digit))
consume();
if (m_current_char == 'n') {
if (m_current_code_unit == 'n') {
consume();
token_type = TokenType::BigIntLiteral;
} else {
if (m_current_char == '.') {
if (m_current_code_unit == '.') {
consume();
if (m_current_char == '_')
if (m_current_code_unit == '_')
is_invalid_numeric_literal = true;
while (is_ascii_digit(m_current_char) || match_numeric_literal_separator_followed_by(is_ascii_digit)) {
while (is_ascii_digit(m_current_code_unit) || match_numeric_literal_separator_followed_by(is_ascii_digit)) {
consume();
}
}
if (m_current_char == 'e' || m_current_char == 'E')
if (m_current_code_unit == 'e' || m_current_code_unit == 'E')
is_invalid_numeric_literal = is_invalid_numeric_literal || !consume_exponent();
}
}
@ -866,26 +856,26 @@ Token Lexer::next()
token_type = TokenType::Invalid;
token_message = "Invalid numeric literal"sv;
}
} else if (m_current_char == '"' || m_current_char == '\'') {
char stop_char = m_current_char;
} else if (m_current_code_unit == '"' || m_current_code_unit == '\'') {
auto stop_char = m_current_code_unit;
consume();
// Note: LS/PS line terminators are allowed in string literals.
while (m_current_char != stop_char && m_current_char != '\r' && m_current_char != '\n' && !is_eof()) {
if (m_current_char == '\\') {
while (m_current_code_unit != stop_char && m_current_code_unit != '\r' && m_current_code_unit != '\n' && !is_eof()) {
if (m_current_code_unit == '\\') {
consume();
if (m_current_char == '\r' && m_position < m_source.length() && m_source[m_position] == '\n') {
if (m_current_code_unit == '\r' && m_position < m_source.length_in_code_units() && m_source.code_unit_at(m_position) == '\n') {
consume();
}
}
consume();
}
if (m_current_char != stop_char) {
if (m_current_code_unit != stop_char) {
token_type = TokenType::UnterminatedStringLiteral;
} else {
consume();
token_type = TokenType::StringLiteral;
}
} else if (m_current_char == '/' && !slash_means_division()) {
} else if (m_current_code_unit == '/' && !slash_means_division()) {
consume();
token_type = consume_regex_literal();
} else if (m_eof) {
@ -896,55 +886,53 @@ Token Lexer::next()
token_type = TokenType::Eof;
}
} else {
bool found_token = false;
// There is only one four-char operator: >>>=
bool found_four_char_token = false;
if (match('>', '>', '>', '=')) {
found_four_char_token = true;
consume();
consume();
consume();
consume();
found_token = true;
token_type = TokenType::UnsignedShiftRightEquals;
consume();
consume();
consume();
consume();
}
bool found_three_char_token = false;
if (!found_four_char_token && m_position + 1 < m_source.length()) {
if (!found_token && m_position + 1 < m_source.length_in_code_units()) {
auto three_chars_view = m_source.substring_view(m_position - 1, 3);
if (auto type = parse_three_char_token(three_chars_view); type != TokenType::Invalid) {
found_three_char_token = true;
consume();
consume();
consume();
found_token = true;
token_type = type;
consume();
consume();
consume();
}
}
bool found_two_char_token = false;
if (!found_four_char_token && !found_three_char_token && m_position < m_source.length()) {
if (!found_token && m_position < m_source.length_in_code_units()) {
auto two_chars_view = m_source.substring_view(m_position - 1, 2);
if (auto type = parse_two_char_token(two_chars_view); type != TokenType::Invalid) {
// OptionalChainingPunctuator :: ?. [lookahead ∉ DecimalDigit]
if (!(type == TokenType::QuestionMarkPeriod && m_position + 1 < m_source.length() && is_ascii_digit(m_source[m_position + 1]))) {
found_two_char_token = true;
consume();
consume();
if (!(type == TokenType::QuestionMarkPeriod && m_position + 1 < m_source.length_in_code_units() && is_ascii_digit(m_source.code_unit_at(m_position + 1)))) {
found_token = true;
token_type = type;
consume();
consume();
}
}
}
bool found_one_char_token = false;
if (!found_four_char_token && !found_three_char_token && !found_two_char_token) {
if (auto type = s_single_char_tokens[static_cast<u8>(m_current_char)]; type != TokenType::Invalid) {
found_one_char_token = true;
consume();
if (!found_token && is_ascii(m_current_code_unit)) {
if (auto type = s_single_char_tokens[static_cast<u8>(m_current_code_unit)]; type != TokenType::Invalid) {
found_token = true;
token_type = type;
consume();
}
}
if (!found_four_char_token && !found_three_char_token && !found_two_char_token && !found_one_char_token) {
consume();
if (!found_token) {
token_type = TokenType::Invalid;
consume();
}
}
@ -956,27 +944,14 @@ Token Lexer::next()
}
}
if (m_hit_invalid_unicode.has_value()) {
value_start = m_hit_invalid_unicode.value() - 1;
m_current_token = Token(TokenType::Invalid, "Invalid unicode codepoint in source"_string,
""sv, // Since the invalid unicode can occur anywhere in the current token the trivia is not correct
m_source.substring_view(value_start + 1, min(4u, m_source.length() - value_start - 2)),
m_line_number,
m_line_column - 1,
value_start + 1);
m_hit_invalid_unicode.clear();
// Do not produce any further tokens.
VERIFY(is_eof());
} else {
m_current_token = Token(
token_type,
token_message,
m_source.substring_view(trivia_start - 1, value_start - trivia_start),
m_source.substring_view(value_start - 1, m_position - value_start),
value_start_line_number,
value_start_column_number,
value_start - 1);
}
m_current_token = Token(
token_type,
token_message,
m_source.substring_view(trivia_start - 1, value_start - trivia_start),
m_source.substring_view(value_start - 1, m_position - value_start),
value_start_line_number,
value_start_column_number,
value_start - 1);
if (identifier.has_value())
m_current_token.set_identifier_value(identifier.release_value());
@ -1003,10 +978,10 @@ Token Lexer::force_slash_as_regex()
size_t value_start = m_position - 1;
if (has_equals) {
VERIFY(m_source[value_start - 1] == '=');
VERIFY(m_source.code_unit_at(value_start - 1) == '=');
--value_start;
--m_position;
m_current_char = '=';
m_current_code_unit = '=';
}
TokenType token_type = consume_regex_literal();
@ -1035,13 +1010,14 @@ Token Lexer::force_slash_as_regex()
TokenType Lexer::consume_regex_literal()
{
while (!is_eof()) {
if (is_line_terminator() || (!m_regex_is_in_character_class && m_current_char == '/')) {
if (is_line_terminator() || (!m_regex_is_in_character_class && m_current_code_unit == '/'))
break;
} else if (m_current_char == '[') {
if (m_current_code_unit == '[') {
m_regex_is_in_character_class = true;
} else if (m_current_char == ']') {
} else if (m_current_code_unit == ']') {
m_regex_is_in_character_class = false;
} else if (!m_regex_is_in_character_class && m_current_char == '/') {
} else if (!m_regex_is_in_character_class && m_current_code_unit == '/') {
break;
}
@ -1050,7 +1026,7 @@ TokenType Lexer::consume_regex_literal()
consume();
}
if (m_current_char == '/') {
if (m_current_code_unit == '/') {
consume();
return TokenType::RegexLiteral;
}
@ -1063,8 +1039,8 @@ bool is_syntax_character(u32 code_point)
{
// SyntaxCharacter :: one of
// ^ $ \ . * + ? ( ) [ ] { } |
static constexpr Utf8View syntax_characters { "^$\\.*+?()[]{}|"sv };
return syntax_characters.contains(code_point);
static constexpr auto syntax_characters = "^$\\.*+?()[]{}|"sv;
return is_ascii(code_point) && syntax_characters.contains(static_cast<char>(code_point));
}
// https://tc39.es/ecma262/#prod-WhiteSpace

Lexer.h

@ -6,23 +6,22 @@
#pragma once
#include "Token.h"
#include <AK/ByteString.h>
#include <AK/HashMap.h>
#include <AK/String.h>
#include <AK/StringView.h>
#include <AK/Utf16String.h>
#include <LibJS/Export.h>
#include <LibJS/Token.h>
namespace JS {
class JS_API Lexer {
public:
explicit Lexer(StringView source, StringView filename = "(unknown)"sv, size_t line_number = 1, size_t line_column = 0);
explicit Lexer(Utf16String source, StringView filename = "(unknown)"sv, size_t line_number = 1, size_t line_column = 0);
Token next();
ByteString const& source() const { return m_source; }
Utf16String const& source() const { return m_source; }
String const& filename() const { return m_filename; }
void disallow_html_comments() { m_allow_html_comments = false; }
@ -37,7 +36,6 @@ private:
bool consume_binary_number();
bool consume_decimal_number();
bool is_unicode_character() const;
u32 current_code_point() const;
bool is_eof() const;
@ -50,19 +48,19 @@ private:
bool is_block_comment_start() const;
bool is_block_comment_end() const;
bool is_numeric_literal_start() const;
bool match(char, char) const;
bool match(char, char, char) const;
bool match(char, char, char, char) const;
bool match(char16_t, char16_t) const;
bool match(char16_t, char16_t, char16_t) const;
bool match(char16_t, char16_t, char16_t, char16_t) const;
template<typename Callback>
bool match_numeric_literal_separator_followed_by(Callback) const;
bool slash_means_division() const;
TokenType consume_regex_literal();
ByteString m_source;
Utf16String m_source;
size_t m_position { 0 };
Token m_current_token;
char m_current_char { 0 };
char16_t m_current_code_unit { 0 };
bool m_eof { false };
String m_filename;
@ -79,14 +77,12 @@ private:
bool m_allow_html_comments { true };
Optional<size_t> m_hit_invalid_unicode;
static HashMap<FlyString, TokenType> s_keywords;
static HashMap<Utf16FlyString, TokenType> s_keywords;
struct ParsedIdentifiers : public RefCounted<ParsedIdentifiers> {
// Resolved identifiers must be kept alive for the duration of the parsing stage, otherwise
// the only references to these strings are deleted by the Token destructor.
HashTable<FlyString> identifiers;
HashTable<Utf16FlyString> identifiers;
};
RefPtr<ParsedIdentifiers> m_parsed_identifiers;

Parser.cpp

@ -11,7 +11,6 @@
#include <AK/Array.h>
#include <AK/CharacterTypes.h>
#include <AK/HashTable.h>
#include <AK/ScopeGuard.h>
#include <AK/StdLibExtras.h>
#include <AK/TemporaryChange.h>
@ -682,7 +681,7 @@ Parser::ParserState::ParserState(Lexer l, Program::Type program_type)
}
Parser::Parser(Lexer lexer, Program::Type program_type, Optional<EvalInitialState> initial_state_for_eval)
: m_source_code(SourceCode::create(lexer.filename(), String::from_byte_string(lexer.source()).release_value_but_fixme_should_propagate_errors()))
: m_source_code(SourceCode::create(lexer.filename(), lexer.source()))
, m_state(move(lexer), program_type)
, m_program_type(program_type)
{
@ -752,7 +751,7 @@ bool Parser::parse_directive(ScopeNode& body)
if (!is<StringLiteral>(expression))
break;
if (raw_value.is_one_of("'use strict'"sv, "\"use strict\"")) {
if (raw_value.is_one_of("'use strict'"sv, "\"use strict\""sv)) {
found_use_strict = true;
if (m_state.string_legacy_octal_escape_sequence_in_scope)
@ -993,6 +992,13 @@ static bool is_strict_reserved_word(StringView str)
});
}
static bool is_strict_reserved_word(Utf16View const& str)
{
return any_of(strict_reserved_words, [&str](StringView word) {
return word == str;
});
}
static bool is_simple_parameter_list(FunctionParameters const& parameters)
{
return all_of(parameters.parameters(), [](FunctionParameter const& parameter) {
@ -1084,7 +1090,7 @@ RefPtr<FunctionExpression const> Parser::try_parse_arrow_function_expression(boo
syntax_error("BindingIdentifier may not be 'arguments' or 'eval' in strict mode"_string);
if (is_async && token.value() == "await"sv)
syntax_error("'await' is a reserved identifier in async functions"_string);
auto identifier = create_ast_node<Identifier const>({ m_source_code, rule_start.position(), position() }, token.fly_string_value());
auto identifier = create_ast_node<Identifier const>({ m_source_code, rule_start.position(), position() }, token.value().to_utf8_but_should_be_ported_to_utf16());
parameters = FunctionParameters::create(Vector<FunctionParameter> { FunctionParameter { identifier, {} } });
}
@ -1159,10 +1165,12 @@ RefPtr<FunctionExpression const> Parser::try_parse_arrow_function_expression(boo
}
auto function_start_offset = rule_start.position().offset;
auto function_end_offset = position().offset - m_state.current_token.trivia().length();
auto source_text = ByteString { m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset) };
auto function_end_offset = position().offset - m_state.current_token.trivia().length_in_code_units();
auto source_text = m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset);
return create_ast_node<FunctionExpression>(
{ m_source_code, rule_start.position(), position() }, nullptr, move(source_text),
{ m_source_code, rule_start.position(), position() }, nullptr, MUST(source_text.to_byte_string()),
move(body), move(parameters), function_length, function_kind, body->in_strict_mode(),
parsing_insights, move(local_variables_names), /* is_arrow_function */ true);
}
@ -1251,7 +1259,7 @@ RefPtr<LabelledStatement const> Parser::try_parse_labelled_statement(AllowLabell
m_state.labels_in_scope.remove(identifier);
return create_ast_node<LabelledStatement>({ m_source_code, rule_start.position(), position() }, identifier, labelled_item.release_nonnull());
return create_ast_node<LabelledStatement>({ m_source_code, rule_start.position(), position() }, identifier.view().to_utf8_but_should_be_ported_to_utf16(), labelled_item.release_nonnull());
}
RefPtr<MetaProperty const> Parser::try_parse_new_target_expression()
@ -1356,7 +1364,7 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
Vector<NonnullRefPtr<ClassElement const>> elements;
RefPtr<Expression const> super_class;
RefPtr<FunctionExpression const> constructor;
HashTable<FlyString> found_private_names;
HashTable<Utf16FlyString> found_private_names;
RefPtr<Identifier const> class_name;
if (expect_class_name || match_identifier() || match(TokenType::Yield) || match(TokenType::Await)) {
@ -1395,8 +1403,8 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
consume(TokenType::CurlyOpen);
HashTable<FlyString> referenced_private_names;
HashTable<FlyString>* outer_referenced_private_names = m_state.referenced_private_names;
HashTable<Utf16FlyString> referenced_private_names;
HashTable<Utf16FlyString>* outer_referenced_private_names = m_state.referenced_private_names;
m_state.referenced_private_names = &referenced_private_names;
ScopeGuard restore_private_name_table = [&] {
m_state.referenced_private_names = outer_referenced_private_names;
@ -1433,7 +1441,7 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
is_generator = true;
}
FlyString name;
Utf16FlyString name;
if (match_property_key() || match(TokenType::PrivateIdentifier)) {
if (!is_generator && !is_async && m_state.current_token.original_value() == "static"sv) {
if (match(TokenType::Identifier)) {
@ -1467,11 +1475,11 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
switch (m_state.current_token.type()) {
case TokenType::Identifier:
name = consume().fly_string_value();
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, name.to_string());
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, name.view().to_utf8_but_should_be_ported_to_utf16());
break;
case TokenType::PrivateIdentifier:
name = consume().fly_string_value();
if (name == "#constructor")
if (name == "#constructor"sv)
syntax_error("Private property with name '#constructor' is not allowed"_string);
if (method_kind != ClassMethod::Kind::Method) {
@ -1504,11 +1512,11 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
syntax_error(MUST(String::formatted("Duplicate private field or method named '{}'", name)));
}
property_key = create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, name);
property_key = create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, name.view().to_utf8_but_should_be_ported_to_utf16());
break;
case TokenType::StringLiteral: {
auto string_literal = parse_string_literal(consume());
name = string_literal->value();
name = Utf16FlyString::from_utf8(string_literal->value());
property_key = move(string_literal);
break;
}
@ -1526,24 +1534,25 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
switch (method_kind) {
case ClassMethod::Kind::Method:
if (is_async) {
name = "async"_fly_string;
name = "async"_utf16_fly_string;
is_async = false;
} else {
VERIFY(is_static);
name = "static"_fly_string;
name = "static"_utf16_fly_string;
is_static = false;
}
break;
case ClassMethod::Kind::Getter:
name = "get"_fly_string;
name = "get"_utf16_fly_string;
method_kind = ClassMethod::Kind::Method;
break;
case ClassMethod::Kind::Setter:
name = "set"_fly_string;
name = "set"_utf16_fly_string;
method_kind = ClassMethod::Kind::Method;
break;
}
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, name.to_string());
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, name.view().to_utf8_but_should_be_ported_to_utf16());
} else if (match(TokenType::CurlyOpen) && is_static) {
auto static_start = push_start();
consume(TokenType::CurlyOpen);
@ -1688,10 +1697,11 @@ NonnullRefPtr<ClassExpression const> Parser::parse_class_expression(bool expect_
}
auto function_start_offset = rule_start.position().offset;
auto function_end_offset = position().offset - m_state.current_token.trivia().length();
auto source_text = ByteString { m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset) };
auto function_end_offset = position().offset - m_state.current_token.trivia().length_in_code_units();
return create_ast_node<ClassExpression>({ m_source_code, rule_start.position(), position() }, move(class_name), move(source_text), move(constructor), move(super_class), move(elements));
auto source_text = m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset);
return create_ast_node<ClassExpression>({ m_source_code, rule_start.position(), position() }, move(class_name), MUST(source_text.to_byte_string()), move(constructor), move(super_class), move(elements));
}
Parser::PrimaryExpressionParseResult Parser::parse_primary_expression()
@ -1759,14 +1769,14 @@ Parser::PrimaryExpressionParseResult Parser::parse_primary_expression()
auto string = m_state.current_token.value();
// This could be 'eval' or 'arguments' and thus needs a custom check (`eval[1] = true`)
if (m_state.strict_mode && (string == "let" || is_strict_reserved_word(string)))
if (m_state.strict_mode && (string == "let"sv || is_strict_reserved_word(string)))
syntax_error(MUST(String::formatted("Identifier must not be a reserved word in strict mode ('{}')", string)));
return { parse_identifier() };
}
case TokenType::NumericLiteral:
return { create_ast_node<NumericLiteral>({ m_source_code, rule_start.position(), position() }, consume_and_validate_numeric_literal().double_value()) };
case TokenType::BigIntLiteral:
return { create_ast_node<BigIntLiteral>({ m_source_code, rule_start.position(), position() }, consume().value()) };
return { create_ast_node<BigIntLiteral>({ m_source_code, rule_start.position(), position() }, MUST(consume().value().to_byte_string())) };
case TokenType::BoolLiteral:
return { create_ast_node<BooleanLiteral>({ m_source_code, rule_start.position(), position() }, consume_and_allow_division().bool_value()) };
case TokenType::StringLiteral:
@ -1842,7 +1852,7 @@ Parser::PrimaryExpressionParseResult Parser::parse_primary_expression()
syntax_error(MUST(String::formatted("Reference to undeclared private field or method '{}'", m_state.current_token.value())));
if (next_token().type() != TokenType::In)
syntax_error("Cannot have a private identifier in expression if not followed by 'in'"_string);
return { create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, consume().fly_string_value()) };
return { create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, consume().value().to_utf8_but_should_be_ported_to_utf16()) };
default:
if (match_identifier_name())
goto read_as_identifier;
@ -1856,7 +1866,7 @@ Parser::PrimaryExpressionParseResult Parser::parse_primary_expression()
NonnullRefPtr<RegExpLiteral const> Parser::parse_regexp_literal()
{
auto rule_start = push_start();
auto pattern = consume().fly_string_value().to_string();
auto pattern = consume().value().to_utf8_but_should_be_ported_to_utf16();
// Remove leading and trailing slash.
pattern = MUST(pattern.substring_from_byte_offset(1, pattern.bytes().size() - 2));
@ -1865,7 +1875,7 @@ NonnullRefPtr<RegExpLiteral const> Parser::parse_regexp_literal()
if (match(TokenType::RegexFlags)) {
auto flags_start = position();
flags = consume().fly_string_value().to_string();
flags = consume().value().to_utf8_but_should_be_ported_to_utf16();
auto parsed_flags_or_error = regex_flags_from_string(flags);
if (parsed_flags_or_error.is_error())
@ -1996,7 +2006,7 @@ NonnullRefPtr<Expression const> Parser::parse_property_key()
} else if (match(TokenType::NumericLiteral)) {
return create_ast_node<NumericLiteral>({ m_source_code, rule_start.position(), position() }, consume().double_value());
} else if (match(TokenType::BigIntLiteral)) {
return create_ast_node<BigIntLiteral>({ m_source_code, rule_start.position(), position() }, consume().value());
return create_ast_node<BigIntLiteral>({ m_source_code, rule_start.position(), position() }, MUST(consume().value().to_byte_string()));
} else if (match(TokenType::BracketOpen)) {
consume(TokenType::BracketOpen);
auto result = parse_expression(2);
@ -2005,7 +2015,7 @@ NonnullRefPtr<Expression const> Parser::parse_property_key()
} else {
if (!match_identifier_name())
expected("IdentifierName");
return create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, consume().fly_string_value().to_string());
return create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, consume().value().to_utf8_but_should_be_ported_to_utf16());
}
}
@ -2073,7 +2083,7 @@ NonnullRefPtr<ObjectExpression const> Parser::parse_object_expression()
property_type = ObjectProperty::Type::Setter;
property_key = parse_property_key();
} else {
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, identifier.fly_string_value().to_string());
property_key = create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, identifier.value().to_utf8_but_should_be_ported_to_utf16());
property_value = create_identifier_and_register_in_current_scope({ m_source_code, rule_start.position(), position() }, identifier.fly_string_value());
}
} else {
@ -2230,7 +2240,7 @@ NonnullRefPtr<StringLiteral const> Parser::parse_string_literal(Token const& tok
}
}
return create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, MUST(String::from_byte_string(string)));
return create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, string.to_utf8_but_should_be_ported_to_utf16());
}
NonnullRefPtr<TemplateLiteral const> Parser::parse_template_literal(bool is_tagged)
@ -2264,7 +2274,7 @@ NonnullRefPtr<TemplateLiteral const> Parser::parse_template_literal(bool is_tagg
else
expressions.append(move(parsed_string_value));
if (is_tagged)
raw_strings.append(create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, MUST(String::from_byte_string(token.raw_template_value()))));
raw_strings.append(create_ast_node<StringLiteral>({ m_source_code, rule_start.position(), position() }, token.raw_template_value().to_utf8_but_should_be_ported_to_utf16()));
} else if (match(TokenType::TemplateLiteralExprStart)) {
consume(TokenType::TemplateLiteralExprStart);
if (match(TokenType::TemplateLiteralExprEnd)) {
@ -2482,12 +2492,12 @@ Parser::ExpressionResult Parser::parse_secondary_expression(NonnullRefPtr<Expres
else if (is<SuperExpression>(*lhs))
syntax_error(MUST(String::formatted("Cannot access private field or method '{}' on super", m_state.current_token.value())));
return create_ast_node<MemberExpression>({ m_source_code, rule_start.position(), position() }, move(lhs), create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, consume().fly_string_value().to_string()));
return create_ast_node<MemberExpression>({ m_source_code, rule_start.position(), position() }, move(lhs), create_ast_node<PrivateIdentifier>({ m_source_code, rule_start.position(), position() }, consume().value().to_utf8_but_should_be_ported_to_utf16()));
} else if (!match_identifier_name()) {
expected("IdentifierName");
}
return create_ast_node<MemberExpression>({ m_source_code, rule_start.position(), position() }, move(lhs), create_ast_node<Identifier>({ m_source_code, rule_start.position(), position() }, consume_and_allow_division().fly_string_value()));
return create_ast_node<MemberExpression>({ m_source_code, rule_start.position(), position() }, move(lhs), create_ast_node<Identifier>({ m_source_code, rule_start.position(), position() }, consume_and_allow_division().value().to_utf8_but_should_be_ported_to_utf16()));
case TokenType::BracketOpen: {
consume(TokenType::BracketOpen);
auto expression = create_ast_node<MemberExpression>({ m_source_code, rule_start.position(), position() }, move(lhs), parse_expression(0), true);
@ -2584,7 +2594,8 @@ RefPtr<BindingPattern const> Parser::synthesize_binding_pattern(Expression const
auto source_start_offset = expression.source_range().start.offset;
auto source_end_offset = expression.source_range().end.offset;
auto source = m_state.lexer.source().substring_view(source_start_offset, source_end_offset - source_start_offset);
Lexer lexer { source, m_state.lexer.filename(), expression.source_range().start.line, expression.source_range().start.column };
Lexer lexer { Utf16String::from_utf16(source), m_state.lexer.filename(), expression.source_range().start.line, expression.source_range().start.column };
Parser parser { lexer };
parser.m_state.current_scope_pusher = m_state.current_scope_pusher;
@ -3016,8 +3027,9 @@ NonnullRefPtr<FunctionNodeType> Parser::parse_function_node(u16 parse_options, O
check_identifier_name_for_assignment_validity(name->string(), true);
auto function_start_offset = rule_start.position().offset;
auto function_end_offset = position().offset - m_state.current_token.trivia().length();
auto source_text = ByteString { m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset) };
auto function_end_offset = position().offset - m_state.current_token.trivia().length_in_code_units();
auto source_text = m_state.lexer.source().substring_view(function_start_offset, function_end_offset - function_start_offset);
parsing_insights.might_need_arguments_object = m_state.function_might_need_arguments_object;
if (parse_options & FunctionNodeParseOptions::IsConstructor) {
parsing_insights.uses_this = true;
@ -3025,7 +3037,7 @@ NonnullRefPtr<FunctionNodeType> Parser::parse_function_node(u16 parse_options, O
}
return create_ast_node<FunctionNodeType>(
{ m_source_code, rule_start.position(), position() },
name, move(source_text), move(body), parameters.release_nonnull(), function_length,
name, MUST(source_text.to_byte_string()), move(body), parameters.release_nonnull(), function_length,
function_kind, has_strict_directive, parsing_insights,
move(local_variables_names));
}
@ -3079,7 +3091,7 @@ NonnullRefPtr<FunctionParameters const> Parser::parse_formal_parameters(int& fun
syntax_error(message, Position { token.line_number(), token.line_column() });
break;
}
return create_ast_node<Identifier const>({ m_source_code, rule_start.position(), position() }, token.fly_string_value());
return create_ast_node<Identifier const>({ m_source_code, rule_start.position(), position() }, token.value().to_utf8_but_should_be_ported_to_utf16());
};
while (match(TokenType::CurlyOpen) || match(TokenType::BracketOpen) || match_identifier() || match(TokenType::TripleDot)) {
@ -3190,8 +3202,8 @@ RefPtr<BindingPattern const> Parser::parse_binding_pattern(Parser::AllowDuplicat
name = create_identifier_and_register_in_current_scope({ m_source_code, rule_start.position(), position() }, string_literal->value());
} else if (match(TokenType::BigIntLiteral)) {
auto string_value = consume().fly_string_value();
VERIFY(string_value.bytes_as_string_view().ends_with("n"sv));
name = create_identifier_and_register_in_current_scope({ m_source_code, rule_start.position(), position() }, FlyString(MUST(string_value.to_string().substring_from_byte_offset(0, string_value.bytes().size() - 1))));
VERIFY(string_value.view().ends_with('n'));
name = create_identifier_and_register_in_current_scope({ m_source_code, rule_start.position(), position() }, Utf16FlyString::from_utf16(string_value.view().substring_view(0, string_value.length_in_code_units() - 1)));
} else {
name = create_identifier_and_register_in_current_scope({ m_source_code, rule_start.position(), position() }, consume().fly_string_value());
}
@ -3508,7 +3520,7 @@ NonnullRefPtr<BreakStatement const> Parser::parse_break_statement()
{
auto rule_start = push_start();
consume(TokenType::Break);
Optional<FlyString> target_label;
Optional<Utf16FlyString> target_label;
if (match(TokenType::Semicolon)) {
consume();
} else {
@ -3525,7 +3537,8 @@ NonnullRefPtr<BreakStatement const> Parser::parse_break_statement()
if (!target_label.has_value() && !m_state.in_break_context)
syntax_error("Unlabeled 'break' not allowed outside of a loop or switch statement"_string);
return create_ast_node<BreakStatement>({ m_source_code, rule_start.position(), position() }, target_label);
auto utf8_target_label = target_label.map([](auto const& label) -> FlyString { return label.view().to_utf8_but_should_be_ported_to_utf16(); });
return create_ast_node<BreakStatement>({ m_source_code, rule_start.position(), position() }, move(utf8_target_label));
}
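The label plumbing above keeps a Utf16FlyString inside the parser and converts to the AST's UTF-8 FlyString only when the node is built. A minimal sketch of that conversion in isolation, assuming only the Optional::map and view() calls used in this hunk; the helper name is hypothetical:

#include <AK/FlyString.h>
#include <AK/Optional.h>
#include <AK/Utf16FlyString.h>

// Hypothetical helper (not from this change): bridge an optional UTF-16 label to
// the UTF-8 form the AST nodes still take.
static Optional<FlyString> bridge_label(Optional<Utf16FlyString> const& label)
{
    return label.map([](auto const& it) -> FlyString {
        return it.view().to_utf8_but_should_be_ported_to_utf16();
    });
}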
NonnullRefPtr<ContinueStatement const> Parser::parse_continue_statement()
@ -3535,11 +3548,12 @@ NonnullRefPtr<ContinueStatement const> Parser::parse_continue_statement()
syntax_error("'continue' not allow outside of a loop"_string);
consume(TokenType::Continue);
Optional<FlyString> target_label;
Optional<Utf16FlyString> target_label;
if (match(TokenType::Semicolon)) {
consume();
return create_ast_node<ContinueStatement>({ m_source_code, rule_start.position(), position() }, target_label);
return create_ast_node<ContinueStatement>({ m_source_code, rule_start.position(), position() }, OptionalNone {});
}
if (!m_state.current_token.trivia_contains_line_terminator() && match_identifier()) {
auto label_position = position();
target_label = consume().fly_string_value();
@ -3551,7 +3565,9 @@ NonnullRefPtr<ContinueStatement const> Parser::parse_continue_statement()
label->value = label_position;
}
consume_or_insert_semicolon();
return create_ast_node<ContinueStatement>({ m_source_code, rule_start.position(), position() }, target_label);
auto utf8_target_label = target_label.map([](auto const& label) -> FlyString { return label.view().to_utf8_but_should_be_ported_to_utf16(); });
return create_ast_node<ContinueStatement>({ m_source_code, rule_start.position(), position() }, move(utf8_target_label));
}
NonnullRefPtr<ConditionalExpression const> Parser::parse_conditional_expression(NonnullRefPtr<Expression const> test, ForbiddenTokens forbidden)
@ -3587,7 +3603,7 @@ NonnullRefPtr<OptionalChain const> Parser::parse_optional_chain(NonnullRefPtr<Ex
auto start = position();
auto private_identifier = consume();
chain.append(OptionalChain::PrivateMemberReference {
create_ast_node<PrivateIdentifier>({ m_source_code, start, position() }, private_identifier.fly_string_value()),
create_ast_node<PrivateIdentifier>({ m_source_code, start, position() }, private_identifier.value().to_utf8_but_should_be_ported_to_utf16()),
OptionalChain::Mode::Optional });
break;
}
@ -3604,7 +3620,7 @@ NonnullRefPtr<OptionalChain const> Parser::parse_optional_chain(NonnullRefPtr<Ex
auto start = position();
auto identifier = consume_and_allow_division();
chain.append(OptionalChain::MemberReference {
create_ast_node<Identifier>({ m_source_code, start, position() }, identifier.fly_string_value()),
create_ast_node<Identifier>({ m_source_code, start, position() }, identifier.value().to_utf8_but_should_be_ported_to_utf16()),
OptionalChain::Mode::Optional,
});
} else {
@ -3623,14 +3639,14 @@ NonnullRefPtr<OptionalChain const> Parser::parse_optional_chain(NonnullRefPtr<Ex
auto start = position();
auto private_identifier = consume();
chain.append(OptionalChain::PrivateMemberReference {
create_ast_node<PrivateIdentifier>({ m_source_code, start, position() }, private_identifier.fly_string_value()),
create_ast_node<PrivateIdentifier>({ m_source_code, start, position() }, private_identifier.value().to_utf8_but_should_be_ported_to_utf16()),
OptionalChain::Mode::NotOptional,
});
} else if (match_identifier_name()) {
auto start = position();
auto identifier = consume_and_allow_division();
chain.append(OptionalChain::MemberReference {
create_ast_node<Identifier>({ m_source_code, start, position() }, identifier.fly_string_value()),
create_ast_node<Identifier>({ m_source_code, start, position() }, identifier.value().to_utf8_but_should_be_ported_to_utf16()),
OptionalChain::Mode::NotOptional,
});
} else {
@ -4503,8 +4519,8 @@ Token Parser::consume(TokenType expected_type)
Token Parser::consume_and_validate_numeric_literal()
{
auto is_unprefixed_octal_number = [](StringView value) {
return value.length() > 1 && value[0] == '0' && is_ascii_digit(value[1]);
auto is_unprefixed_octal_number = [](Utf16View const& value) {
return value.length_in_code_units() > 1 && value.code_unit_at(0) == '0' && is_ascii_digit(value.code_unit_at(1));
};
auto literal_start = position();
auto token = consume(TokenType::NumericLiteral);
@ -4582,6 +4598,19 @@ void Parser::check_identifier_name_for_assignment_validity(FlyString const& name
}
}
void Parser::check_identifier_name_for_assignment_validity(Utf16FlyString const& name, bool force_strict)
{
// FIXME: This is now called from multiple places; maybe the error message should be dynamic?
if (any_of(s_reserved_words, [&](auto& value) { return name == value; })) {
syntax_error("Binding pattern target may not be a reserved word"_string);
} else if (m_state.strict_mode || force_strict) {
if (name.is_one_of("arguments"sv, "eval"sv))
syntax_error("Binding pattern target may not be called 'arguments' or 'eval' in strict mode"_string);
else if (is_strict_reserved_word(name))
syntax_error(MUST(String::formatted("Binding pattern target may not be called '{}' in strict mode", name)));
}
}
FlyString Parser::consume_string_value()
{
VERIFY(match(TokenType::StringLiteral));
@ -4628,7 +4657,7 @@ ModuleRequest Parser::parse_module_request()
key = parse_string_literal(m_state.current_token)->value();
consume();
} else if (match_identifier_name()) {
key = consume().fly_string_value().to_string();
key = consume().value().to_utf8_but_should_be_ported_to_utf16();
} else {
expected("IdentifierName or StringValue as WithKey");
consume();
@ -4705,7 +4734,7 @@ NonnullRefPtr<ImportStatement const> Parser::parse_import_statement(Program& pro
if (match_imported_binding()) {
// ImportedDefaultBinding : ImportedBinding
auto id_position = position();
auto bound_name = consume().fly_string_value();
auto bound_name = consume().value().to_utf8_but_should_be_ported_to_utf16();
entries_with_location.append({ { default_string_value, bound_name }, id_position });
if (match(TokenType::Comma)) {
@ -4728,7 +4757,7 @@ NonnullRefPtr<ImportStatement const> Parser::parse_import_statement(Program& pro
if (match_imported_binding()) {
auto namespace_position = position();
auto namespace_name = consume().fly_string_value();
auto namespace_name = consume().value().to_utf8_but_should_be_ported_to_utf16();
entries_with_location.append({ ImportEntry({}, namespace_name), namespace_position });
} else {
syntax_error(MUST(String::formatted("Unexpected token: {}", m_state.current_token.name())));
@ -4744,13 +4773,13 @@ NonnullRefPtr<ImportStatement const> Parser::parse_import_statement(Program& pro
// ImportSpecifier : ImportedBinding
auto require_as = !match_imported_binding();
auto name_position = position();
auto name = consume().fly_string_value();
auto name = consume().value().to_utf8_but_should_be_ported_to_utf16();
if (match_as()) {
consume(TokenType::Identifier);
auto alias_position = position();
auto alias = consume_identifier().fly_string_value();
auto alias = consume_identifier().value().to_utf8_but_should_be_ported_to_utf16();
check_identifier_name_for_assignment_validity(alias);
entries_with_location.append({ { name, alias }, alias_position });
@ -4771,7 +4800,7 @@ NonnullRefPtr<ImportStatement const> Parser::parse_import_statement(Program& pro
consume(TokenType::Identifier);
auto alias_position = position();
auto alias = consume_identifier().fly_string_value();
auto alias = consume_identifier().value().to_utf8_but_should_be_ported_to_utf16();
check_identifier_name_for_assignment_validity(alias);
entries_with_location.append({ { move(name), alias }, alias_position });
@ -4983,7 +5012,7 @@ NonnullRefPtr<ExportStatement const> Parser::parse_export_statement(Program& pro
// IdentifierName
// StringLiteral
if (match_identifier_name()) {
return consume().fly_string_value();
return consume().value().to_utf8_but_should_be_ported_to_utf16();
}
if (match(TokenType::StringLiteral)) {
// It is a Syntax Error if ReferencedBindings of NamedExports contains any StringLiterals.
@ -5204,12 +5233,17 @@ template NonnullRefPtr<FunctionDeclaration> Parser::parse_function_node(u16, Opt
NonnullRefPtr<Identifier const> Parser::create_identifier_and_register_in_current_scope(SourceRange range, FlyString string, Optional<DeclarationKind> declaration_kind)
{
auto id = create_ast_node<Identifier const>(range, string);
auto id = create_ast_node<Identifier const>(move(range), move(string));
if (m_state.current_scope_pusher)
m_state.current_scope_pusher->register_identifier(const_cast<Identifier&>(*id), declaration_kind);
return id;
}
NonnullRefPtr<Identifier const> Parser::create_identifier_and_register_in_current_scope(SourceRange range, Utf16FlyString const& string, Optional<DeclarationKind> declaration_kind)
{
return create_identifier_and_register_in_current_scope(move(range), string.view().to_utf8_but_should_be_ported_to_utf16(), declaration_kind);
}
Parser Parser::parse_function_body_from_string(ByteString const& body_string, u16 parse_options, NonnullRefPtr<FunctionParameters const> parameters, FunctionKind kind, FunctionParsingInsights& parsing_insights)
{
RefPtr<FunctionBody const> function_body;


@ -11,16 +11,13 @@
#include <AK/Assertions.h>
#include <AK/HashTable.h>
#include <AK/NonnullRefPtr.h>
#include <AK/StringBuilder.h>
#include <LibJS/AST.h>
#include <LibJS/Export.h>
#include <LibJS/Lexer.h>
#include <LibJS/ParserError.h>
#include <LibJS/Runtime/FunctionConstructor.h>
#include <LibJS/SourceRange.h>
#include <LibJS/Token.h>
#include <initializer_list>
#include <stdio.h>
namespace JS {
@ -194,17 +191,6 @@ public:
bool has_errors() const { return m_state.errors.size(); }
Vector<ParserError> const& errors() const { return m_state.errors; }
void print_errors(bool print_hint = true) const
{
for (auto& error : m_state.errors) {
if (print_hint) {
auto hint = error.source_location_hint(m_state.lexer.source());
if (!hint.is_empty())
warnln("{}", hint);
}
warnln("SyntaxError: {}", error.to_byte_string());
}
}
struct TokenMemoization {
bool try_parse_arrow_function_expression_failed;
@ -260,6 +246,7 @@ private:
Token next_token() const;
void check_identifier_name_for_assignment_validity(FlyString const&, bool force_strict = false);
void check_identifier_name_for_assignment_validity(Utf16FlyString const&, bool force_strict = false);
bool try_parse_arrow_function_expression_failed_at_position(Position const&) const;
void set_try_parse_arrow_function_expression_failed_at_position(Position const&, bool);
@ -308,9 +295,9 @@ private:
Vector<ParserError> errors;
ScopePusher* current_scope_pusher { nullptr };
HashMap<FlyString, Optional<Position>> labels_in_scope;
HashMap<Utf16FlyString, Optional<Position>> labels_in_scope;
HashMap<size_t, Position> invalid_property_range_in_object_expression;
HashTable<FlyString>* referenced_private_names { nullptr };
HashTable<Utf16FlyString>* referenced_private_names { nullptr };
bool strict_mode { false };
bool allow_super_property_lookup { false };
@ -333,6 +320,7 @@ private:
};
[[nodiscard]] NonnullRefPtr<Identifier const> create_identifier_and_register_in_current_scope(SourceRange range, FlyString string, Optional<DeclarationKind> = {});
[[nodiscard]] NonnullRefPtr<Identifier const> create_identifier_and_register_in_current_scope(SourceRange range, Utf16FlyString const& string, Optional<DeclarationKind> = {});
NonnullRefPtr<SourceCode const> m_source_code;
Vector<Position> m_rule_starts;


@ -26,13 +26,15 @@ ByteString ParserError::to_byte_string() const
return ByteString::formatted("{} (line: {}, column: {})", message, position.value().line, position.value().column);
}
ByteString ParserError::source_location_hint(StringView source, char const spacer, char const indicator) const
ByteString ParserError::source_location_hint(Utf16View const& source, char spacer, char indicator) const
{
if (!position.has_value())
return {};
// We need to modify the source to match what the lexer considers one line - normalizing
// line terminators to \n is easier than splitting using all different LT characters.
ByteString source_string = source.replace("\r\n"sv, "\n"sv, ReplaceMode::All).replace("\r"sv, "\n"sv, ReplaceMode::All).replace(LINE_SEPARATOR_STRING, "\n"sv, ReplaceMode::All).replace(PARAGRAPH_SEPARATOR_STRING, "\n"sv, ReplaceMode::All);
auto source_string = source.replace("\r\n"sv, "\n"sv, ReplaceMode::All).replace("\r"sv, "\n"sv, ReplaceMode::All).replace(LINE_SEPARATOR, "\n"sv, ReplaceMode::All).replace(PARAGRAPH_SEPARATOR, "\n"sv, ReplaceMode::All);
StringBuilder builder;
builder.append(source_string.split_view('\n', SplitBehavior::KeepEmpty)[position.value().line - 1]);
builder.append('\n');


@ -22,7 +22,7 @@ struct JS_API ParserError {
String to_string() const;
ByteString to_byte_string() const;
ByteString source_location_hint(StringView source, char const spacer = ' ', char const indicator = '^') const;
ByteString source_location_hint(Utf16View const& source, char spacer = ' ', char indicator = '^') const;
};
}


@ -17,7 +17,7 @@ namespace JS {
GC_DEFINE_ALLOCATOR(Error);
static SourceRange dummy_source_range { SourceCode::create(String {}, String {}), {}, {} };
static SourceRange dummy_source_range { SourceCode::create({}, {}), {}, {} };
SourceRange const& TracebackFrame::source_range() const
{


@ -12,27 +12,17 @@
namespace JS {
NonnullRefPtr<SourceCode const> SourceCode::create(String filename, String code)
NonnullRefPtr<SourceCode const> SourceCode::create(String filename, Utf16String code)
{
return adopt_ref(*new SourceCode(move(filename), move(code)));
}
SourceCode::SourceCode(String filename, String code)
SourceCode::SourceCode(String filename, Utf16String code)
: m_filename(move(filename))
, m_code(move(code))
{
}
String const& SourceCode::filename() const
{
return m_filename;
}
String const& SourceCode::code() const
{
return m_code;
}
void SourceCode::fill_position_cache() const
{
constexpr size_t predicted_minimum_cached_positions = 8;
@ -46,22 +36,24 @@ void SourceCode::fill_position_cache() const
size_t line = 1;
size_t column = 1;
size_t offset_of_last_starting_point = 0;
m_cached_positions.ensure_capacity(predicted_minimum_cached_positions + m_code.bytes().size() / maximum_distance_between_cached_positions);
m_cached_positions.ensure_capacity(predicted_minimum_cached_positions + (m_code.length_in_code_units() / maximum_distance_between_cached_positions));
m_cached_positions.append({ .line = 1, .column = 1, .offset = 0 });
Utf8View const view(m_code);
auto view = m_code.utf16_view();
for (auto it = view.begin(); it != view.end(); ++it) {
u32 code_point = *it;
bool is_line_terminator = code_point == '\r' || (code_point == '\n' && previous_code_point != '\r') || code_point == LINE_SEPARATOR || code_point == PARAGRAPH_SEPARATOR;
auto byte_offset = view.byte_offset_of(it);
auto offset = view.iterator_offset(it);
bool is_nonempty_line = is_line_terminator && previous_code_point != '\n' && previous_code_point != LINE_SEPARATOR && previous_code_point != PARAGRAPH_SEPARATOR && (code_point == '\n' || previous_code_point != '\r');
auto distance_between_cached_position = byte_offset - offset_of_last_starting_point;
auto distance_between_cached_position = offset - offset_of_last_starting_point;
if ((distance_between_cached_position >= minimum_distance_between_cached_positions && is_nonempty_line) || distance_between_cached_position >= maximum_distance_between_cached_positions) {
m_cached_positions.append({ .line = line, .column = column, .offset = byte_offset });
offset_of_last_starting_point = byte_offset;
m_cached_positions.append({ .line = line, .column = column, .offset = offset });
offset_of_last_starting_point = offset;
}
if (is_line_terminator) {
@ -102,11 +94,11 @@ SourceRange SourceCode::range_from_offsets(u32 start_offset, u32 end_offset) con
u32 previous_code_point = 0;
Utf8View const view(m_code);
for (auto it = view.iterator_at_byte_offset_without_validation(current.offset); it != view.end(); ++it) {
auto view = m_code.utf16_view();
for (auto it = view.iterator_at_code_unit_offset(current.offset); it != view.end(); ++it) {
// If we're on or after the start offset, this is the start position.
if (!start.has_value() && view.byte_offset_of(it) >= start_offset) {
if (!start.has_value() && view.iterator_offset(it) >= start_offset) {
start = Position {
.line = current.line,
.column = current.column,
@ -115,7 +107,7 @@ SourceRange SourceCode::range_from_offsets(u32 start_offset, u32 end_offset) con
}
// If we're on or after the end offset, this is the end position.
if (!end.has_value() && view.byte_offset_of(it) >= end_offset) {
if (!end.has_value() && view.iterator_offset(it) >= end_offset) {
end = Position {
.line = current.line,
.column = current.column,
@ -134,6 +126,7 @@ SourceRange SourceCode::range_from_offsets(u32 start_offset, u32 end_offset) con
current.column = 1;
continue;
}
current.column += 1;
}
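The cached offsets this file stores are now UTF-16 code units rather than UTF-8 bytes, which is why the bookkeeping above switches from byte_offset_of to iterator_offset. A plain-C++ illustration (no AK types) of why the two counts differ once a supplementary-plane character is involved:

#include <string>

int main()
{
    // U+1F600 GRINNING FACE: four bytes in UTF-8, but two UTF-16 code units (a surrogate pair).
    std::string utf8 = "\xF0\x9F\x98\x80";
    std::u16string utf16 = u"\U0001F600";

    bool const four_utf8_bytes = utf8.size() == 4;   // true
    bool const two_utf16_units = utf16.size() == 2;  // true
    return four_utf8_bytes && two_utf16_units ? 0 : 1;
}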


@ -7,6 +7,7 @@
#pragma once
#include <AK/String.h>
#include <AK/Utf16String.h>
#include <AK/Vector.h>
#include <LibJS/Export.h>
#include <LibJS/Forward.h>
@ -16,18 +17,18 @@ namespace JS {
class JS_API SourceCode : public RefCounted<SourceCode> {
public:
static NonnullRefPtr<SourceCode const> create(String filename, String code);
static NonnullRefPtr<SourceCode const> create(String filename, Utf16String code);
String const& filename() const;
String const& code() const;
String const& filename() const { return m_filename; }
Utf16String const& code() const { return m_code; }
SourceRange range_from_offsets(u32 start_offset, u32 end_offset) const;
private:
SourceCode(String filename, String code);
SourceCode(String filename, Utf16String code);
String m_filename;
String m_code;
Utf16String m_code;
// For fast mapping of offsets to line/column numbers, we build a list of
// starting points (with code unit offsets into the source string) and which


@ -67,7 +67,7 @@ void SyntaxHighlighter::rehighlight(Palette const& palette)
position.set_column(position.column() + 1);
};
auto append_token = [&](Utf8View str, Token const& token, bool is_trivia) {
auto append_token = [&](Utf16View const& str, Token const& token, bool is_trivia) {
if (str.is_empty())
return;
@ -100,10 +100,10 @@ void SyntaxHighlighter::rehighlight(Palette const& palette)
bool was_eof = false;
for (auto token = lexer.next(); !was_eof; token = lexer.next()) {
append_token(Utf8View(token.trivia()), token, true);
append_token(token.trivia(), token, true);
auto token_start_position = position;
append_token(Utf8View(token.value()), token, false);
append_token(token.value(), token, false);
if (token.type() == TokenType::Eof)
was_eof = true;
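A minimal sketch of the token loop this hunk feeds, modeled on the REPL highlighting loop later in this commit and assuming only the Lexer/Token calls shown there; the helper name is hypothetical:

#include <AK/StringView.h>
#include <LibJS/Lexer.h>

// Hypothetical helper (not from this change): walk the tokens of a UTF-8 source.
// The lexer transcodes internally; values and trivia are Utf16Views into that copy.
static void walk_tokens(StringView utf8_source)
{
    JS::Lexer lexer(utf8_source);
    for (auto token = lexer.next(); token.type() != JS::TokenType::Eof; token = lexer.next()) {
        auto start = token.offset();                              // code-unit offset
        auto end = start + token.value().length_in_code_units();  // code-unit length
        (void)start;
        (void)end;
    }
}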


@ -5,11 +5,11 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "Token.h"
#include <AK/Assertions.h>
#include <AK/CharacterTypes.h>
#include <AK/GenericLexer.h>
#include <AK/StringBuilder.h>
#include <LibJS/Token.h>
namespace JS {
@ -55,16 +55,16 @@ double Token::double_value() const
VERIFY(type() == TokenType::NumericLiteral);
auto value = this->value();
ByteString buffer;
Utf16String buffer;
if (value.contains('_')) {
buffer = value.replace("_"sv, {}, ReplaceMode::All);
value = buffer;
}
if (value.length() >= 2 && value.starts_with('0')) {
if (value.length_in_code_units() >= 2 && value.starts_with('0')) {
static constexpr auto fallback = NumericLimits<u64>::max();
auto next = value[1];
auto next = value.code_unit_at(1);
// hexadecimal
if (next == 'x' || next == 'X')
@ -79,7 +79,7 @@ double Token::double_value() const
return static_cast<double>(value.substring_view(2).to_number<u64>(TrimWhitespace::No, 2).value_or(fallback));
// also octal, but syntax error in strict mode
if (is_ascii_digit(next) && (!value.contains('8') && !value.contains('9')))
if (is_ascii_digit(next) && !value.contains_any_of({ { '8', '9' } }))
return static_cast<double>(value.substring_view(1).to_number<u64>(TrimWhitespace::No, 8).value_or(fallback));
}
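The prefix checks above (together with the binary/octal branches elided between hunks) pick a radix from the literal's first two code units. A hypothetical standalone helper mirroring that logic; note the real code additionally treats a leading-zero literal containing an 8 or 9 as decimal:

// Hypothetical helper (not from this change): radix implied by a numeric
// literal's first two UTF-16 code units.
static int radix_for_numeric_literal(char16_t c0, char16_t c1)
{
    if (c0 != u'0')
        return 10;
    if (c1 == u'x' || c1 == u'X')
        return 16;
    if (c1 == u'o' || c1 == u'O')
        return 8;
    if (c1 == u'b' || c1 == u'B')
        return 2;
    if (c1 >= u'0' && c1 <= u'9')
        return 8; // legacy octal; a syntax error in strict mode
    return 10;
}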
@ -87,23 +87,25 @@ double Token::double_value() const
return value.to_number<double>(TrimWhitespace::No).value();
}
ByteString Token::string_value(StringValueStatus& status) const
Utf16String Token::string_value(StringValueStatus& status) const
{
VERIFY(type() == TokenType::StringLiteral || type() == TokenType::TemplateLiteralString);
auto is_template = type() == TokenType::TemplateLiteralString;
GenericLexer lexer(is_template ? value() : value().substring_view(1, value().length() - 2));
auto value = this->value();
auto encoding_failure = [&status](StringValueStatus parse_status) -> ByteString {
Utf16GenericLexer lexer(is_template ? value : value.substring_view(1, value.length_in_code_units() - 2));
auto encoding_failure = [&status](StringValueStatus parse_status) -> Utf16String {
status = parse_status;
return {};
};
StringBuilder builder;
StringBuilder builder(StringBuilder::Mode::UTF16);
while (!lexer.is_eof()) {
// No escape, consume one char and continue
if (!lexer.next_is('\\')) {
if (is_template && lexer.next_is('\r')) {
lexer.ignore();
if (lexer.next_is('\n'))
@ -113,7 +115,7 @@ ByteString Token::string_value(StringValueStatus& status) const
continue;
}
builder.append(lexer.consume());
builder.append_code_unit(lexer.consume());
continue;
}
@ -145,8 +147,8 @@ ByteString Token::string_value(StringValueStatus& status) const
continue;
}
// Line continuation
if (lexer.next_is(LINE_SEPARATOR_STRING) || lexer.next_is(PARAGRAPH_SEPARATOR_STRING)) {
lexer.ignore(3);
if (lexer.next_is(LINE_SEPARATOR) || lexer.next_is(PARAGRAPH_SEPARATOR)) {
lexer.ignore();
continue;
}
// Null-byte escape
@ -170,11 +172,11 @@ ByteString Token::string_value(StringValueStatus& status) const
// In non-strict mode LegacyOctalEscapeSequence is allowed in strings:
// https://tc39.es/ecma262/#sec-additional-syntax-string-literals
Optional<ByteString> octal_str;
Optional<Utf16View> octal_str;
auto is_octal_digit = [](char ch) { return ch >= '0' && ch <= '7'; };
auto is_zero_to_three = [](char ch) { return ch >= '0' && ch <= '3'; };
auto is_four_to_seven = [](char ch) { return ch >= '4' && ch <= '7'; };
auto is_octal_digit = [](auto ch) { return ch >= '0' && ch <= '7'; };
auto is_zero_to_three = [](auto ch) { return ch >= '0' && ch <= '3'; };
auto is_four_to_seven = [](auto ch) { return ch >= '4' && ch <= '7'; };
// OctalDigit [lookahead ∉ OctalDigit]
if (is_octal_digit(lexer.peek()) && !is_octal_digit(lexer.peek(1)))
@ -201,18 +203,19 @@ ByteString Token::string_value(StringValueStatus& status) const
if (lexer.next_is('8') || lexer.next_is('9')) {
status = StringValueStatus::LegacyOctalEscapeSequence;
builder.append(lexer.consume());
builder.append_code_unit(lexer.consume());
continue;
}
lexer.retreat();
builder.append(lexer.consume_escaped_character('\\', "b\bf\fn\nr\rt\tv\v"sv));
builder.append_code_unit(lexer.consume_escaped_character('\\', "b\bf\fn\nr\rt\tv\v"sv));
}
return builder.to_byte_string();
return builder.to_utf16_string();
}
// 12.8.6.2 Static Semantics: TRV, https://tc39.es/ecma262/#sec-static-semantics-trv
ByteString Token::raw_template_value() const
Utf16String Token::raw_template_value() const
{
return value().replace("\r\n"sv, "\n"sv, ReplaceMode::All).replace("\r"sv, "\n"sv, ReplaceMode::All);
}
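string_value() now accumulates UTF-16 directly. A minimal sketch of the builder pattern, assuming only the AK calls visible in this hunk (StringBuilder::Mode::UTF16, append_code_unit, to_utf16_string); the helper name is hypothetical:

#include <AK/StringBuilder.h>
#include <AK/Utf16String.h>
#include <AK/Utf16View.h>

// Hypothetical helper (not from this change): copy a Utf16View code unit by code
// unit, the way the unescaped path of string_value() does.
static Utf16String copy_code_units(Utf16View const& input)
{
    StringBuilder builder(StringBuilder::Mode::UTF16);
    for (size_t i = 0; i < input.length_in_code_units(); ++i)
        builder.append_code_unit(input.code_unit_at(i));
    return builder.to_utf16_string();
}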
@ -220,7 +223,7 @@ ByteString Token::raw_template_value() const
bool Token::bool_value() const
{
VERIFY(type() == TokenType::BoolLiteral);
return value() == "true";
return value() == "true"sv;
}
bool Token::is_identifier_name() const
@ -273,7 +276,7 @@ bool Token::is_identifier_name() const
bool Token::trivia_contains_line_terminator() const
{
return m_trivia.contains('\n') || m_trivia.contains('\r') || m_trivia.contains(LINE_SEPARATOR_STRING) || m_trivia.contains(PARAGRAPH_SEPARATOR_STRING);
return m_trivia.contains('\n') || m_trivia.contains('\r') || m_trivia.contains(LINE_SEPARATOR) || m_trivia.contains(PARAGRAPH_SEPARATOR);
}
}


@ -6,34 +6,30 @@
#pragma once
#include <AK/FlyString.h>
#include <AK/String.h>
#include <AK/StringView.h>
#include <AK/Utf16FlyString.h>
#include <AK/Utf16String.h>
#include <AK/Variant.h>
namespace JS {
// U+2028 LINE SEPARATOR
constexpr char const line_separator_chars[] { (char)0xe2, (char)0x80, (char)0xa8, 0 };
constexpr StringView const LINE_SEPARATOR_STRING { line_separator_chars, sizeof(line_separator_chars) - 1 };
constexpr u32 const LINE_SEPARATOR { 0x2028 };
// U+2029 PARAGRAPH SEPARATOR
constexpr char const paragraph_separator_chars[] { (char)0xe2, (char)0x80, (char)0xa9, 0 };
constexpr StringView const PARAGRAPH_SEPARATOR_STRING { paragraph_separator_chars, sizeof(paragraph_separator_chars) - 1 };
constexpr u32 const PARAGRAPH_SEPARATOR { 0x2029 };
// U+00A0 NO BREAK SPACE
constexpr u32 const NO_BREAK_SPACE { 0x00A0 };
constexpr inline char16_t const NO_BREAK_SPACE { 0x00A0 };
// U+200C ZERO WIDTH NON-JOINER
constexpr u32 const ZERO_WIDTH_NON_JOINER { 0x200C };
// U+FEFF ZERO WIDTH NO-BREAK SPACE
constexpr u32 const ZERO_WIDTH_NO_BREAK_SPACE { 0xFEFF };
constexpr inline char16_t const ZERO_WIDTH_NON_JOINER { 0x200C };
// U+200D ZERO WIDTH JOINER
constexpr u32 const ZERO_WIDTH_JOINER { 0x200D };
constexpr inline char16_t const ZERO_WIDTH_JOINER { 0x200D };
// U+2028 LINE SEPARATOR
constexpr inline char16_t const LINE_SEPARATOR { 0x2028 };
// U+2029 PARAGRAPH SEPARATOR
constexpr inline char16_t const PARAGRAPH_SEPARATOR { 0x2029 };
// U+FEFF ZERO WIDTH NO-BREAK SPACE
constexpr inline char16_t const ZERO_WIDTH_NO_BREAK_SPACE { 0xFEFF };
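These constants can shrink from u32 code points (and, for the separators, 3-byte UTF-8 strings) to single char16_t values because every one of them lies in the Basic Multilingual Plane, i.e. occupies exactly one UTF-16 code unit. A standalone check in plain C++ (the helper name is illustrative):

// U+2028 is one UTF-16 code unit but three UTF-8 bytes (string sizes include the NUL).
static_assert(sizeof(u"\u2028") / sizeof(char16_t) == 2);
static_assert(sizeof(u8"\u2028") == 4);

constexpr bool is_js_line_terminator(char16_t code_unit)
{
    return code_unit == u'\n' || code_unit == u'\r'
        || code_unit == u'\u2028' || code_unit == u'\u2029';
}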
#define ENUMERATE_JS_TOKENS \
__ENUMERATE_JS_TOKEN(Ampersand, Operator) \
@ -183,7 +179,7 @@ class Token {
public:
Token() = default;
Token(TokenType type, StringView message, StringView trivia, StringView value, size_t line_number, size_t line_column, size_t offset)
Token(TokenType type, StringView message, Utf16View const& trivia, Utf16View const& value, size_t line_number, size_t line_column, size_t offset)
: m_type(type)
, m_message(message)
, m_trivia(trivia)
@ -202,22 +198,23 @@ public:
static char const* name(TokenType);
StringView message() const { return m_message; }
StringView trivia() const { return m_trivia; }
StringView original_value() const { return m_original_value; }
StringView value() const
Utf16View const& trivia() const { return m_trivia; }
Utf16View const& original_value() const { return m_original_value; }
Utf16View value() const
{
return m_value.visit(
[](StringView view) { return view; },
[](FlyString const& identifier) { return identifier.bytes_as_string_view(); },
[](Empty) -> StringView { VERIFY_NOT_REACHED(); });
[](Utf16View const& view) { return view; },
[](Utf16FlyString const& identifier) { return identifier.view(); },
[](Empty) -> Utf16View { VERIFY_NOT_REACHED(); });
}
FlyString fly_string_value() const
Utf16FlyString fly_string_value() const
{
return m_value.visit(
[](StringView view) -> FlyString { return MUST(FlyString::from_utf8(view)); },
[](FlyString const& identifier) -> FlyString { return identifier; },
[](Empty) -> FlyString { VERIFY_NOT_REACHED(); });
[](Utf16View const& view) { return Utf16FlyString::from_utf16(view); },
[](Utf16FlyString const& identifier) { return identifier; },
[](Empty) -> Utf16FlyString { VERIFY_NOT_REACHED(); });
}
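A minimal sketch of the call-site pattern these accessors support, assuming only the members shown in this header plus the to_utf8_but_should_be_ported_to_utf16() bridge used throughout the parser hunks; the helper name is hypothetical:

#include <AK/FlyString.h>
#include <AK/Utf16FlyString.h>
#include <LibJS/Token.h>

// Hypothetical helper (not from this change): pull an identifier out of a token
// for a consumer that still stores UTF-8.
static FlyString identifier_as_utf8(JS::Token const& token)
{
    Utf16FlyString name = token.fly_string_value();              // interned UTF-16
    return name.view().to_utf8_but_should_be_ported_to_utf16();  // temporary UTF-8 bridge
}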
size_t line_number() const { return m_line_number; }
@ -233,10 +230,10 @@ public:
UnicodeEscapeOverflow,
LegacyOctalEscapeSequence,
};
ByteString string_value(StringValueStatus& status) const;
ByteString raw_template_value() const;
Utf16String string_value(StringValueStatus& status) const;
Utf16String raw_template_value() const;
void set_identifier_value(FlyString value)
void set_identifier_value(Utf16FlyString value)
{
m_value = move(value);
}
@ -247,9 +244,9 @@ public:
private:
TokenType m_type { TokenType::Invalid };
StringView m_message;
StringView m_trivia;
StringView m_original_value;
Variant<Empty, StringView, FlyString> m_value {};
Utf16View m_trivia;
Utf16View m_original_value;
Variant<Empty, Utf16View, Utf16FlyString> m_value;
size_t m_line_number { 0 };
size_t m_line_column { 0 };
size_t m_offset { 0 };


@ -219,7 +219,7 @@ inline AK::Result<GC::Ref<JS::Script>, ParserError> parse_script(StringView path
if (script_or_errors.is_error()) {
auto errors = script_or_errors.release_error();
return ParserError { errors[0], errors[0].source_location_hint(contents) };
return ParserError { errors[0], errors[0].source_location_hint(Utf16String::from_utf8(contents)) };
}
return script_or_errors.release_value();
@ -232,7 +232,7 @@ inline AK::Result<GC::Ref<JS::SourceTextModule>, ParserError> parse_module(Strin
if (script_or_errors.is_error()) {
auto errors = script_or_errors.release_error();
return ParserError { errors[0], errors[0].source_location_hint(contents) };
return ParserError { errors[0], errors[0].source_location_hint(Utf16String::from_utf8(contents)) };
}
return script_or_errors.release_value();
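Both helpers keep taking UTF-8 sources and transcode only on the error path, since the hint now wants UTF-16. A minimal caller-side sketch of the same pattern, assuming only the calls visible in these hunks; the helper name is hypothetical:

#include <AK/Format.h>
#include <AK/StringView.h>
#include <AK/Utf16String.h>
#include <LibJS/ParserError.h>

// Hypothetical helper (not from this change): given a parse error and the original
// UTF-8 source, print the caret hint the way the hunks above do.
static void print_parse_error(JS::ParserError const& error, StringView utf8_source)
{
    auto hint = error.source_location_hint(Utf16String::from_utf8(utf8_source));
    if (!hint.is_empty())
        warnln("{}", hint);
    warnln("SyntaxError: {}", error.to_byte_string());
}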


@ -199,8 +199,10 @@ static ErrorOr<bool> parse_and_run(JS::Realm& realm, StringView source, StringVi
if (!s_as_module) {
auto script_or_error = JS::Script::parse(source, realm, source_name);
if (script_or_error.is_error()) {
auto utf16_source = Utf16String::from_utf8(source);
auto error = script_or_error.error()[0];
auto hint = error.source_location_hint(source);
auto hint = error.source_location_hint(utf16_source);
if (!hint.is_empty())
outln("{}", hint);
@ -213,8 +215,10 @@ static ErrorOr<bool> parse_and_run(JS::Realm& realm, StringView source, StringVi
} else {
auto module_or_error = JS::SourceTextModule::parse(source, realm, source_name);
if (module_or_error.is_error()) {
auto utf16_source = Utf16String::from_utf8(source);
auto error = module_or_error.error()[0];
auto hint = error.source_location_hint(source);
auto hint = error.source_location_hint(utf16_source);
if (!hint.is_empty())
outln("{}", hint);
@ -619,7 +623,7 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
JS::Lexer lexer(line);
bool indenters_starting_line = true;
for (JS::Token token = lexer.next(); token.type() != JS::TokenType::Eof; token = lexer.next()) {
auto length = Utf8View { token.value() }.length();
auto length = token.value().length_in_code_units();
auto start = token.offset();
auto end = start + length;
if (indenters_starting_line) {
@ -680,8 +684,8 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
CompleteProperty,
} mode { Initial };
FlyString variable_name;
FlyString property_name;
Utf16FlyString variable_name;
Utf16FlyString property_name;
// we're only going to complete either
// - <N>
@ -727,11 +731,11 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
}
}
bool last_token_has_trivia = js_token.trivia().length() > 0;
bool last_token_has_trivia = !js_token.trivia().is_empty();
if (mode == CompleteNullProperty) {
mode = CompleteProperty;
property_name = ""_fly_string;
property_name = Utf16FlyString {};
last_token_has_trivia = false; // <name> <dot> [tab] is sensible to complete.
}
@ -740,17 +744,18 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
Vector<Line::CompletionSuggestion> results;
Function<void(JS::Shape const&, StringView)> list_all_properties = [&results, &list_all_properties](JS::Shape const& shape, auto property_pattern) {
Function<void(JS::Shape const&, Utf16FlyString const&)> list_all_properties = [&results, &list_all_properties](JS::Shape const& shape, Utf16FlyString const& property_pattern) {
for (auto const& descriptor : shape.property_table()) {
if (!descriptor.key.is_string())
continue;
auto key = descriptor.key.as_string().to_utf16_string().to_utf8_but_should_be_ported_to_utf16();
if (key.bytes_as_string_view().starts_with(property_pattern)) {
Line::CompletionSuggestion completion { key, Line::CompletionSuggestion::ForSearch };
auto key = descriptor.key.as_string().to_utf16_string();
if (key.starts_with(property_pattern.view())) {
Line::CompletionSuggestion completion { key.to_utf8_but_should_be_ported_to_utf16(), Line::CompletionSuggestion::ForSearch };
if (!results.contains_slow(completion)) { // hide duplicates
results.append(key.to_byte_string());
results.last().invariant_offset = property_pattern.length();
results.last().invariant_offset = property_pattern.length_in_code_units();
}
}
}
@ -759,11 +764,9 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
}
};
auto variable_name_utf16 = Utf16FlyString::from_utf8(variable_name);
switch (mode) {
case CompleteProperty: {
auto reference_or_error = g_vm->resolve_binding(variable_name_utf16, &global_environment);
auto reference_or_error = g_vm->resolve_binding(variable_name, &global_environment);
if (reference_or_error.is_error())
return {};
auto value_or_error = reference_or_error.value().get_value(*g_vm);
@ -785,9 +788,9 @@ static ErrorOr<int> run_repl(bool gc_on_every_allocation, bool syntax_highlight)
list_all_properties(variable.shape(), variable_name);
for (auto const& name : global_environment.declarative_record().bindings()) {
if (name.view().starts_with(variable_name_utf16.view())) {
results.empend(name.view().to_utf8_but_should_be_ported_to_utf16().to_byte_string());
results.last().invariant_offset = variable_name.bytes().size();
if (name.view().starts_with(variable_name.view())) {
results.empend(MUST(name.view().to_byte_string()));
results.last().invariant_offset = variable_name.length_in_code_units();
}
}