ladybird/Userland/Libraries/LibSQL/AST/Select.cpp
Timothy Flynn 72e41a7dbd LibSQL: Support 64-bit integer values and handle overflow errors
Currently, integers are stored in LibSQL as 32-bit signed integers, even
if the provided type is unsigned. This results in a series of unchecked
unsigned-to-signed conversions and prevents storing 64-bit values.
Further, mathematical operations are performed without similar checks
and without checking for overflow.

This changes SQL::Value to behave like SQLite for INTEGER types. In
SQLite, the INTEGER type does not imply a size or signedness of the
underlying type. Instead, SQLite determines on-the-fly what type is
needed as values are created and updated.

To do so, the SQL::Value variant can now hold an i64 or u64 integer. If
a specific type is requested, invalid conversions are now explicitly an
error (e.g. converting a stored -1 to a u64 will fail). When binary
mathematical operations are performed, we now try to coerce the RHS
value to a type that works with the LHS value, failing the operation if
that isn't possible. Any overflow or invalid operation (e.g. bitshifting
a 64-bit value by more than 64 bits) is an error.
2022-12-14 09:21:30 -05:00
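
The conversion rules described above can be illustrated with a small
standalone sketch. The types and helpers below (StoredInteger, to_u64,
checked_add) are hypothetical and are not the LibSQL API; they only show
the shape of the checks the commit message describes.

    // Minimal sketch, not the actual SQL::Value interface: the stored integer
    // is either a signed or an unsigned 64-bit value, and a conversion to a
    // requested type only succeeds when the value is representable in it.
    #include <cstdint>
    #include <limits>
    #include <optional>
    #include <variant>

    using StoredInteger = std::variant<int64_t, uint64_t>;

    // Hypothetical helper: convert a stored integer to u64, failing for negatives.
    std::optional<uint64_t> to_u64(StoredInteger const& value)
    {
        if (auto const* signed_value = std::get_if<int64_t>(&value)) {
            if (*signed_value < 0)
                return std::nullopt; // e.g. a stored -1 cannot become a u64
            return static_cast<uint64_t>(*signed_value);
        }
        return std::get<uint64_t>(value);
    }

    // Hypothetical helper: checked addition, mirroring the "overflow is an error" rule.
    std::optional<int64_t> checked_add(int64_t lhs, StoredInteger const& rhs_value)
    {
        // Coerce the RHS to the LHS's type first; fail if the value does not fit.
        int64_t rhs = 0;
        if (auto const* unsigned_value = std::get_if<uint64_t>(&rhs_value)) {
            if (*unsigned_value > static_cast<uint64_t>(std::numeric_limits<int64_t>::max()))
                return std::nullopt;
            rhs = static_cast<int64_t>(*unsigned_value);
        } else {
            rhs = std::get<int64_t>(rhs_value);
        }

        // Perform the addition with an overflow check (GCC/Clang builtin).
        int64_t result = 0;
        if (__builtin_add_overflow(lhs, rhs, &result))
            return std::nullopt;
        return result;
    }

Under this scheme, to_u64 on a stored -1 yields no value, and adding 1 to
INT64_MAX reports an overflow error instead of silently wrapping.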

/*
 * Copyright (c) 2021, Jan de Visser <jan@de-visser.net>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/NumericLimits.h>
#include <LibSQL/AST/AST.h>
#include <LibSQL/Database.h>
#include <LibSQL/Meta.h>
#include <LibSQL/Row.h>
namespace SQL::AST {

ResultOr<ResultSet> Select::execute(ExecutionContext& context) const
{
    NonnullRefPtrVector<ResultColumn> columns;

    auto const& result_column_list = this->result_column_list();
    VERIFY(!result_column_list.is_empty());
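
    // Expand a lone '*' in the result column list to every column of every table in the FROM clause.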
    for (auto& table_descriptor : table_or_subquery_list()) {
        if (!table_descriptor.is_table())
            return Result { SQLCommand::Select, SQLErrorCode::NotYetImplemented, "Sub-selects are not yet implemented"sv };

        auto table_def = TRY(context.database->get_table(table_descriptor.schema_name(), table_descriptor.table_name()));

        if (result_column_list.size() == 1 && result_column_list[0].type() == ResultType::All) {
            for (auto& col : table_def->columns()) {
                columns.append(
                    create_ast_node<ResultColumn>(
                        create_ast_node<ColumnNameExpression>(table_def->parent()->name(), table_def->name(), col.name()),
                        ""));
            }
        }
    }

    if (result_column_list.size() != 1 || result_column_list[0].type() != ResultType::All) {
        for (auto& col : result_column_list) {
            if (col.type() == ResultType::All) {
                // FIXME can have '*' for example in conjunction with computed columns
                return Result { SQLCommand::Select, SQLErrorCode::SyntaxError, "*"sv };
            }
            columns.append(col);
        }
    }
    ResultSet result { SQLCommand::Select };

    auto descriptor = adopt_ref(*new TupleDescriptor);
    Tuple tuple(descriptor);
    Vector<Tuple> rows;

    descriptor->empend("__unity__"sv);
    tuple.append(Value { true });
    rows.append(tuple);
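
    // Build the cross product of all rows from the tables in the FROM clause,
    // starting from a single "unity" row.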
    for (auto& table_descriptor : table_or_subquery_list()) {
        if (!table_descriptor.is_table())
            return Result { SQLCommand::Select, SQLErrorCode::NotYetImplemented, "Sub-selects are not yet implemented"sv };

        auto table_def = TRY(context.database->get_table(table_descriptor.schema_name(), table_descriptor.table_name()));
        if (table_def->num_columns() == 0)
            continue;

        auto old_descriptor_size = descriptor->size();
        descriptor->extend(table_def->to_tuple_descriptor());

        while (!rows.is_empty() && (rows.first().size() == old_descriptor_size)) {
            auto cartesian_row = rows.take_first();
            auto table_rows = TRY(context.database->select_all(*table_def));

            for (auto& table_row : table_rows) {
                auto new_row = cartesian_row;
                new_row.extend(table_row);
                rows.append(new_row);
            }
        }
    }
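
    // Collect the ORDER BY terms into a sort descriptor.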
    bool has_ordering { false };
    auto sort_descriptor = adopt_ref(*new TupleDescriptor);
    for (auto& term : m_ordering_term_list) {
        sort_descriptor->append(TupleElementDescriptor { .order = term.order() });
        has_ordering = true;
    }

    Tuple sort_key(sort_descriptor);
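
    // Evaluate the WHERE clause and the result column expressions for each candidate row.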
    for (auto& row : rows) {
        context.current_row = &row;

        if (where_clause()) {
            auto where_result = TRY(where_clause()->evaluate(context)).to_bool();
            if (!where_result.has_value() || !where_result.value())
                continue;
        }

        tuple.clear();
        for (auto& col : columns) {
            auto value = TRY(col.expression()->evaluate(context));
            tuple.append(value);
        }

        if (has_ordering) {
            sort_key.clear();
            for (auto& term : m_ordering_term_list) {
                auto value = TRY(term.expression()->evaluate(context));
                sort_key.append(value);
            }
        }

        result.insert_row(tuple, sort_key);
    }
    if (m_limit_clause != nullptr) {
        size_t limit_value = NumericLimits<size_t>::max();
        size_t offset_value = 0;

        auto limit = TRY(m_limit_clause->limit_expression()->evaluate(context));
        if (!limit.is_null()) {
            auto limit_value_maybe = limit.to_int<size_t>();
            if (!limit_value_maybe.has_value())
                return Result { SQLCommand::Select, SQLErrorCode::SyntaxError, "LIMIT clause must evaluate to an integer value"sv };
            limit_value = limit_value_maybe.value();
        }

        if (m_limit_clause->offset_expression() != nullptr) {
            auto offset = TRY(m_limit_clause->offset_expression()->evaluate(context));
            if (!offset.is_null()) {
                auto offset_value_maybe = offset.to_int<size_t>();
                if (!offset_value_maybe.has_value())
                    return Result { SQLCommand::Select, SQLErrorCode::SyntaxError, "OFFSET clause must evaluate to an integer value"sv };
                offset_value = offset_value_maybe.value();
            }
        }

        result.limit(offset_value, limit_value);
    }

    return result;
}

}