Mirror of https://github.com/LadybirdBrowser/ladybird.git, synced 2025-04-20 19:45:12 +00:00
AK/Lagom: Modify TestSuite to return how many tests failed from main
This allows us to remove the FAIL_REGEX logic from the CTest invocation of the AK and LibRegex tests, as they will return a non-zero exit code on failure :^). It also means that running a failing TestSuite-enabled test with the run-test-and-shutdown script will actually print that the test failed.
This commit is contained in:
parent 5b1edc0678
commit 669b6c43aa

Notes:
sideshowbarker 2024-07-18 21:49:57 +09:00
Author: https://github.com/ADKaster
Commit: https://github.com/SerenityOS/serenity/commit/669b6c43aac
Pull-request: https://github.com/SerenityOS/serenity/pull/5566
2 changed files with 43 additions and 28 deletions
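The core idea is that the test binary's exit status becomes its failure count, so any runner that checks exit codes gets failure detection for free. A minimal standalone sketch of that contract (hypothetical test functions, not the AK implementation):

#include <cstddef>

// Hypothetical stand-in test cases for the sketch.
static bool test_that_passes() { return 1 + 1 == 2; }
static bool test_that_fails() { return 2 + 2 == 5; }

int main()
{
    bool (*tests[])() = { test_that_passes, test_that_fails };

    std::size_t failed_count = 0;
    for (auto* test : tests) {
        if (!test())
            ++failed_count;
    }

    // Returning the failure count makes the exit status non-zero whenever
    // anything failed, so CTest and shell scripts can detect it directly.
    return static_cast<int>(failed_count);
}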
AK/TestSuite.h

@@ -34,6 +34,10 @@ namespace AK {
 template<typename... Parameters>
 void warnln(CheckedFormatString<Parameters...>&& fmtstr, const Parameters&...);
 
+// Declare a helper so that we can call it from VERIFY in included headers
+// before defining TestSuite
+inline void current_test_case_did_fail();
+
 }
 
 using AK::warnln;
@@ -41,8 +45,10 @@ using AK::warnln;
 #undef VERIFY
 #define VERIFY(x) \
     do { \
-        if (!(x)) \
+        if (!(x)) { \
             ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY({}) failed", __FILE__, __LINE__, #x); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 #undef VERIFY_NOT_REACHED
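A side note on the macro shape: now that failure handling is two statements, both the new braces and the existing do { ... } while (false) wrapper are doing real work. A quick illustration of the pitfall the wrapper avoids, using hypothetical helper names:

#include <cstdio>

// Hypothetical helpers, just for the illustration.
static void report_failure(const char* expr) { std::fprintf(stderr, "FAIL: %s\n", expr); }
static void record_failure() { }

// Without the do/while wrapper, a braced multi-statement macro misbehaves
// under an unbraced if/else at the call site:
#define NAIVE_VERIFY(x)     \
    if (!(x)) {             \
        report_failure(#x); \
        record_failure();   \
    }

int main()
{
    bool should_check = true;
    bool value = false;
    if (should_check)
        NAIVE_VERIFY(value);
    // Adding an `else` branch to `if (should_check)` above would not
    // compile: the macro expands to `if (...) { ... };`, and the stray
    // semicolon leaves the `else` with nothing to attach to.
    return 0;
}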
@@ -127,23 +133,28 @@ public:
         s_global = nullptr;
     }
 
-    void run(const NonnullRefPtrVector<TestCase>&);
-    void main(const String& suite_name, int argc, char** argv);
+    int run(const NonnullRefPtrVector<TestCase>&);
+    int main(const String& suite_name, int argc, char** argv);
     NonnullRefPtrVector<TestCase> find_cases(const String& search, bool find_tests, bool find_benchmarks);
     void add_case(const NonnullRefPtr<TestCase>& test_case)
     {
         m_cases.append(test_case);
     }
 
+    void current_test_case_did_fail() { m_current_test_case_passed = false; }
+
 private:
     static TestSuite* s_global;
     NonnullRefPtrVector<TestCase> m_cases;
     u64 m_testtime = 0;
     u64 m_benchtime = 0;
     String m_suite_name;
+    bool m_current_test_case_passed = true;
 };
 
-void TestSuite::main(const String& suite_name, int argc, char** argv)
+inline void current_test_case_did_fail() { TestSuite::the().current_test_case_did_fail(); }
+
+int TestSuite::main(const String& suite_name, int argc, char** argv)
 {
     m_suite_name = suite_name;
 
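The declare-early, define-late shuffle exists because VERIFY may be expanded in headers that are included before TestSuite itself is defined. A stripped-down sketch of the same pattern (simplified names; the real class keeps an explicit s_global pointer with release() rather than a function-local static):

// 1. Declare the free helper before anything that expands VERIFY.
inline void current_test_case_did_fail();

// 2. Define the singleton that owns the per-case pass/fail flag.
class TestSuite {
public:
    static TestSuite& the()
    {
        static TestSuite instance; // simplified singleton for the sketch
        return instance;
    }

    void current_test_case_did_fail() { m_current_test_case_passed = false; }
    bool current_test_case_passed() const { return m_current_test_case_passed; }

private:
    bool m_current_test_case_passed { true };
};

// 3. Only now, with TestSuite complete, define the helper's body.
inline void current_test_case_did_fail() { TestSuite::the().current_test_case_did_fail(); }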
@@ -167,11 +178,12 @@ void TestSuite::main(const String& suite_name, int argc, char** argv)
         for (const auto& test : matching_tests) {
             outln("    {}", test.name());
         }
-    } else {
-        outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
-
-        run(matching_tests);
+        return 0;
     }
+
+    outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
+
+    return run(matching_tests);
 }
 
 NonnullRefPtrVector<TestCase> TestSuite::find_cases(const String& search, bool find_tests, bool find_benchmarks)
@@ -194,9 +206,10 @@ NonnullRefPtrVector<TestCase> TestSuite::find_cases(const String& search, bool find_tests, bool find_benchmarks)
     return matches;
 }
 
-void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
+int TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
 {
     size_t test_count = 0;
+    size_t test_failed_count = 0;
     size_t benchmark_count = 0;
     TestElapsedTimer global_timer;
 
@@ -204,12 +217,13 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
         const auto test_type = t.is_benchmark() ? "benchmark" : "test";
 
         warnln("Running {} '{}'.", test_type, t.name());
+        m_current_test_case_passed = true;
 
         TestElapsedTimer timer;
         t.func()();
         const auto time = timer.elapsed_milliseconds();
 
-        dbgln("Completed {} '{}' in {}ms", test_type, t.name(), time);
+        dbgln("{} {} '{}' in {}ms", m_current_test_case_passed ? "Completed" : "Failed", test_type, t.name(), time);
 
         if (t.is_benchmark()) {
             m_benchtime += time;
@@ -218,6 +232,10 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
             m_testtime += time;
             test_count++;
         }
+
+        if (!m_current_test_case_passed) {
+            test_failed_count++;
+        }
     }
 
     dbgln("Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other).",
@@ -227,10 +245,14 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
         m_testtime,
         m_benchtime,
         global_timer.elapsed_milliseconds() - (m_testtime + m_benchtime));
+    dbgln("Out of {} tests, {} passed and {} failed.", test_count, test_count - test_failed_count, test_failed_count);
+
+    return (int)test_failed_count;
 }
 
 }
 
+using AK::current_test_case_did_fail;
 using AK::TestCase;
 using AK::TestSuite;
 
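One caveat with using a count as the exit status: POSIX exposes only the low 8 bits of it to the parent process, so exactly 256 (or any multiple of 256) failing tests would wrap around to a status of 0 and look like success. That is harmless at typical suite sizes, but a defensive variant (not what this commit does) would clamp the value:

#include <cstddef>

// Hypothetical defensive mapping from failure count to exit status.
int exit_code_for(std::size_t failed_count)
{
    // The OS truncates exit statuses to 8 bits, so clamp at 255 to make
    // sure a large failure count can never wrap around to 0 (success).
    return failed_count > 255 ? 255 : static_cast<int>(failed_count);
}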
@@ -268,16 +290,19 @@ using AK::TestSuite;
     int main(int argc, char** argv) \
     { \
         static_assert(compiletime_lenof(#x) != 0, "Set SuiteName"); \
-        TestSuite::the().main(#x, argc, argv); \
+        int ret = TestSuite::the().main(#x, argc, argv); \
         TestSuite::release(); \
+        return ret; \
     }
 
 #define EXPECT_EQ(a, b) \
     do { \
         auto lhs = (a); \
         auto rhs = (b); \
-        if (lhs != rhs) \
+        if (lhs != rhs) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs }); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 // If you're stuck and `EXPECT_EQ` seems to refuse to print anything useful,
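The FormatIfSupported wrapper above is what lets EXPECT_EQ compile even for types with no AK::Formatter; as I recall, such values print as a placeholder instead of breaking the build. The concept, re-expressed with standard C++20 machinery rather than AK's Formatter traits (an approximation, not the AK code):

#include <ostream>
#include <sstream>
#include <string>

// Format the value if the type is streamable, otherwise return a
// placeholder: roughly the behavior FormatIfSupported provides.
template<typename T>
std::string format_if_supported(const T& value)
{
    if constexpr (requires(std::ostream& os) { os << value; }) {
        std::ostringstream out;
        out << value;
        return out.str();
    }
    return "?"; // type has no usable formatter; keep the diagnostic alive
}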
@@ -286,12 +311,16 @@ using AK::TestSuite;
     do { \
         auto lhs = (a); \
         auto rhs = (b); \
-        if (lhs != rhs) \
+        if (lhs != rhs) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, lhs, rhs); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 #define EXPECT(x) \
     do { \
-        if (!(x)) \
+        if (!(x)) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT({}) failed", __FILE__, __LINE__, #x); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
Meta/Lagom/CMakeLists.txt

@@ -177,13 +177,6 @@ if (BUILD_LAGOM)
                 # FIXME: Only TestJSON needs this property
                 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../AK/Tests
             )
-
-            set_tests_properties(
-                ${name}_lagom
-                PROPERTIES
-                FAIL_REGULAR_EXPRESSION
-                "FAIL"
-            )
         endforeach()
 
         foreach(source ${LIBREGEX_TESTS})
@@ -195,13 +188,6 @@ if (BUILD_LAGOM)
                 COMMAND ${name}_lagom
                 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
             )
-
-            set_tests_properties(
-                ${name}_lagom
-                PROPERTIES
-                FAIL_REGULAR_EXPRESSION
-                "FAIL"
-            )
         endforeach()
     endif()
 endif()
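For context on why the deleted property was there at all: CTest marks a test failed when its process exits non-zero, but these binaries used to exit 0 unconditionally, so failure had to be scraped out of the output with FAIL_REGULAR_EXPRESSION. With meaningful exit codes, the plain registration is sufficient, roughly along these lines (illustrative, not the exact Lagom code):

# With a meaningful exit status, add_test() alone is enough: CTest treats
# a non-zero exit code as failure by default, no output matching needed.
add_test(
    NAME ${name}_lagom
    COMMAND ${name}_lagom
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)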