diff --git a/libudpard/udpard.c b/libudpard/udpard.c index f03c941..5c47607 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -53,7 +53,7 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s /// The number of most recent transfers to keep in the history for duplicate rejection. /// Should be a power of two to allow replacement of modulo operation with a bitwise AND. /// -/// Implementation node: we used to store bitmap windows instead of a full list of recent transfer-IDs, but they +/// Implementation note: we used to store bitmap windows instead of a full list of recent transfer-IDs, but they /// were found to offer no advantage except in the perfect scenario of non-restarting senders, and an increased /// implementation complexity (more branches, more lines of code), so they were replaced with a simple list. /// The list works equally well given a non-contiguous transfer-ID stream, unlike the bitmap, thus more robust. @@ -1584,7 +1584,7 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx rx_slot_destroy(slot_ref, self->port->memory.fragment, self->port->memory.slot); } -/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. Returns NULL of OOM. +/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. Returns NULL on OOM. /// We return a pointer to pointer to allow the caller to NULL out the slot on destruction. static rx_slot_t** rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id) { @@ -1705,8 +1705,8 @@ static void rx_port_accept_stateful(udpard_rx_t* const rx, } } -/// The stateless strategy accepts only single-frame transfers and does not maintain any session state. -/// It could be trivially extended to fallback to UNORDERED when multi-frame transfers are detected. +/// The stateless strategy accepts transfers that fit in the first frame after extent truncation. 
+/// It does not maintain any session state. static void rx_port_accept_stateless(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_us_t timestamp, @@ -1715,6 +1715,8 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, const udpard_deleter_t payload_deleter, const uint_fast8_t iface_index) { + // Stateless subscriptions only care about the prefix up to the configured extent. + // If the first frame already covers that much payload, the rest of the transfer is ignored. const size_t required_size = smaller(port->extent, frame->meta.transfer_payload_size); const bool full_transfer = (frame->base.offset == 0) && (frame->base.payload.size >= required_size); if (full_transfer) { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 3a0ca97..61e737f 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -661,9 +661,9 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable); -/// A specialization of udpard_rx_port_new() for scalable stateless subscriptions, where only single-frame transfers -/// are accepted, and no attempt at deduplication is made. This is useful for the heartbeat topic mostly, and perhaps -/// other topics with a great number of publishers and/or very high traffic. +/// A specialization of udpard_rx_port_new() for scalable stateless subscriptions, where only the prefix up to the +/// configured extent is accepted from the first frame, and no attempt at deduplication is made. This is useful for +/// the heartbeat topic mostly, and perhaps other topics with a great number of publishers and/or very high traffic. 
bool udpard_rx_port_new_stateless(udpard_rx_port_t* const self, const size_t extent, const udpard_rx_mem_resources_t memory, diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index 61a0963..9e8b015 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -118,7 +118,7 @@ void test_subject_roundtrip() TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &rx_vtable)); // Send one multi-frame transfer over two interfaces. - std::vector payload(300U); + std::vector payload(600U); for (std::size_t i = 0; i < payload.size(); i++) { payload[i] = static_cast(i); } @@ -133,7 +133,7 @@ void test_subject_roundtrip() make_scattered(payload.data(), payload.size()), nullptr)); udpard_tx_poll(&tx, 1001, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_TRUE(!frames.empty()); + TEST_ASSERT_TRUE(frames.size() > 1U); // Deliver the first interface copy only. for (const auto& frame : frames) { diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 6904c58..721c349 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -199,7 +199,7 @@ void test_out_of_order_multiframe_reassembly() TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 4096U, rx_mem, &rx_vtable)); // Send a payload that spans multiple frames. - std::vector payload(280U); + std::vector payload(600U); for (std::size_t i = 0; i < payload.size(); i++) { payload[i] = static_cast(i ^ 0x5AU); } @@ -214,7 +214,7 @@ void test_out_of_order_multiframe_reassembly() make_scattered(payload.data(), payload.size()), nullptr)); udpard_tx_poll(&tx, 1001, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_TRUE(!frames.empty()); + TEST_ASSERT_TRUE(frames.size() > 1U); // Deliver frames in reverse order to exercise out-of-order reassembly. 
std::reverse(frames.begin(), frames.end()); @@ -243,6 +243,155 @@ void test_out_of_order_multiframe_reassembly() instrumented_allocator_reset(&rx_alloc_fragment); } +void test_stateless_single_frame_acceptance() +{ + seed_prng(); + + // Configure TX and RX. + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); + + udpard_tx_t tx{}; + std::vector frames; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, 0x1234123412341234ULL, 777U, 8U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + tx.mtu[0] = 128U; + tx.mtu[1] = 128U; + tx.mtu[2] = 128U; + tx.user = &frames; + + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxState state{}; + udpard_rx_new(&rx); + rx.user = &state; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 1U, rx_mem, &rx_vtable)); + + // Send and deliver one single-frame transfer. 
+ const std::vector payload{ 0x10U, 0x20U, 0x30U, 0x40U }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 100U, + 10000U, + 1U, + udpard_prio_nominal, + 88U, + udpard_make_subject_endpoint(66U), + make_scattered(payload.data(), payload.size()), + nullptr)); + udpard_tx_poll(&tx, 101U, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1U, frames.size()); + + deliver(frames.front(), rx_mem.fragment, del, &rx, &port, 200U); + udpard_rx_poll(&rx, 201U); + TEST_ASSERT_EQUAL_size_t(1U, state.count); + TEST_ASSERT_EQUAL_size_t(payload.size(), state.payload.size()); + TEST_ASSERT_EQUAL_MEMORY(payload.data(), state.payload.data(), payload.size()); + TEST_ASSERT_EQUAL_UINT64(0U, rx.errors_transfer_malformed); + + // Release all resources. + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0U, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, rx_alloc_fragment.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); +} + +void test_stateless_multiframe_first_frame_handling(const std::size_t extent, const bool expect_accept) +{ + seed_prng(); + + // Configure TX and RX. 
+ instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); + + udpard_tx_t tx{}; + std::vector frames; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, 0x5555666677778888ULL, 999U, 16U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + tx.mtu[0] = 128U; + tx.mtu[1] = 128U; + tx.mtu[2] = 128U; + tx.user = &frames; + + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxState state{}; + udpard_rx_new(&rx); + rx.user = &state; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, extent, rx_mem, &rx_vtable)); + + // Emit a transfer that is guaranteed to span multiple frames. + std::vector payload(600U); + for (std::size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i); + } + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 1000U, + 100000U, + 1U, + udpard_prio_nominal, + 99U, + udpard_make_subject_endpoint(67U), + make_scattered(payload.data(), payload.size()), + nullptr)); + udpard_tx_poll(&tx, 1001U, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_TRUE(frames.size() > 1U); + + // Deliver only the first frame. Stateless mode may accept it if the configured extent is already covered. 
+ deliver(frames.front(), rx_mem.fragment, del, &rx, &port, 2000U); + udpard_rx_poll(&rx, 2001U); + if (expect_accept) { + TEST_ASSERT_EQUAL_size_t(1U, state.count); + TEST_ASSERT_EQUAL_UINT64(0U, rx.errors_transfer_malformed); + TEST_ASSERT_EQUAL_size_t(payload.size(), state.payload_size_wire); + TEST_ASSERT_GREATER_OR_EQUAL_size_t(std::min(extent, payload.size()), state.payload.size()); + TEST_ASSERT_LESS_THAN_size_t(payload.size(), state.payload.size()); + TEST_ASSERT_EQUAL_MEMORY(payload.data(), state.payload.data(), state.payload.size()); + } else { + TEST_ASSERT_EQUAL_size_t(0U, state.count); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_transfer_malformed); + } + + // Release all resources. + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0U, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, rx_alloc_fragment.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); +} + +void test_stateless_multiframe_truncation_small_extent() { test_stateless_multiframe_first_frame_handling(10U, true); } + +void test_stateless_multiframe_truncation_zero_extent() { test_stateless_multiframe_first_frame_handling(0U, true); } + +void test_stateless_multiframe_rejection_large_extent() { test_stateless_multiframe_first_frame_handling(600U, false); } + } // namespace void setUp() {} @@ -253,5 +402,9 @@ int main() UNITY_BEGIN(); RUN_TEST(test_zero_payload_transfer); RUN_TEST(test_out_of_order_multiframe_reassembly); + RUN_TEST(test_stateless_single_frame_acceptance); + RUN_TEST(test_stateless_multiframe_truncation_small_extent); + RUN_TEST(test_stateless_multiframe_truncation_zero_extent); + 
RUN_TEST(test_stateless_multiframe_rejection_large_extent); return UNITY_END(); } diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c index 3949aaa..408942c 100644 --- a/tests/src/test_intrusive_guards.c +++ b/tests/src/test_intrusive_guards.c @@ -41,7 +41,8 @@ static bool eject_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejecti return true; } -static const udpard_tx_vtable_t tx_vtable = { .eject = eject_stub }; +static const udpard_tx_vtable_t tx_vtable = { .eject = eject_stub }; +static const udpard_tx_vtable_t tx_vtable_null_eject = { .eject = NULL }; // RX callback stub used for constructor checks. static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) @@ -51,7 +52,9 @@ static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, (void)transfer; } -static const udpard_rx_port_vtable_t rx_vtable = { .on_message = on_message_stub }; +static const udpard_rx_port_vtable_t rx_vtable = { .on_message = on_message_stub }; +static const udpard_rx_port_vtable_t rx_vtable_null_cb = { .on_message = NULL }; +static const udpard_deleter_vtable_t del_vtable_null_free = { .free = NULL }; static void test_misc_guards(void) { @@ -82,6 +85,16 @@ static void test_tx_new_guards(void) TEST_ASSERT_FALSE(udpard_tx_new(NULL, 1U, 1U, 1U, mem_ok, &tx_vtable)); TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0U, 1U, 1U, mem_ok, &tx_vtable)); TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 1U, 1U, mem_ok, NULL)); + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 1U, 1U, mem_ok, &tx_vtable_null_eject)); + + // Reject invalid payload memory resources. 
+ const udpard_mem_vtable_t bad_alloc = { .base = { .free = free_heap }, .alloc = NULL }; + const udpard_tx_mem_resources_t mem_bad_payload = { + .transfer = make_mem(transfer_pool), + .payload = { make_mem(payload_pool), { .vtable = &bad_alloc, .context = NULL }, make_mem(payload_pool) }, + }; + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 1U, 1U, mem_bad_payload, &tx_vtable)); + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4U, mem_ok, &tx_vtable)); udpard_tx_free(&tx); } @@ -99,15 +112,27 @@ static void test_tx_push_guards(void) TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4U, mem_ok, &tx_vtable)); // Validate argument checks for subject push. - const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); + const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); + const udpard_bytes_scattered_t empty_payload_null = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; + const udpard_bytes_scattered_t invalid_payload_ptr = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; TEST_ASSERT_FALSE( udpard_tx_push(NULL, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); TEST_ASSERT_FALSE( udpard_tx_push(&tx, 2, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, -1, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); + tx.local_uid = 0U; + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); + tx.local_uid = 1U; TEST_ASSERT_FALSE( udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); TEST_ASSERT_FALSE(udpard_tx_push( &tx, 0, 1, 1U, udpard_prio_fast, 1U, (udpard_udpip_ep_t){ .ip = 0U, .port = 0U }, empty_payload, NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), invalid_payload_ptr, NULL)); + 
TEST_ASSERT_TRUE( + udpard_tx_push(&tx, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload_null, NULL)); TEST_ASSERT_TRUE( udpard_tx_push(&tx, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); udpard_tx_free(&tx); @@ -126,17 +151,85 @@ static void test_tx_push_unicast_guards(void) TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 2U, 4U, mem_ok, &tx_vtable)); // Validate argument checks for unicast push. - const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); + const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); + const udpard_bytes_scattered_t empty_payload_null = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; + const udpard_bytes_scattered_t invalid_payload_ptr = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX] = { 0 }; endpoints[0] = (udpard_udpip_ep_t){ .ip = 0x0A000001U, .port = 9000U }; TEST_ASSERT_FALSE(udpard_tx_push_unicast(NULL, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); TEST_ASSERT_FALSE(udpard_tx_push_unicast(&tx, 2, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + TEST_ASSERT_FALSE(udpard_tx_push_unicast(&tx, -1, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + tx.local_uid = 0U; + TEST_ASSERT_FALSE(udpard_tx_push_unicast(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + tx.local_uid = 2U; + TEST_ASSERT_FALSE(udpard_tx_push_unicast(&tx, 0, 1, udpard_prio_nominal, endpoints, invalid_payload_ptr, NULL)); + TEST_ASSERT_TRUE(udpard_tx_push_unicast(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload_null, NULL)); TEST_ASSERT_TRUE(udpard_tx_push_unicast(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); endpoints[0] = (udpard_udpip_ep_t){ .ip = 0U, .port = 0U }; TEST_ASSERT_FALSE(udpard_tx_push_unicast(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); udpard_tx_free(&tx); } +static void 
test_tx_poll_and_free_guards(void) +{ + // Validate no-op guard behavior for poll/free helpers. + static byte_t transfer_pool[1024]; + static byte_t payload_pool[1024]; + const udpard_tx_mem_resources_t mem_ok = { + .transfer = make_mem(transfer_pool), + .payload = { make_mem(payload_pool), make_mem(payload_pool), make_mem(payload_pool) }, + }; + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 10U, 11U, 4U, mem_ok, &tx_vtable)); + udpard_tx_poll(NULL, 0, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&tx, -1, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(NULL)); + udpard_tx_free(&tx); + udpard_tx_free(NULL); +} + +static void test_rx_port_ctor_guards(void) +{ + // Reject invalid memory resources and callback descriptors. + static byte_t session_pool[1024]; + static byte_t fragment_pool[1024]; + const udpard_rx_mem_resources_t rx_mem_ok = { + .session = make_mem(session_pool), + .slot = make_mem(session_pool), + .fragment = make_mem(fragment_pool), + }; + const udpard_mem_vtable_t bad_alloc = { .base = { .free = free_heap }, .alloc = NULL }; + const udpard_mem_vtable_t bad_free = { .base = { .free = NULL }, .alloc = alloc_heap }; + const udpard_rx_mem_resources_t rx_mem_bad_session = { + .session = { .vtable = &bad_alloc, .context = NULL }, + .slot = make_mem(session_pool), + .fragment = make_mem(fragment_pool), + }; + const udpard_rx_mem_resources_t rx_mem_bad_slot = { + .session = make_mem(session_pool), + .slot = { .vtable = &bad_alloc, .context = NULL }, + .fragment = make_mem(fragment_pool), + }; + const udpard_rx_mem_resources_t rx_mem_bad_fragment = { + .session = make_mem(session_pool), + .slot = make_mem(session_pool), + .fragment = { .vtable = &bad_free, .context = NULL }, + }; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 1U, rx_mem_ok, &rx_vtable)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 1U, rx_mem_bad_session, &rx_vtable)); + 
TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 1U, rx_mem_bad_slot, &rx_vtable)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 1U, rx_mem_bad_fragment, &rx_vtable)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 1U, rx_mem_ok, NULL)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 1U, rx_mem_ok, &rx_vtable_null_cb)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1U, rx_mem_ok, &rx_vtable)); + + // Wrapper constructors should fail consistently when base validation fails. + TEST_ASSERT_FALSE(udpard_rx_port_new_stateless(&port, 1U, rx_mem_bad_session, &rx_vtable)); + TEST_ASSERT_FALSE(udpard_rx_port_new_unicast(&port, 1U, rx_mem_bad_session, &rx_vtable)); +} + static void test_rx_port_push_guards(void) { // Prepare RX and port. @@ -163,9 +256,14 @@ static void test_rx_port_push_guards(void) (void)header_serialize(datagram, meta, 0U, crc_full(0U, NULL)); const udpard_bytes_mut_t payload = { .size = sizeof(datagram), .data = datagram }; const udpard_deleter_t del = { .vtable = &del_vtable, .context = NULL }; + const udpard_deleter_t del_bad = { .vtable = &del_vtable_null_free, .context = NULL }; TEST_ASSERT_FALSE( udpard_rx_port_push(NULL, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, NULL, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, -1, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 0U, .port = 1U }, payload, del, 0U)); TEST_ASSERT_FALSE( udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 99U)); TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, @@ -175,8 +273,15 @@ static void test_rx_port_push_guards(void) (udpard_bytes_mut_t){ .size = 1U, .data = NULL }, del, 0U)); + TEST_ASSERT_FALSE(udpard_rx_port_push( + &rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, 
(udpard_deleter_t){ 0 }, 0U)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del_bad, 0U)); TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); + // Free should ignore null arguments. + udpard_rx_port_free(NULL, &port); + udpard_rx_port_free(&rx, NULL); udpard_rx_port_free(&rx, &port); } @@ -190,6 +295,8 @@ int main(void) RUN_TEST(test_tx_new_guards); RUN_TEST(test_tx_push_guards); RUN_TEST(test_tx_push_unicast_guards); + RUN_TEST(test_tx_poll_and_free_guards); + RUN_TEST(test_rx_port_ctor_guards); RUN_TEST(test_rx_port_push_guards); return UNITY_END(); } diff --git a/tests/src/test_intrusive_misc.c b/tests/src/test_intrusive_misc.c index e67c1f0..33433ab 100644 --- a/tests/src/test_intrusive_misc.c +++ b/tests/src/test_intrusive_misc.c @@ -4,7 +4,33 @@ /// SPDX-License-Identifier: MIT #include // NOLINT(bugprone-suspicious-include) +#include "helpers.h" #include +#include + +// Allocates one standalone fragment for intrusive fragment-tree checks. 
+static udpard_fragment_t* make_fragment(const udpard_mem_t fragment_memory, + const udpard_mem_t payload_memory, + const udpard_deleter_t payload_deleter, + const size_t offset, + const void* const data, + const size_t size) +{ + udpard_fragment_t* const out = mem_res_alloc(fragment_memory, sizeof(udpard_fragment_t)); + TEST_ASSERT_NOT_NULL(out); + void* payload = NULL; + if (size > 0U) { + payload = mem_res_alloc(payload_memory, size); + TEST_ASSERT_NOT_NULL(payload); + (void)memcpy(payload, data, size); + } + mem_zero(sizeof(*out), out); + out->offset = offset; + out->view = (udpard_bytes_t){ .size = size, .data = payload }; + out->origin = (udpard_bytes_mut_t){ .size = size, .data = payload }; + out->payload_deleter = payload_deleter; + return out; +} static void test_crc_streamed(void) { @@ -151,6 +177,95 @@ static void test_list(void) TEST_ASSERT_NULL(list.tail); } +static void test_misc_helpers(void) +{ + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + // Check trivial helper branches directly. + udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX] = { 0 }; + endpoints[1] = (udpard_udpip_ep_t){ .ip = 0x0A000001U, .port = 9999U }; + TEST_ASSERT_TRUE(mem_same(mem_frag, mem_frag)); + TEST_ASSERT_FALSE(mem_same(mem_frag, mem_payload)); + // Use same context with different vtables to force the second mem_same() predicate. 
+ const udpard_mem_vtable_t alt_vtable = { .base = { .free = instrumented_allocator_free }, + .alloc = instrumented_allocator_alloc }; + const udpard_mem_t alt_vtable_same_context = { .vtable = &alt_vtable, .context = mem_frag.context }; + TEST_ASSERT_FALSE(mem_same(mem_frag, alt_vtable_same_context)); + TEST_ASSERT_EQUAL_UINT16(0U, valid_ep_bitmap(NULL)); + TEST_ASSERT_EQUAL_UINT16((uint16_t)(1U << 1U), valid_ep_bitmap(endpoints)); + mem_free_payload(del_payload, (udpard_bytes_mut_t){ 0 }); + + // Exercise memory-resource validation failures. + const udpard_mem_vtable_t missing_alloc = { .base = { .free = instrumented_allocator_free }, .alloc = NULL }; + const udpard_mem_vtable_t missing_free = { .base = { .free = NULL }, .alloc = instrumented_allocator_alloc }; + TEST_ASSERT_FALSE(mem_validate((udpard_mem_t){ 0 })); + TEST_ASSERT_FALSE(mem_validate((udpard_mem_t){ .vtable = &missing_alloc, .context = &alloc_payload })); + TEST_ASSERT_FALSE(mem_validate((udpard_mem_t){ .vtable = &missing_free, .context = &alloc_payload })); + TEST_ASSERT_TRUE(mem_validate(mem_payload)); + + // Read across an empty fragment to cover the scattered-reader fast path. + const udpard_bytes_scattered_t tail = { .bytes = { .size = 2U, .data = "CD" }, .next = NULL }; + const udpard_bytes_scattered_t mid = { .bytes = { .size = 0U, .data = "" }, .next = &tail }; + const udpard_bytes_scattered_t head = { .bytes = { .size = 2U, .data = "AB" }, .next = &mid }; + bytes_scattered_reader_t rdr = { .cursor = &head, .position = 0U }; + char out[4] = { 0 }; + TEST_ASSERT_EQUAL_size_t(4U, bytes_scattered_size(head)); + bytes_scattered_read(&rdr, sizeof(out), out); + TEST_ASSERT_EQUAL_MEMORY("ABCD", out, sizeof(out)); + + // Compare fragment ends explicitly. 
+ udpard_fragment_t probe = { 0 }; + probe.offset = 5U; + probe.view.size = 3U; + size_t key = 7U; + TEST_ASSERT_EQUAL_INT32(-1, cavl_compare_fragment_end(&key, &probe.index_offset)); + key = 8U; + TEST_ASSERT_EQUAL_INT32(0, cavl_compare_fragment_end(&key, &probe.index_offset)); + key = 9U; + TEST_ASSERT_EQUAL_INT32(+1, cavl_compare_fragment_end(&key, &probe.index_offset)); + + // Exercise fragment helpers on null inputs. + char sink = 0; + const udpard_fragment_t* null_cursor = NULL; + TEST_ASSERT_NULL(udpard_fragment_seek(NULL, 0U)); + TEST_ASSERT_NULL(udpard_fragment_next(NULL)); + TEST_ASSERT_EQUAL_size_t(0U, udpard_fragment_gather(NULL, 0U, 1U, &sink)); + TEST_ASSERT_EQUAL_size_t(0U, udpard_fragment_gather(&null_cursor, 0U, 1U, &sink)); + + // Drive each disjunct in is_listed(). + udpard_list_t list = { .head = NULL, .tail = NULL }; + udpard_listed_t member = { .next = NULL, .prev = NULL }; + TEST_ASSERT_FALSE(is_listed(&list, &member)); + member.next = &member; + TEST_ASSERT_TRUE(is_listed(&list, &member)); + member.next = NULL; + member.prev = &member; + TEST_ASSERT_TRUE(is_listed(&list, &member)); + member.prev = NULL; + list.head = &member; + TEST_ASSERT_TRUE(is_listed(&list, &member)); + + // Free a small tree starting from a child to cover descent and ascent. 
+ udpard_fragment_t* const root = make_fragment(mem_frag, mem_payload, del_payload, 2U, "BB", 2U); + udpard_fragment_t* const left = make_fragment(mem_frag, mem_payload, del_payload, 0U, "AA", 2U); + udpard_fragment_t* const rght = make_fragment(mem_frag, mem_payload, del_payload, 4U, "CC", 2U); + root->index_offset.lr[0] = &left->index_offset; + root->index_offset.lr[1] = &rght->index_offset; + left->index_offset.up = &root->index_offset; + rght->index_offset.up = &root->index_offset; + udpard_fragment_free_all(left, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0U, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); +} + void setUp(void) {} void tearDown(void) {} @@ -160,5 +275,6 @@ int main(void) UNITY_BEGIN(); RUN_TEST(test_crc_streamed); RUN_TEST(test_list); + RUN_TEST(test_misc_helpers); return UNITY_END(); } diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 2c41b67..66dc0a6 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -62,6 +62,113 @@ static udpard_bytes_mut_t make_datagram(const udpard_mem_t mem, return (udpard_bytes_mut_t){ .size = total_size, .data = data }; } +typedef struct +{ + instrumented_allocator_t alloc_rx_frag; + instrumented_allocator_t alloc_rx_ses; + instrumented_allocator_t alloc_dgram; + udpard_rx_mem_resources_t rx_mem; + udpard_mem_t dgram_mem; + udpard_deleter_t dgram_del; +} rx_mem_fixture_t; + +// Initializes the common RX-side allocators used by intrusive tests. 
+static void rx_mem_fixture_init(rx_mem_fixture_t* const self) +{ + instrumented_allocator_new(&self->alloc_rx_frag); + instrumented_allocator_new(&self->alloc_rx_ses); + instrumented_allocator_new(&self->alloc_dgram); + self->rx_mem = (udpard_rx_mem_resources_t){ + .session = instrumented_allocator_make_resource(&self->alloc_rx_ses), + .slot = instrumented_allocator_make_resource(&self->alloc_rx_ses), + .fragment = instrumented_allocator_make_resource(&self->alloc_rx_frag), + }; + self->dgram_mem = instrumented_allocator_make_resource(&self->alloc_dgram); + self->dgram_del = instrumented_allocator_make_deleter(&self->alloc_dgram); +} + +// Verifies that all intrusive RX allocators are fully released. +static void rx_mem_fixture_fini(rx_mem_fixture_t* const self) +{ + TEST_ASSERT_EQUAL_size_t(0U, self->alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, self->alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, self->alloc_dgram.allocated_fragments); + instrumented_allocator_reset(&self->alloc_rx_frag); + instrumented_allocator_reset(&self->alloc_rx_ses); + instrumented_allocator_reset(&self->alloc_dgram); +} + +// Builds a valid first-frame datagram with a custom total transfer size. 
+static udpard_bytes_mut_t make_first_frame_datagram(const udpard_mem_t mem, + const udpard_prio_t prio, + const uint64_t transfer_id, + const uint64_t sender_uid, + const size_t transfer_payload_size, + const void* const payload, + const size_t payload_size) +{ + const size_t total_size = HEADER_SIZE_BYTES + payload_size; + byte_t* const data = mem_res_alloc(mem, total_size); + TEST_ASSERT_NOT_NULL(data); + if (payload_size > 0U) { + (void)memcpy(&data[HEADER_SIZE_BYTES], payload, payload_size); + } + const meta_t meta = { + .priority = prio, + .transfer_payload_size = (uint32_t)transfer_payload_size, + .transfer_id = transfer_id, + .sender_uid = sender_uid, + }; + const uint32_t prefix_crc = crc_full(payload_size, &data[HEADER_SIZE_BYTES]); + (void)header_serialize(data, meta, 0U, prefix_crc); + return (udpard_bytes_mut_t){ .size = total_size, .data = data }; +} + +// Builds an intrusive RX frame with allocator-backed payload ownership. +static rx_frame_t make_frame(const udpard_mem_t mem, + const udpard_prio_t prio, + const uint64_t transfer_id, + const uint64_t sender_uid, + const size_t offset, + const size_t transfer_payload_size, + const void* const payload, + const size_t payload_size, + const uint32_t crc) +{ + void* data = NULL; + if (payload_size > 0U) { + data = mem_res_alloc(mem, payload_size); + TEST_ASSERT_NOT_NULL(data); + (void)memcpy(data, payload, payload_size); + } + return (rx_frame_t){ + .base = + { + .offset = offset, + .payload = { .size = payload_size, .data = data }, + .origin = { .size = payload_size, .data = data }, + .crc = crc, + }, + .meta = + { + .priority = prio, + .transfer_payload_size = (uint32_t)transfer_payload_size, + .transfer_id = transfer_id, + .sender_uid = sender_uid, + }, + }; +} + +// Counts fragments in offset order for tree-shape checks. 
+static size_t fragment_count(udpard_tree_t* const root) +{ + size_t out = 0U; + for (udpard_tree_t* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) { + out++; + } + return out; +} + static void test_rx_single_frame(void) { // Prepare RX and allocators. @@ -265,6 +372,439 @@ static void test_rx_unicast_remote_endpoint_tracking(void) instrumented_allocator_reset(&alloc_dgram); } +static void test_rx_stateful_session_oom(void) +{ + // Fail the session allocation and ensure the payload is released. + rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + fx.alloc_rx_ses.limit_fragments = 0U; + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = ∩ + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, fx.rx_mem, &callbacks)); + + static const byte_t payload[] = { 0x42U }; + const udpard_bytes_mut_t dgram = + make_datagram(fx.dgram_mem, udpard_prio_nominal, 700U, 0x0102030405060708ULL, 0U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 5000, (udpard_udpip_ep_t){ .ip = 0x0A000004U, .port = 7400U }, dgram, fx.dgram_del, 0U)); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_oom); + TEST_ASSERT_EQUAL_size_t(0U, cap.count); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + rx_mem_fixture_fini(&fx); +} + +static void test_rx_idle_session_retirement(void) +{ + // Retire an idle session through udpard_rx_poll(). 
+ rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, fx.rx_mem, &callbacks)); + + static const byte_t payload[] = { 1U, 2U, 3U }; + const udpard_bytes_mut_t dgram = + make_datagram(fx.dgram_mem, udpard_prio_nominal, 701U, 0x1111222233334444ULL, 0U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 6000, (udpard_udpip_ep_t){ .ip = 0x0A000005U, .port = 7500U }, dgram, fx.dgram_del, 0U)); + udpard_rx_poll(&rx, 6001); + TEST_ASSERT_EQUAL_size_t(1U, cap.count); + TEST_ASSERT_NOT_NULL(port.index_session_by_remote_uid); + TEST_ASSERT_EQUAL_size_t(1U, fx.alloc_rx_ses.allocated_fragments); + + udpard_rx_poll(&rx, 6000 + SESSION_LIFETIME); + TEST_ASSERT_NULL(port.index_session_by_remote_uid); + TEST_ASSERT_NULL(rx.list_session_by_animation.head); + TEST_ASSERT_NULL(rx.list_session_by_animation.tail); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_rx_ses.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + rx_mem_fixture_fini(&fx); +} + +static void test_rx_stateless_first_frame_extent_handling(void) +{ + // Accept a first-frame prefix if it covers the extent; otherwise reject it. 
+ rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + static const struct + { + size_t extent; + bool accept; + } cases[] = { + { 10U, true }, + { 0U, true }, + { 48U, false }, + }; + static const byte_t payload[] = { + 0x00U, 0x01U, 0x02U, 0x03U, 0x04U, 0x05U, 0x06U, 0x07U, 0x08U, 0x09U, 0x0AU, 0x0BU, 0x0CU, 0x0DU, 0x0EU, 0x0FU, + 0x10U, 0x11U, 0x12U, 0x13U, 0x14U, 0x15U, 0x16U, 0x17U, 0x18U, 0x19U, 0x1AU, 0x1BU, 0x1CU, 0x1DU, 0x1EU, 0x1FU, + }; + + for (size_t i = 0U; i < (sizeof(cases) / sizeof(cases[0])); i++) { + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, cases[i].extent, fx.rx_mem, &callbacks)); + + const udpard_bytes_mut_t dgram = make_first_frame_datagram( + fx.dgram_mem, udpard_prio_nominal, 702U + i, 0xABCDEF1234567890ULL, 64U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, + &port, + 7000 + (udpard_us_t)i, + (udpard_udpip_ep_t){ .ip = 0x0A000006U, .port = 7600U }, + dgram, + fx.dgram_del, + 0U)); + if (cases[i].accept) { + TEST_ASSERT_EQUAL_size_t(1U, cap.count); + TEST_ASSERT_EQUAL_UINT64(0U, rx.errors_transfer_malformed); + TEST_ASSERT_EQUAL_size_t(sizeof(payload), cap.payload_size); + TEST_ASSERT_EQUAL_MEMORY(payload, cap.payload, sizeof(payload)); + } else { + TEST_ASSERT_EQUAL_size_t(0U, cap.count); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_transfer_malformed); + } + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + udpard_rx_port_free(&rx, &port); + } + rx_mem_fixture_fini(&fx); +} + +static void test_rx_stateless_nonzero_offset_rejected(void) +{ + // Stateless mode requires the accepted frame to start at offset zero. 
+ rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 10U, fx.rx_mem, &callbacks)); + + static const byte_t payload[] = { 0xAAU, 0xBBU, 0xCCU, 0xDDU }; + const udpard_bytes_mut_t dgram = + make_datagram(fx.dgram_mem, udpard_prio_nominal, 704U, 0x2222333344445555ULL, 8U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 8100, (udpard_udpip_ep_t){ .ip = 0x0A000008U, .port = 7710U }, dgram, fx.dgram_del, 0U)); + TEST_ASSERT_EQUAL_size_t(0U, cap.count); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_transfer_malformed); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + rx_mem_fixture_fini(&fx); +} + +static void test_rx_stateless_fragment_oom(void) +{ + // Fail the stateless fragment allocation and report OOM. + rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + fx.alloc_rx_frag.limit_fragments = 0U; + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 1024U, fx.rx_mem, &callbacks)); + + static const byte_t payload[] = { 0x21U, 0x22U, 0x23U }; + const udpard_bytes_mut_t dgram = + make_datagram(fx.dgram_mem, udpard_prio_high, 703U, 0x0101010101010101ULL, 0U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 8000, (udpard_udpip_ep_t){ .ip = 0x0A000007U, .port = 7700U }, dgram, fx.dgram_del, 0U)); + TEST_ASSERT_EQUAL_size_t(0U, cap.count); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_oom); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + rx_mem_fixture_fini(&fx); +} + +static void test_rx_session_get_slot_paths(void) +{ + // Reuse, timeout, and sacrifice slots explicitly. 
+ rx_mem_fixture_t fx = { 0 }; + udpard_rx_port_t port = { 0 }; + rx_mem_fixture_init(&fx); + port.memory = fx.rx_mem; + + rx_session_t reuse = { 0 }; + reuse.port = &port; + reuse.slots[2] = rx_slot_new(port.memory.slot); + TEST_ASSERT_NOT_NULL(reuse.slots[2]); + reuse.slots[2]->transfer_id = 11U; + TEST_ASSERT_EQUAL_PTR(&reuse.slots[2], rx_session_get_slot(&reuse, 1U, 11U)); + rx_slot_destroy(&reuse.slots[2], port.memory.fragment, port.memory.slot); + + rx_session_t timed = { 0 }; + timed.port = &port; + timed.slots[0] = rx_slot_new(port.memory.slot); + TEST_ASSERT_NOT_NULL(timed.slots[0]); + timed.slots[0]->transfer_id = 22U; + timed.slots[0]->ts_max = 5U; + TEST_ASSERT_EQUAL_PTR(&timed.slots[0], rx_session_get_slot(&timed, 5U + SESSION_LIFETIME, 23U)); + TEST_ASSERT_NOT_NULL(timed.slots[0]); + TEST_ASSERT_EQUAL(HEAT_DEATH, timed.slots[0]->ts_min); + rx_slot_destroy(&timed.slots[0], port.memory.fragment, port.memory.slot); + + rx_session_t full = { 0 }; + full.port = &port; + for (size_t i = 0U; i < RX_SLOT_COUNT; i++) { + full.slots[i] = rx_slot_new(port.memory.slot); + TEST_ASSERT_NOT_NULL(full.slots[i]); + full.slots[i]->transfer_id = (uint64_t)i; + full.slots[i]->ts_max = 100LL + (udpard_us_t)i; + } + full.slots[2]->ts_max = 1U; + TEST_ASSERT_EQUAL_PTR(&full.slots[2], rx_session_get_slot(&full, 10U, 99U)); + TEST_ASSERT_NOT_NULL(full.slots[2]); + TEST_ASSERT_EQUAL(HEAT_DEATH, full.slots[2]->ts_min); + for (size_t i = 0U; i < RX_SLOT_COUNT; i++) { + rx_slot_destroy(&full.slots[i], port.memory.fragment, port.memory.slot); + } + + rx_mem_fixture_fini(&fx); +} + +static void test_rx_slot_update_paths(void) +{ + // Cover mismatch, OOM, and finalize-failure slot updates. + rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + + // Force slot allocation failure. 
+ fx.alloc_rx_ses.limit_fragments = 0U; + TEST_ASSERT_NULL(rx_slot_new(fx.rx_mem.slot)); + fx.alloc_rx_ses.limit_fragments = SIZE_MAX; + + rx_slot_t* slot = rx_slot_new(fx.rx_mem.slot); + TEST_ASSERT_NOT_NULL(slot); + static const byte_t mismatch_payload[] = { 0x01U }; + rx_frame_t mismatch = + make_frame(fx.dgram_mem, udpard_prio_low, 800U, 1U, 0U, 5U, mismatch_payload, sizeof(mismatch_payload), 0U); + slot->ts_min = 0U; + slot->ts_max = 0U; + slot->total_size = 4U; + slot->priority = udpard_prio_high; + uint64_t errors_oom = 0U; + TEST_ASSERT_EQUAL(rx_slot_failure, + rx_slot_update(slot, 0U, fx.rx_mem.fragment, fx.dgram_del, &mismatch, 16U, &errors_oom)); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + rx_slot_destroy(&slot, fx.rx_mem.fragment, fx.rx_mem.slot); + + // Trigger partial-initialization branch and priority-only mismatch. + slot = rx_slot_new(fx.rx_mem.slot); + TEST_ASSERT_NOT_NULL(slot); + static const byte_t prio_payload[] = { 0x7EU }; + rx_frame_t prio_mismatch = + make_frame(fx.dgram_mem, udpard_prio_nominal, 803U, 4U, 0U, 1U, prio_payload, sizeof(prio_payload), 0U); + slot->ts_min = HEAT_DEATH; + slot->ts_max = 0U; + slot->total_size = prio_mismatch.meta.transfer_payload_size; + slot->priority = udpard_prio_high; + errors_oom = 0U; + TEST_ASSERT_EQUAL(rx_slot_failure, + rx_slot_update(slot, 0U, fx.rx_mem.fragment, fx.dgram_del, &prio_mismatch, 16U, &errors_oom)); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + rx_slot_destroy(&slot, fx.rx_mem.fragment, fx.rx_mem.slot); + + slot = rx_slot_new(fx.rx_mem.slot); + TEST_ASSERT_NOT_NULL(slot); + fx.alloc_rx_frag.limit_fragments = 0U; + static const byte_t oom_payload[] = { 0x02U }; + rx_frame_t oom = + make_frame(fx.dgram_mem, udpard_prio_nominal, 801U, 2U, 0U, 1U, oom_payload, sizeof(oom_payload), 0U); + errors_oom = 0U; + TEST_ASSERT_EQUAL(rx_slot_incomplete, + rx_slot_update(slot, 1U, fx.rx_mem.fragment, fx.dgram_del, &oom, 16U, &errors_oom)); + 
TEST_ASSERT_EQUAL_UINT64(1U, errors_oom); + TEST_ASSERT_NULL(slot->fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + fx.alloc_rx_frag.limit_fragments = SIZE_MAX; + rx_slot_destroy(&slot, fx.rx_mem.fragment, fx.rx_mem.slot); + + slot = rx_slot_new(fx.rx_mem.slot); + TEST_ASSERT_NOT_NULL(slot); + static const byte_t crc_payload[] = { 0x03U, 0x04U, 0x05U }; + rx_frame_t crc_bad = make_frame( + fx.dgram_mem, udpard_prio_nominal, 802U, 3U, 0U, sizeof(crc_payload), crc_payload, sizeof(crc_payload), 0U); + errors_oom = 0U; + TEST_ASSERT_EQUAL(rx_slot_failure, + rx_slot_update(slot, 2U, fx.rx_mem.fragment, fx.dgram_del, &crc_bad, 16U, &errors_oom)); + rx_slot_destroy(&slot, fx.rx_mem.fragment, fx.rx_mem.slot); + + rx_mem_fixture_fini(&fx); +} + +static void test_rx_session_update_failure_paths(void) +{ + // Cover compare, slot-OOM, and slot-update failure branches. + rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + udpard_rx_t rx = { 0 }; + udpard_rx_new(&rx); + udpard_rx_port_t port = { 0 }; + port.memory = fx.rx_mem; + port.extent = 64U; + port.vtable = &callbacks; + + // Directly cover uid_a < uid_b comparator branch. + const uint64_t key = 1U; + rx_session_t cmp = { 0 }; + cmp.remote.uid = 2U; + TEST_ASSERT_EQUAL_INT32(-1, cavl_compare_rx_session_by_remote_uid(&key, &cmp.index_remote_uid)); + + // Force slot allocation failure in rx_session_update(). 
+ rx_session_t ses_oom = { 0 }; + ses_oom.port = &port; + ses_oom.remote.uid = 10U; + fx.alloc_rx_ses.limit_fragments = 0U; + static const byte_t p0[] = { 0x44U }; + rx_frame_t f0 = + make_frame(fx.dgram_mem, udpard_prio_nominal, 1000U, ses_oom.remote.uid, 0U, 1U, p0, sizeof(p0), 0U); + rx_session_update( + &ses_oom, &rx, 100U, (udpard_udpip_ep_t){ .ip = 0x0A000009U, .port = 7720U }, &f0, fx.dgram_del, 0U); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_oom); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + fx.alloc_rx_ses.limit_fragments = SIZE_MAX; + + // Force rx_slot_update() failure path in rx_session_update(). + rx_session_t ses_fail = { 0 }; + ses_fail.port = &port; + ses_fail.remote.uid = 11U; + ses_fail.slots[0] = rx_slot_new(port.memory.slot); + TEST_ASSERT_NOT_NULL(ses_fail.slots[0]); + ses_fail.slots[0]->transfer_id = 2000U; + ses_fail.slots[0]->ts_min = 0U; + ses_fail.slots[0]->ts_max = 0U; + ses_fail.slots[0]->total_size = 2U; + ses_fail.slots[0]->priority = udpard_prio_nominal; + static const byte_t p1[] = { 0x55U }; + rx_frame_t f1 = + make_frame(fx.dgram_mem, udpard_prio_nominal, 2000U, ses_fail.remote.uid, 0U, 1U, p1, sizeof(p1), 0U); + rx_session_update( + &ses_fail, &rx, 101U, (udpard_udpip_ep_t){ .ip = 0x0A00000AU, .port = 7730U }, &f1, fx.dgram_del, 0U); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_transfer_malformed); + TEST_ASSERT_NULL(ses_fail.slots[0]); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + rx_mem_fixture_fini(&fx); +} + +static void test_rx_port_free_with_incomplete_transfer(void) +{ + // Ensure rx_port_free() destroys sessions containing unfinished slots. 
+ rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, fx.rx_mem, &callbacks)); + + static const byte_t payload[] = { + 0x10U, 0x11U, 0x12U, 0x13U, 0x14U, 0x15U, 0x16U, 0x17U, 0x18U, 0x19U, 0x1AU, 0x1BU, 0x1CU, 0x1DU, 0x1EU, 0x1FU, + 0x20U, 0x21U, 0x22U, 0x23U, 0x24U, 0x25U, 0x26U, 0x27U, 0x28U, 0x29U, 0x2AU, 0x2BU, 0x2CU, 0x2DU, 0x2EU, 0x2FU, + }; + const udpard_bytes_mut_t dgram = make_first_frame_datagram( + fx.dgram_mem, udpard_prio_nominal, 3000U, 0xDEADBEEF00112233ULL, 64U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 9000, (udpard_udpip_ep_t){ .ip = 0x0A00000BU, .port = 7740U }, dgram, fx.dgram_del, 0U)); + TEST_ASSERT_EQUAL_size_t(0U, cap.count); + TEST_ASSERT_NOT_NULL(port.index_session_by_remote_uid); + + udpard_rx_port_free(&rx, &port); + TEST_ASSERT_NULL(port.index_session_by_remote_uid); + TEST_ASSERT_NULL(rx.list_session_by_animation.head); + TEST_ASSERT_NULL(rx.list_session_by_animation.tail); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + rx_mem_fixture_fini(&fx); +} + +static void test_rx_fragment_tree_update_paths(void) +{ + // Cover extent rejection, heuristic rejection, victim eviction, and prefix updates. 
+ rx_mem_fixture_t fx = { 0 }; + rx_mem_fixture_init(&fx); + + udpard_tree_t* root = NULL; + size_t prefix = 0U; + static const byte_t one[] = { 0xAAU }; + rx_frame_t beyond = make_frame(fx.dgram_mem, udpard_prio_nominal, 900U, 1U, 4U, 5U, one, sizeof(one), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, beyond.base, 5U, 4U, &prefix)); + TEST_ASSERT_NULL(root); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + rx_frame_t zero_extent = make_frame(fx.dgram_mem, udpard_prio_nominal, 901U, 1U, 1U, 2U, one, sizeof(one), 0U); + TEST_ASSERT_EQUAL( + rx_fragment_tree_rejected, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, zero_extent.base, 2U, 0U, &prefix)); + TEST_ASSERT_NULL(root); + TEST_ASSERT_EQUAL_size_t(0U, fx.alloc_dgram.allocated_fragments); + + static const byte_t aaaa[] = { 'A', 'A', 'A', 'A' }; + static const byte_t bbbb[] = { 'B', 'B' }; + static const byte_t cccc[] = { 'C', 'C', 'C', 'C' }; + static const byte_t dddd[] = { 'D', 'D', 'D', 'D' }; + rx_frame_t f0 = make_frame(fx.dgram_mem, udpard_prio_nominal, 902U, 1U, 0U, 8U, aaaa, sizeof(aaaa), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, f0.base, 8U, 8U, &prefix)); + rx_frame_t contained = make_frame(fx.dgram_mem, udpard_prio_nominal, 903U, 1U, 1U, 8U, bbbb, sizeof(bbbb), 0U); + TEST_ASSERT_EQUAL( + rx_fragment_tree_rejected, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, contained.base, 8U, 8U, &prefix)); + rx_frame_t f2 = make_frame(fx.dgram_mem, udpard_prio_nominal, 904U, 1U, 2U, 8U, cccc, sizeof(cccc), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, f2.base, 8U, 8U, &prefix)); + TEST_ASSERT_EQUAL_size_t(6U, prefix); + TEST_ASSERT_EQUAL_size_t(6U, rx_fragment_tree_update_covered_prefix(root, prefix, 7U, 1U)); + rx_frame_t reject = 
make_frame(fx.dgram_mem, udpard_prio_nominal, 905U, 1U, 1U, 8U, dddd, sizeof(dddd), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, reject.base, 8U, 8U, &prefix)); + TEST_ASSERT_EQUAL_size_t(2U, fragment_count(root)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(fx.rx_mem.fragment)); + root = NULL; + + prefix = 0U; + static const byte_t bridge[] = { 'X', 'X', 'X', 'X', 'X', 'X' }; + rx_frame_t left = make_frame(fx.dgram_mem, udpard_prio_nominal, 906U, 1U, 0U, 12U, aaaa, sizeof(aaaa), 0U); + rx_frame_t victim = make_frame(fx.dgram_mem, udpard_prio_nominal, 907U, 1U, 4U, 12U, bbbb, sizeof(bbbb), 0U); + rx_frame_t right = make_frame(fx.dgram_mem, udpard_prio_nominal, 908U, 1U, 6U, 12U, cccc, sizeof(cccc), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, left.base, 12U, 12U, &prefix)); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, victim.base, 12U, 12U, &prefix)); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, right.base, 12U, 12U, &prefix)); + TEST_ASSERT_EQUAL_size_t(3U, fragment_count(root)); + rx_frame_t join = make_frame(fx.dgram_mem, udpard_prio_nominal, 909U, 1U, 2U, 12U, bridge, sizeof(bridge), 0U); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, + rx_fragment_tree_update(&root, fx.rx_mem.fragment, fx.dgram_del, join.base, 12U, 12U, &prefix)); + TEST_ASSERT_EQUAL_size_t(3U, fragment_count(root)); + udpard_fragment_t* const mid = udpard_fragment_seek((udpard_fragment_t*)root, 5U); + TEST_ASSERT_NOT_NULL(mid); + TEST_ASSERT_EQUAL_size_t(2U, mid->offset); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(fx.rx_mem.fragment)); + + rx_mem_fixture_fini(&fx); +} + void setUp(void) {} void tearDown(void) {} @@ -275,5 +815,15 @@ int main(void) 
RUN_TEST(test_rx_duplicate_rejected_and_freed); RUN_TEST(test_rx_malformed_frame); RUN_TEST(test_rx_unicast_remote_endpoint_tracking); + RUN_TEST(test_rx_stateful_session_oom); + RUN_TEST(test_rx_idle_session_retirement); + RUN_TEST(test_rx_stateless_first_frame_extent_handling); + RUN_TEST(test_rx_stateless_nonzero_offset_rejected); + RUN_TEST(test_rx_stateless_fragment_oom); + RUN_TEST(test_rx_session_get_slot_paths); + RUN_TEST(test_rx_slot_update_paths); + RUN_TEST(test_rx_session_update_failure_paths); + RUN_TEST(test_rx_port_free_with_incomplete_transfer); + RUN_TEST(test_rx_fragment_tree_update_paths); return UNITY_END(); } diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index c02715b..f15e2aa 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -9,8 +9,10 @@ typedef struct { - bool allow; - size_t count; + bool allow; + bool retain_first; + size_t count; + udpard_bytes_t held; struct { uint64_t transfer_id; @@ -36,6 +38,10 @@ static bool eject_capture(udpard_tx_t* const tx, udpard_tx_ejection_t* const eje if (!st->allow) { return false; } + if (st->retain_first && (st->count == 0U)) { + st->held = ejection->datagram; + udpard_tx_refcount_inc(ejection->datagram); + } if (st->count < (sizeof(st->items) / sizeof(st->items[0]))) { meta_t meta = { 0 }; uint32_t offset = 0; @@ -66,7 +72,7 @@ static void fixture_init(tx_fixture_t* const self, const size_t queue_limit, con for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { self->mem.payload[i] = instrumented_allocator_make_resource(&self->payload_alloc); } - self->eject = (eject_state_t){ .allow = allow_eject, .count = 0U }; + self->eject = (eject_state_t){ .allow = allow_eject, .retain_first = false, .count = 0U, .held = { 0 } }; TEST_ASSERT_TRUE(udpard_tx_new(&self->tx, 0x1122334455667788ULL, 123U, queue_limit, self->mem, &tx_vtable)); for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { self->tx.mtu[i] = mtu; @@ -184,6 +190,236 @@ static void 
test_tx_transfer_id_masking(void) fixture_fini(&fx); } +static void test_tx_capacity_failure(void) +{ + // Reject a transfer that cannot ever fit into the queue. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 1U, 128U, true); + byte_t data[600] = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_push(&fx.tx, + 0, + 10000, + 1U, + udpard_prio_nominal, + 1U, + udpard_make_subject_endpoint(444U), + make_scattered(data, 600U), + NULL)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_capacity); + TEST_ASSERT_EQUAL_size_t(0U, fx.transfer_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.payload_alloc.allocated_fragments); + fixture_fini(&fx); +} + +static void test_tx_spool_oom_rollback(void) +{ + // Abort a partially built spool cleanly on payload-frame OOM. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 4U, 128U, true); + fx.payload_alloc.limit_fragments = 1U; + byte_t data[600] = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_push(&fx.tx, + 0, + 10000, + 1U, + udpard_prio_nominal, + 2U, + udpard_make_subject_endpoint(555U), + make_scattered(data, 600U), + NULL)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_oom); + TEST_ASSERT_EQUAL_size_t(0U, fx.tx.enqueued_frames_count); + TEST_ASSERT_EQUAL_size_t(0U, fx.transfer_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.payload_alloc.allocated_fragments); + fixture_fini(&fx); +} + +static void test_tx_refcount_retention(void) +{ + // Retain one frame, then verify capacity handling while no transfer remains to sacrifice. 
+ tx_fixture_t fx = { 0 }; + fixture_init(&fx, 2U, 128U, true); + fx.eject.retain_first = true; + const byte_t data[] = { 0xAB }; + TEST_ASSERT_TRUE(udpard_tx_push(&fx.tx, + 0, + 10000, + 1U, + udpard_prio_nominal, + 3U, + udpard_make_subject_endpoint(666U), + make_scattered(data, 1U), + NULL)); + udpard_tx_poll(&fx.tx, 1, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1U, fx.eject.count); + TEST_ASSERT_EQUAL_size_t(1U, fx.payload_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(&fx.tx)); + + // With only a retained frame left, queue-space reclamation cannot sacrifice anything. + byte_t large[600] = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_push(&fx.tx, + 2, + 10000, + 1U, + udpard_prio_nominal, + 4U, + udpard_make_subject_endpoint(667U), + make_scattered(large, 600U), + NULL)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_capacity); + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(NULL)); + udpard_tx_refcount_inc((udpard_bytes_t){ 0 }); + udpard_tx_refcount_dec((udpard_bytes_t){ 0 }); + + udpard_tx_free(&fx.tx); + TEST_ASSERT_EQUAL_size_t(0U, fx.transfer_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1U, fx.payload_alloc.allocated_fragments); + udpard_tx_refcount_dec(fx.eject.held); + TEST_ASSERT_EQUAL_size_t(0U, fx.payload_alloc.allocated_fragments); + instrumented_allocator_reset(&fx.transfer_alloc); + instrumented_allocator_reset(&fx.payload_alloc); +} + +static void test_tx_validate_and_compare_deadlines(void) +{ + // Exercise constructor validation and deadline comparison branches directly. 
+ instrumented_allocator_t payload_alloc = { 0 }; + instrumented_allocator_new(&payload_alloc); + const udpard_mem_t valid = instrumented_allocator_make_resource(&payload_alloc); + const udpard_tx_mem_resources_t memory = { + .transfer = { 0 }, + .payload = { valid, valid, valid }, + }; + udpard_tx_t tx = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 1U, 1U, memory, &tx_vtable)); + + tx_transfer_t early = { 0 }; + tx_transfer_t late = { 0 }; + early.deadline = 1; + late.deadline = 2; + TEST_ASSERT_EQUAL_INT32(-1, tx_cavl_compare_deadline(&early, &late.index_deadline)); + TEST_ASSERT_EQUAL_INT32(+1, tx_cavl_compare_deadline(&late, &early.index_deadline)); + + tx_transfer_t a = { 0 }; + tx_transfer_t b = { 0 }; + a.deadline = 3; + b.deadline = 3; + const int32_t ab = tx_cavl_compare_deadline(&a, &b.index_deadline); + const int32_t ba = tx_cavl_compare_deadline(&b, &a.index_deadline); + TEST_ASSERT_TRUE((ab == -1) || (ab == +1)); + TEST_ASSERT_EQUAL_INT32(-ab, ba); + instrumented_allocator_reset(&payload_alloc); +} + +static void test_tx_transfer_alloc_oom(void) +{ + // Fail transfer-object allocation before any payload spool is attempted. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 4U, 128U, true); + fx.transfer_alloc.limit_fragments = 0U; + const byte_t data[] = { 0x5AU }; + TEST_ASSERT_FALSE(udpard_tx_push(&fx.tx, + 0, + 10000, + 1U, + udpard_prio_nominal, + 10U, + udpard_make_subject_endpoint(777U), + make_scattered(data, sizeof(data)), + NULL)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_oom); + TEST_ASSERT_EQUAL_size_t(0U, fx.transfer_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, fx.payload_alloc.allocated_fragments); + fixture_fini(&fx); +} + +static void test_tx_eject_stall(void) +{ + // If ejection makes no progress, pending queues should remain intact. 
+ tx_fixture_t fx = { 0 }; + fixture_init(&fx, 8U, 128U, false); + const byte_t data[] = { 0x11U, 0x22U }; + TEST_ASSERT_TRUE(udpard_tx_push(&fx.tx, + 0, + 100000, + 1U, + udpard_prio_nominal, + 11U, + udpard_make_subject_endpoint(778U), + make_scattered(data, sizeof(data)), + NULL)); + TEST_ASSERT_EQUAL_UINT16(1U, udpard_tx_pending_ifaces(&fx.tx)); + udpard_tx_poll(&fx.tx, 1, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(0U, fx.eject.count); + TEST_ASSERT_EQUAL_UINT16(1U, udpard_tx_pending_ifaces(&fx.tx)); + fixture_fini(&fx); +} + +static void test_tx_sharing_branches(void) +{ + // Exercise shareability and prediction logic with mixed allocators. + instrumented_allocator_t alloc_a = { 0 }; + instrumented_allocator_t alloc_b = { 0 }; + instrumented_allocator_new(&alloc_a); + instrumented_allocator_new(&alloc_b); + const udpard_mem_t mem_a = instrumented_allocator_make_resource(&alloc_a); + const udpard_mem_t mem_b = instrumented_allocator_make_resource(&alloc_b); + TEST_ASSERT_TRUE(tx_spool_shareable(128U, mem_a, 128U, mem_a, 120U)); + TEST_ASSERT_TRUE(tx_spool_shareable(256U, mem_a, 128U, mem_a, 120U)); + TEST_ASSERT_FALSE(tx_spool_shareable(256U, mem_a, 128U, mem_a, 200U)); + TEST_ASSERT_FALSE(tx_spool_shareable(128U, mem_a, 128U, mem_b, 120U)); + + const size_t mtu[UDPARD_IFACE_COUNT_MAX] = { 128U, 128U, 128U }; + const udpard_mem_t mem[UDPARD_IFACE_COUNT_MAX] = { mem_a, mem_b, mem_a }; + const size_t predicted_nonshareable = tx_predict_frame_count(mtu, mem, 0x7U, 10U); + const udpard_mem_t mem_all_same[UDPARD_IFACE_COUNT_MAX] = { mem_a, mem_a, mem_a }; + const size_t predicted_shareable = tx_predict_frame_count(mtu, mem_all_same, 0x7U, 10U); + TEST_ASSERT_EQUAL_size_t(2U, predicted_nonshareable); + TEST_ASSERT_EQUAL_size_t(1U, predicted_shareable); + + // Push over two interfaces backed by different payload memory resources to prevent deduplication. 
+ instrumented_allocator_t alloc_transfer = { 0 }; + instrumented_allocator_t alloc_p0 = { 0 }; + instrumented_allocator_t alloc_p1 = { 0 }; + instrumented_allocator_t alloc_p2 = { 0 }; + instrumented_allocator_new(&alloc_transfer); + instrumented_allocator_new(&alloc_p0); + instrumented_allocator_new(&alloc_p1); + instrumented_allocator_new(&alloc_p2); + const udpard_tx_mem_resources_t tx_mem = { + .transfer = instrumented_allocator_make_resource(&alloc_transfer), + .payload = { instrumented_allocator_make_resource(&alloc_p0), + instrumented_allocator_make_resource(&alloc_p1), + instrumented_allocator_make_resource(&alloc_p2) }, + }; + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0102030405060708ULL, 42U, 8U, tx_mem, &tx_vtable)); + tx.user = NULL; + tx.mtu[0] = tx.mtu[1] = tx.mtu[2] = 128U; + const byte_t p[] = { 0xABU }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 10000, + (1U << 0U) | (1U << 1U), + udpard_prio_nominal, + 12U, + udpard_make_subject_endpoint(779U), + make_scattered(p, 1U), + NULL)); + TEST_ASSERT_EQUAL_size_t(2U, tx.enqueued_frames_count); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0U, alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, alloc_p0.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, alloc_p1.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0U, alloc_p2.allocated_fragments); + instrumented_allocator_reset(&alloc_transfer); + instrumented_allocator_reset(&alloc_p0); + instrumented_allocator_reset(&alloc_p1); + instrumented_allocator_reset(&alloc_p2); + instrumented_allocator_reset(&alloc_a); + instrumented_allocator_reset(&alloc_b); +} + void setUp(void) {} void tearDown(void) {} @@ -195,5 +431,12 @@ int main(void) RUN_TEST(test_tx_expiration); RUN_TEST(test_tx_sacrifice_oldest); RUN_TEST(test_tx_transfer_id_masking); + RUN_TEST(test_tx_capacity_failure); + RUN_TEST(test_tx_spool_oom_rollback); + RUN_TEST(test_tx_refcount_retention); + RUN_TEST(test_tx_validate_and_compare_deadlines); + 
RUN_TEST(test_tx_transfer_alloc_oom); + RUN_TEST(test_tx_eject_stall); + RUN_TEST(test_tx_sharing_branches); return UNITY_END(); }