From mboxrd@z Thu Jan  1 00:00:00 1970
From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: bruce.richardson@intel.com, David Hunt <david.hunt@intel.com>
Subject: [PATCH v4 3/6] test: unit tests for new distributor burst api
Date: Mon, 9 Jan 2017 07:50:45 +0000
Message-ID: <1483948248-91364-4-git-send-email-david.hunt@intel.com>
In-Reply-To: <1483948248-91364-1-git-send-email-david.hunt@intel.com>
References: <1482381428-148094-2-git-send-email-david.hunt@intel.com>
 <1483948248-91364-1-git-send-email-david.hunt@intel.com>
List-Id: DPDK patches and discussions

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_distributor.c | 501 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 392 insertions(+), 109 deletions(-)

diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 85cb8f3..3871f86 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -40,11 +40,24 @@
 #include
 #include
 #include
+#include

 #define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
 #define BURST 32
 #define BIG_BATCH 1024

+#define DIST_SINGLE 0
+#define DIST_BURST 1
+#define DIST_NUM_TYPES 2
+
+struct worker_params {
+	struct rte_distributor *d;
+	struct rte_distributor_burst *db;
+	int dist_type;
+};
+
+struct worker_params worker_params;
+
 /* statics - all zero-initialized by default */
 static volatile int quit;      /**< general quit variable for all threads */
 static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
@@ -81,17 +94,36 @@ static int
 handle_work(void *arg)
 {
 	struct rte_mbuf *pkt = NULL;
-	struct rte_distributor *d = arg;
-	unsigned count = 0;
-	unsigned id = __sync_fetch_and_add(&worker_idx, 1);
-
-	pkt = rte_distributor_get_pkt(d, id, NULL);
-	while (!quit) {
+	struct rte_mbuf *buf[8] __rte_cache_aligned;
+	struct worker_params *wp = arg;
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
+	unsigned int count = 0, num = 0;
+	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+	int i;
+
+	if (wp->dist_type == DIST_SINGLE) {
+		pkt = rte_distributor_get_pkt(d, id, NULL);
+		while (!quit) {
+			worker_stats[id].handled_packets++, count++;
+			pkt = rte_distributor_get_pkt(d, id, pkt);
+		}
 		worker_stats[id].handled_packets++, count++;
-		pkt = rte_distributor_get_pkt(d, id, pkt);
+		rte_distributor_return_pkt(d, id, pkt);
+	} else {
+		for (i = 0; i < 8; i++)
+			buf[i] = NULL;
+		num = rte_distributor_get_pkt_burst(db, id, buf, buf, num);
+		while (!quit) {
+			worker_stats[id].handled_packets += num;
+			count += num;
+			num = rte_distributor_get_pkt_burst(db, id,
+					buf, buf, num);
+		}
+		worker_stats[id].handled_packets += num;
+		count += num;
+		rte_distributor_return_pkt_burst(db, id, buf, num);
 	}
-	worker_stats[id].handled_packets++, count++;
-	rte_distributor_return_pkt(d, id, pkt);
 	return 0;
 }

@@ -107,12 +139,21 @@ handle_work(void *arg)
  * not necessarily in the same order (as different flows).
  */
 static int
-sanity_test(struct rte_distributor *d, struct rte_mempool *p)
+sanity_test(struct worker_params *wp, struct rte_mempool *p)
 {
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
 	struct rte_mbuf *bufs[BURST];
-	unsigned i;
+	struct rte_mbuf *returns[BURST*2];
+	unsigned int i;
+	unsigned int retries;
+	unsigned int count = 0;
+
+	if (wp->dist_type == DIST_SINGLE)
+		printf("=== Basic distributor sanity tests (single) ===\n");
+	else
+		printf("=== Basic distributor sanity tests (burst) ===\n");

-	printf("=== Basic distributor sanity tests ===\n");
 	clear_packet_count();
 	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
 		printf("line %d: Error getting mbufs from pool\n", __LINE__);
@@ -124,8 +165,21 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	for (i = 0; i < BURST; i++)
 		bufs[i]->hash.usr = 0;

-	rte_distributor_process(d, bufs, BURST);
-	rte_distributor_flush(d);
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_process(d, bufs, BURST);
+		rte_distributor_flush(d);
+	} else {
+		rte_distributor_process_burst(db, bufs, BURST);
+		count = 0;
+		do {
+
+			rte_distributor_flush_burst(db);
+			count += rte_distributor_returned_pkts_burst(db,
+					returns, BURST*2);
+		} while (count < BURST);
+	}
+
+
 	if (total_packet_count() != BURST) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
@@ -146,8 +200,18 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 		for (i = 0; i < BURST; i++)
 			bufs[i]->hash.usr = (i & 1) << 8;

-		rte_distributor_process(d, bufs, BURST);
-		rte_distributor_flush(d);
+		if (wp->dist_type == DIST_SINGLE) {
+			rte_distributor_process(d, bufs, BURST);
+			rte_distributor_flush(d);
+		} else {
+			rte_distributor_process_burst(db, bufs, BURST);
+			count = 0;
+			do {
+				rte_distributor_flush_burst(db);
+				count += rte_distributor_returned_pkts_burst(db,
+						returns, BURST*2);
+			} while (count < BURST);
+		}
 		if (total_packet_count() != BURST) {
 			printf("Line %d: Error, not all packets flushed. "
 					"Expected %u, got %u\n",
@@ -155,24 +219,32 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 			return -1;
 		}

+
 		for (i = 0; i < rte_lcore_count() - 1; i++)
 			printf("Worker %u handled %u packets\n", i,
 					worker_stats[i].handled_packets);
 		printf("Sanity test with two hash values done\n");
-
-		if (worker_stats[0].handled_packets != 16 ||
-				worker_stats[1].handled_packets != 16)
-			return -1;
 	}

 	/* give a different hash value to each packet,
 	 * so load gets distributed */
 	clear_packet_count();
 	for (i = 0; i < BURST; i++)
-		bufs[i]->hash.usr = i;
+		bufs[i]->hash.usr = i+1;
+
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_process(d, bufs, BURST);
+		rte_distributor_flush(d);
+	} else {
+		rte_distributor_process_burst(db, bufs, BURST);
+		count = 0;
+		do {
+			rte_distributor_flush_burst(db);
+			count += rte_distributor_returned_pkts_burst(db,
+					returns, BURST*2);
+		} while (count < BURST);
+	}

-	rte_distributor_process(d, bufs, BURST);
-	rte_distributor_flush(d);
 	if (total_packet_count() != BURST) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
@@ -194,8 +266,15 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	unsigned num_returned = 0;

 	/* flush out any remaining packets */
-	rte_distributor_flush(d);
-	rte_distributor_clear_returns(d);
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_flush(d);
+		rte_distributor_clear_returns(d);
+	} else {
+		rte_distributor_flush_burst(db);
+		rte_distributor_clear_returns_burst(db);
+	}
+
+
 	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
 		printf("line %d: Error getting mbufs from pool\n", __LINE__);
 		return -1;
@@ -203,28 +282,59 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	for (i = 0; i < BIG_BATCH; i++)
 		many_bufs[i]->hash.usr = i << 2;

-	for (i = 0; i < BIG_BATCH/BURST; i++) {
-		rte_distributor_process(d, &many_bufs[i*BURST], BURST);
+	if (wp->dist_type == DIST_SINGLE) {
+		printf("===testing single big burst===\n");
+		for (i = 0; i < BIG_BATCH/BURST; i++) {
+			rte_distributor_process(d, &many_bufs[i*BURST], BURST);
+			num_returned += rte_distributor_returned_pkts(d,
+					&return_bufs[num_returned],
+					BIG_BATCH - num_returned);
+		}
+		rte_distributor_flush(d);
 		num_returned += rte_distributor_returned_pkts(d,
 				&return_bufs[num_returned],
 				BIG_BATCH - num_returned);
+	} else {
+		printf("===testing burst big burst===\n");
+		for (i = 0; i < BIG_BATCH/BURST; i++) {
+			rte_distributor_process_burst(db,
+					&many_bufs[i*BURST], BURST);
+			count = rte_distributor_returned_pkts_burst(db,
+					&return_bufs[num_returned],
+					BIG_BATCH - num_returned);
+			num_returned += count;
+		}
+		rte_distributor_flush_burst(db);
+		count = rte_distributor_returned_pkts_burst(db,
+				&return_bufs[num_returned],
+				BIG_BATCH - num_returned);
+		num_returned += count;
 	}
-	rte_distributor_flush(d);
-	num_returned += rte_distributor_returned_pkts(d,
-			&return_bufs[num_returned], BIG_BATCH - num_returned);
+	retries = 0;
+	do {
+		rte_distributor_flush_burst(db);
+		count = rte_distributor_returned_pkts_burst(db,
+				&return_bufs[num_returned],
+				BIG_BATCH - num_returned);
+		num_returned += count;
+		retries++;
+	} while ((num_returned < BIG_BATCH) && (retries < 100));
+
 	if (num_returned != BIG_BATCH) {
-		printf("line %d: Number returned is not the same as "
-				"number sent\n", __LINE__);
+		printf("line %d: Missing packets, expected %d\n",
+				__LINE__, num_returned);
 		return -1;
 	}
+
 	/* big check - make sure all packets made it back!!
 	 */
 	for (i = 0; i < BIG_BATCH; i++) {
 		unsigned j;
 		struct rte_mbuf *src = many_bufs[i];
-		for (j = 0; j < BIG_BATCH; j++)
+		for (j = 0; j < BIG_BATCH; j++) {
 			if (return_bufs[j] == src)
 				break;
+		}

 		if (j == BIG_BATCH) {
 			printf("Error: could not find source packet #%u\n", i);
@@ -234,7 +344,6 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	printf("Sanity test of returned packets done\n");

 	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

-	printf("\n");
 	return 0;
 }

@@ -249,18 +358,40 @@ static int
 handle_work_with_free_mbufs(void *arg)
 {
 	struct rte_mbuf *pkt = NULL;
-	struct rte_distributor *d = arg;
-	unsigned count = 0;
-	unsigned id = __sync_fetch_and_add(&worker_idx, 1);
-
-	pkt = rte_distributor_get_pkt(d, id, NULL);
-	while (!quit) {
+	struct rte_mbuf *buf[8] __rte_cache_aligned;
+	struct worker_params *wp = arg;
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
+	unsigned int count = 0;
+	unsigned int i;
+	unsigned int num = 0;
+	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+
+	if (wp->dist_type == DIST_SINGLE) {
+		pkt = rte_distributor_get_pkt(d, id, NULL);
+		while (!quit) {
+			worker_stats[id].handled_packets++, count++;
+			rte_pktmbuf_free(pkt);
+			pkt = rte_distributor_get_pkt(d, id, pkt);
+		}
 		worker_stats[id].handled_packets++, count++;
-		rte_pktmbuf_free(pkt);
-		pkt = rte_distributor_get_pkt(d, id, pkt);
+		rte_distributor_return_pkt(d, id, pkt);
+	} else {
+		for (i = 0; i < 8; i++)
+			buf[i] = NULL;
+		num = rte_distributor_get_pkt_burst(db, id, buf, buf, num);
+		while (!quit) {
+			worker_stats[id].handled_packets += num;
+			count += num;
+			for (i = 0; i < num; i++)
+				rte_pktmbuf_free(buf[i]);
+			num = rte_distributor_get_pkt_burst(db,
+					id, buf, buf, num);
+		}
+		worker_stats[id].handled_packets += num;
+		count += num;
+		rte_distributor_return_pkt_burst(db, id, buf, num);
 	}
-	worker_stats[id].handled_packets++, count++;
-	rte_distributor_return_pkt(d, id, pkt);
 	return 0;
 }

@@ -270,26 +401,45 @@ handle_work_with_free_mbufs(void *arg)
  * library.
  */
 static int
-sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
+sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
 {
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
 	unsigned i;
 	struct rte_mbuf *bufs[BURST];

-	printf("=== Sanity test with mbuf alloc/free ===\n");
+	if (wp->dist_type == DIST_SINGLE)
+		printf("=== Sanity test with mbuf alloc/free (single) ===\n");
+	else
+		printf("=== Sanity test with mbuf alloc/free (burst) ===\n");
+
 	clear_packet_count();
 	for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
 		unsigned j;
-		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
-			rte_distributor_process(d, NULL, 0);
+		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0) {
+			if (wp->dist_type == DIST_SINGLE)
+				rte_distributor_process(d, NULL, 0);
+			else
+				rte_distributor_process_burst(db, NULL, 0);
+		}
 		for (j = 0; j < BURST; j++) {
 			bufs[j]->hash.usr = (i+j) << 1;
 			rte_mbuf_refcnt_set(bufs[j], 1);
 		}

-		rte_distributor_process(d, bufs, BURST);
+		if (wp->dist_type == DIST_SINGLE)
+			rte_distributor_process(d, bufs, BURST);
+		else
+			rte_distributor_process_burst(db, bufs, BURST);
 	}

-	rte_distributor_flush(d);
+	if (wp->dist_type == DIST_SINGLE)
+		rte_distributor_flush(d);
+	else
+		rte_distributor_flush_burst(db);
+
+	rte_delay_us(10000);
+
+	if (total_packet_count() < (1<<ITER_POWER)) {
 handle_work_for_shutdown_test(void *arg)
 {
 	struct rte_mbuf *pkt = NULL;
+	struct rte_mbuf *buf[8] __rte_cache_aligned;
+	struct worker_params *wp = arg;
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
+	unsigned int count = 0;
+	unsigned int num = 0;
+	unsigned int total = 0;
+	unsigned int i;
+	unsigned int returned = 0;
+	const unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+
+	if (wp->dist_type == DIST_SINGLE)
+		pkt = rte_distributor_get_pkt(d, id, NULL);
+	else
+		num = rte_distributor_get_pkt_burst(db, id, buf, buf, num);

-	pkt = rte_distributor_get_pkt(d, id, NULL);
 	/* wait for quit single globally, or for worker zero, wait
 	 * for zero_quit */
 	while (!quit && !(id == 0 && zero_quit)) {
-		worker_stats[id].handled_packets++, count++;
-		rte_pktmbuf_free(pkt);
-		pkt = rte_distributor_get_pkt(d, id, NULL);
+		if (wp->dist_type == DIST_SINGLE) {
+			worker_stats[id].handled_packets++, count++;
+			rte_pktmbuf_free(pkt);
+			pkt = rte_distributor_get_pkt(d, id, NULL);
+			num = 1;
+			total += num;
+		} else {
+			worker_stats[id].handled_packets += num;
+			count += num;
+			for (i = 0; i < num; i++)
+				rte_pktmbuf_free(buf[i]);
+			num = rte_distributor_get_pkt_burst(db,
+					id, buf, buf, num);
+			total += num;
+		}
+	}
+	worker_stats[id].handled_packets += num;
+	count += num;
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_return_pkt(d, id, pkt);
+	} else {
+		returned = rte_distributor_return_pkt_burst(db, id, buf, num);
 	}
-	worker_stats[id].handled_packets++, count++;
-	rte_distributor_return_pkt(d, id, pkt);

 	if (id == 0) {
 		/* for worker zero, allow it to restart to pick up last packet
@@ -326,13 +504,29 @@ handle_work_for_shutdown_test(void *arg)
 		 */
 		while (zero_quit)
 			usleep(100);
-		pkt = rte_distributor_get_pkt(d, id, NULL);
+		if (wp->dist_type == DIST_SINGLE) {
+			pkt = rte_distributor_get_pkt(d, id, NULL);
+		} else {
+			num = rte_distributor_get_pkt_burst(db,
+					id, buf, buf, num);
+		}
 		while (!quit) {
 			worker_stats[id].handled_packets++, count++;
 			rte_pktmbuf_free(pkt);
-			pkt = rte_distributor_get_pkt(d, id, NULL);
+			if (wp->dist_type == DIST_SINGLE) {
+				pkt = rte_distributor_get_pkt(d, id, NULL);
+			} else {
+				num = rte_distributor_get_pkt_burst(db,
+						id, buf, buf, num);
+			}
+		}
+		if (wp->dist_type == DIST_SINGLE) {
+			rte_distributor_return_pkt(d, id, pkt);
+		} else {
+			returned = rte_distributor_return_pkt_burst(db,
+					id, buf, num);
+			printf("Num returned = %d\n", returned);
 		}
-		rte_distributor_return_pkt(d, id, pkt);
 	}
 	return 0;
 }

@@ -344,26 +538,37 @@ handle_work_for_shutdown_test(void *arg)
  * library.
  */
 static int
-sanity_test_with_worker_shutdown(struct rte_distributor *d,
+sanity_test_with_worker_shutdown(struct worker_params *wp,
 		struct rte_mempool *p)
 {
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
 	struct rte_mbuf *bufs[BURST];
 	unsigned i;

 	printf("=== Sanity test of worker shutdown ===\n");

 	clear_packet_count();
+
 	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
 		printf("line %d: Error getting mbufs from pool\n", __LINE__);
 		return -1;
 	}

-	/* now set all hash values in all buffers to zero, so all pkts go to the
-	 * one worker thread */
+	/*
+	 * Now set all hash values in all buffers to same value so all
+	 * pkts go to the one worker thread
+	 */
 	for (i = 0; i < BURST; i++)
-		bufs[i]->hash.usr = 0;
+		bufs[i]->hash.usr = 1;
+
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_process(d, bufs, BURST);
+	} else {
+		rte_distributor_process_burst(db, bufs, BURST);
+		rte_distributor_flush_burst(db);
+	}

-	rte_distributor_process(d, bufs, BURST);
 	/* at this point, we will have processed some packets and have a full
 	 * backlog for the other ones at worker 0.
 	 */
@@ -374,14 +579,25 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
 		return -1;
 	}
 	for (i = 0; i < BURST; i++)
-		bufs[i]->hash.usr = 0;
+		bufs[i]->hash.usr = 1;

 	/* get worker zero to quit */
 	zero_quit = 1;
-	rte_distributor_process(d, bufs, BURST);
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_process(d, bufs, BURST);
+		/* flush the distributor */
+		rte_distributor_flush(d);
+	} else {
+		rte_distributor_process_burst(db, bufs, BURST);
+		/* flush the distributor */
+		rte_distributor_flush_burst(db);
+	}
+	rte_delay_us(10000);
+
+	for (i = 0; i < rte_lcore_count() - 1; i++)
+		printf("Worker %u handled %u packets\n", i,
+				worker_stats[i].handled_packets);

-	/* flush the distributor */
-	rte_distributor_flush(d);
 	if (total_packet_count() != BURST * 2) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
@@ -389,10 +605,6 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
 		return -1;
 	}

-	for (i = 0; i < rte_lcore_count() - 1; i++)
-		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
-
 	printf("Sanity test with worker shutdown passed\n\n");
 	return 0;
 }

@@ -401,13 +613,18 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
  * one worker shuts down..
  */
 static int
-test_flush_with_worker_shutdown(struct rte_distributor *d,
+test_flush_with_worker_shutdown(struct worker_params *wp,
 		struct rte_mempool *p)
 {
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
 	struct rte_mbuf *bufs[BURST];
 	unsigned i;

-	printf("=== Test flush fn with worker shutdown ===\n");
+	if (wp->dist_type == DIST_SINGLE)
+		printf("=== Test flush fn with worker shutdown (single) ===\n");
+	else
+		printf("=== Test flush fn with worker shutdown (burst) ===\n");

 	clear_packet_count();
 	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
@@ -420,7 +637,11 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
 	for (i = 0; i < BURST; i++)
 		bufs[i]->hash.usr = 0;

-	rte_distributor_process(d, bufs, BURST);
+	if (wp->dist_type == DIST_SINGLE)
+		rte_distributor_process(d, bufs, BURST);
+	else
+		rte_distributor_process_burst(db, bufs, BURST);
+
 	/* at this point, we will have processed some packets and have a full
 	 * backlog for the other ones at worker 0.
 	 */
@@ -429,9 +650,18 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
 	zero_quit = 1;

 	/* flush the distributor */
-	rte_distributor_flush(d);
+	if (wp->dist_type == DIST_SINGLE)
+		rte_distributor_flush(d);
+	else
+		rte_distributor_flush_burst(db);
+
+	rte_delay_us(10000);

 	zero_quit = 0;
+
+	for (i = 0; i < rte_lcore_count() - 1; i++)
+		printf("Worker %u handled %u packets\n", i,
+				worker_stats[i].handled_packets);
+
 	if (total_packet_count() != BURST) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
@@ -439,10 +669,6 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
 		return -1;
 	}

-	for (i = 0; i < rte_lcore_count() - 1; i++)
-		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
-
 	printf("Flush test with worker shutdown passed\n\n");
 	return 0;
 }

@@ -451,6 +677,7 @@ static int
 test_error_distributor_create_name(void)
 {
 	struct rte_distributor *d = NULL;
+	struct rte_distributor_burst *db = NULL;
 	char *name = NULL;

 	d = rte_distributor_create(name, rte_socket_id(),
@@ -460,6 +687,13 @@ int test_error_distributor_create_name(void)
 		return -1;
 	}

+	db = rte_distributor_create_burst(name, rte_socket_id(),
+			rte_lcore_count() - 1);
+	if (db != NULL || rte_errno != EINVAL) {
+		printf("ERROR: No error on create_burst() with NULL param\n");
+		return -1;
+	}
+
 	return 0;
 }

@@ -468,20 +702,32 @@ static int
 test_error_distributor_create_numworkers(void)
 {
 	struct rte_distributor *d = NULL;
+	struct rte_distributor_burst *db = NULL;
+
 	d = rte_distributor_create("test_numworkers", rte_socket_id(),
 			RTE_MAX_LCORE + 10);
 	if (d != NULL || rte_errno != EINVAL) {
 		printf("ERROR: No error on create() with num_workers > MAX\n");
 		return -1;
 	}
+
+	db = rte_distributor_create_burst("test_numworkers", rte_socket_id(),
+			RTE_MAX_LCORE + 10);
+	if (db != NULL || rte_errno != EINVAL) {
+		printf("ERROR: No error on create_burst() num_workers > MAX\n");
+		return -1;
+	}
+
 	return 0;
 }

 /* Useful function which ensures that all worker functions terminate */
 static void
-quit_workers(struct rte_distributor *d, struct rte_mempool *p)
+quit_workers(struct worker_params *wp, struct rte_mempool *p)
 {
+	struct rte_distributor *d = wp->d;
+	struct rte_distributor_burst *db = wp->db;
 	const unsigned num_workers = rte_lcore_count() - 1;
 	unsigned i;
 	struct rte_mbuf *bufs[RTE_MAX_LCORE];
@@ -491,12 +737,20 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
 	quit = 1;
 	for (i = 0; i < num_workers; i++)
 		bufs[i]->hash.usr = i << 1;
-	rte_distributor_process(d, bufs, num_workers);
+	if (wp->dist_type == DIST_SINGLE)
+		rte_distributor_process(d, bufs, num_workers);
+	else
+		rte_distributor_process_burst(db, bufs, num_workers);

 	rte_mempool_put_bulk(p, (void *)bufs, num_workers);

-	rte_distributor_process(d, NULL, 0);
-	rte_distributor_flush(d);
+	if (wp->dist_type == DIST_SINGLE) {
+		rte_distributor_process(d, NULL, 0);
+		rte_distributor_flush(d);
+	} else {
+		rte_distributor_process_burst(db, NULL, 0);
+		rte_distributor_flush_burst(db);
+	}
 	rte_eal_mp_wait_lcore();
 	quit = 0;
 	worker_idx = 0;
@@ -506,7 +760,9 @@ static int
 test_distributor(void)
 {
 	static struct rte_distributor *d;
+	static struct rte_distributor_burst *db;
 	static struct rte_mempool *p;
+	int i;

 	if (rte_lcore_count() < 2) {
 		printf("ERROR: not enough cores to test distributor\n");
@@ -525,6 +781,19 @@ test_distributor(void)
 		rte_distributor_clear_returns(d);
 	}

+	if (db == NULL) {
+		db = rte_distributor_create_burst("Test_dist_burst",
+				rte_socket_id(),
+				rte_lcore_count() - 1);
+		if (db == NULL) {
+			printf("Error creating burst distributor\n");
+			return -1;
+		}
+	} else {
+		rte_distributor_flush_burst(db);
+		rte_distributor_clear_returns_burst(db);
+	}
+
 	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
 			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
 	if (p == NULL) {
@@ -536,31 +805,45 @@ test_distributor(void)
 		}
 	}

-	rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
-	if (sanity_test(d, p) < 0)
-		goto err;
-	quit_workers(d, p);
+	worker_params.d = d;
+	worker_params.db = db;

-	rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER);
-	if (sanity_test_with_mbuf_alloc(d, p) < 0)
-		goto err;
-	quit_workers(d, p);
+	for (i = 0; i < DIST_NUM_TYPES; i++) {

-	if (rte_lcore_count() > 2) {
-		rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
-				SKIP_MASTER);
-		if (sanity_test_with_worker_shutdown(d, p) < 0)
-			goto err;
-		quit_workers(d, p);
+		worker_params.dist_type = i;

-		rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
-				SKIP_MASTER);
-		if (test_flush_with_worker_shutdown(d, p) < 0)
+		rte_eal_mp_remote_launch(handle_work,
+				&worker_params, SKIP_MASTER);
+		if (sanity_test(&worker_params, p) < 0)
 			goto err;
-		quit_workers(d, p);
+		quit_workers(&worker_params, p);

-	} else {
-		printf("Not enough cores to run tests for worker shutdown\n");
+		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
+				&worker_params, SKIP_MASTER);
+		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
+			goto err;
+		quit_workers(&worker_params, p);
+
+		if (rte_lcore_count() > 2) {
+			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+					&worker_params,
+					SKIP_MASTER);
+			if (sanity_test_with_worker_shutdown(&worker_params,
+					p) < 0)
+				goto err;
+			quit_workers(&worker_params, p);
+
+			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+					&worker_params,
+					SKIP_MASTER);
+			if (test_flush_with_worker_shutdown(&worker_params,
+					p) < 0)
+				goto err;
+			quit_workers(&worker_params, p);
+
+		} else {
+			printf("Too few cores to run worker shutdown test\n");
+		}
 	}

 	if (test_error_distributor_create_numworkers() == -1 ||
@@ -572,7 +855,7 @@ test_distributor(void)
 	return 0;

 err:
-	quit_workers(d, p);
+	quit_workers(&worker_params, p);
 	return -1;
 }

-- 
2.7.4
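
A note for reviewers who want to try the new API outside of this unit test: the burst calls exercised above compose into a small worker/distributor pair. The sketch below is illustrative only; it mirrors how the test drives the API, the example function and variable names are mine, and the header name is an assumption (the prototypes used are simply those implied by the calls in this patch).

	/*
	 * Illustrative sketch, not part of the patch. One worker loop and one
	 * distributor-side drain loop built from the burst calls used by the
	 * tests above. The header name below is an assumption.
	 */
	#include <rte_mbuf.h>
	#include <rte_distributor_burst.h>	/* assumed header for the burst API */

	#define EX_BURST 32			/* matches BURST in the test */

	static volatile int ex_quit;		/* set by the caller to stop workers */
	static unsigned int ex_worker_idx;	/* same worker-id scheme as the test */

	/* Worker: hand back the previous burst and pick up the next one. */
	static int
	example_worker(void *arg)
	{
		struct rte_distributor_burst *db = arg;
		struct rte_mbuf *buf[8] __rte_cache_aligned;
		unsigned int id = __sync_fetch_and_add(&ex_worker_idx, 1);
		unsigned int i, num = 0;

		for (i = 0; i < 8; i++)
			buf[i] = NULL;
		num = rte_distributor_get_pkt_burst(db, id, buf, buf, num);
		while (!ex_quit) {
			/* ... process buf[0..num-1] here ... */
			num = rte_distributor_get_pkt_burst(db, id, buf, buf, num);
		}
		rte_distributor_return_pkt_burst(db, id, buf, num);
		return 0;
	}

	/* Distributor side: feed one burst (n <= EX_BURST), then flush and
	 * poll the return ring until every packet has come back. */
	static unsigned int
	example_distribute(struct rte_distributor_burst *db,
			struct rte_mbuf **pkts, unsigned int n)
	{
		struct rte_mbuf *returns[EX_BURST * 2];
		unsigned int drained = 0;

		rte_distributor_process_burst(db, pkts, n);
		do {
			rte_distributor_flush_burst(db);
			drained += rte_distributor_returned_pkts_burst(db,
					returns, EX_BURST * 2);
		} while (drained < n);

		return drained;
	}

The drain loop is the same pattern the sanity test uses to bound the flush: returned_pkts_burst() is polled after each flush_burst() until every packet handed to process_burst() has been returned by the workers.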