/*
 * NOTE(review): extraction-garbled span — several original header lines
 * (the include guard, arch-conditional key-compare includes, the debug-only
 * RETURN_IF_TRUE macro, and the opening of enum cmp_jump_table_case) have
 * been collapsed onto this one physical line with their original line
 * numbers fused into the text.  The RETURN_IF_TRUE macro body (the lines
 * after the trailing '\') and the matching #else/#endif directives are not
 * visible in this extract — recover them from the original rte_cuckoo_hash.h
 * before attempting to compile; do not hand-reconstruct from this residue.
 */
10 #ifndef _RTE_CUCKOO_HASH_H_ 11 #define _RTE_CUCKOO_HASH_H_ 13 #if defined(RTE_ARCH_X86) 14 #include "rte_cmp_x86.h" 17 #if defined(RTE_ARCH_ARM64) 18 #include "rte_cmp_arm64.h" 22 #if defined(RTE_LIBRTE_HASH_DEBUG) 23 #define RETURN_IF_TRUE(cond, retval) do { \ 28 #define RETURN_IF_TRUE(cond, retval) 34 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 39 enum cmp_jump_table_case {
/*
 * NOTE(review): second opener for enum cmp_jump_table_case — the first
 * appears under the x86/arm64 #if branch on the line above, so this is
 * presumably the non-vector (#else) variant; its body is missing from the
 * extract.  TODO: confirm against the original header.
 */
74 enum cmp_jump_table_case {
/*
 * NOTE(review): the two enumerators below (single-writer add vs.
 * multi-writer add using transactional memory, per the _TM suffix —
 * unverified) belong to a different enum whose opening line (and at least
 * one sibling enumerator, given the gap between original lines 92 and 94)
 * is missing from this extract.
 */
92 ADD_KEY_SINGLEWRITER = 0,
94 ADD_KEY_MULTIWRITER_TM,
/*
 * Tuning constants for the cuckoo hash table, collapsed onto one line by
 * the extractor: 8 entries per bucket, 16-byte key alignment, a 64-slot
 * per-lcore free-slot cache, limits on cuckoo displacement pushes and the
 * BFS search queue, and TSX abort/retry parameters for the transactional
 * multi-writer path.  The trailing 'void *objs[LCORE_CACHE_SIZE];' is a
 * member fragment of a per-lcore cache struct whose declaration is not
 * visible in this extract.
 * NOTE(review): "INVALIDED" in RTE_XABORT_CUCKOO_PATH_INVALIDED is a typo
 * for "INVALIDATED" in the upstream source as well — renaming it would
 * break users, so it is left as-is.
 */
98 #define RTE_HASH_BUCKET_ENTRIES 8 100 #define NULL_SIGNATURE 0 104 #define KEY_ALIGNMENT 16 106 #define LCORE_CACHE_SIZE 64 108 #define RTE_HASH_MAX_PUSHES 100 110 #define RTE_HASH_BFS_QUEUE_MAX_LEN 1000 112 #define RTE_XABORT_CUCKOO_PATH_INVALIDED 0x4 114 #define RTE_HASH_TSX_MAX_RETRY 10 118 void *objs[LCORE_CACHE_SIZE];
/*
 * NOTE(review): struct rte_hash_key — the body (original lines 123-128)
 * is missing from this extract; only the opening brace and the closing
 * line survive.  The struct is aligned to KEY_ALIGNMENT (16 bytes, defined
 * earlier in this file).  Recover the member list from the original header.
 */
122 struct rte_hash_key {
129 } __attribute__((aligned(KEY_ALIGNMENT)));
/*
 * Selects the implementation used to compare hash signatures: a portable
 * scalar path plus x86 SSE and AVX2 vector paths (matching the
 * arch-conditional includes at the top of this file).
 * NOTE(review): the enum's closing '};' is not visible in this extract.
 */
132 enum rte_hash_sig_compare_function {
133 RTE_HASH_COMPARE_SCALAR = 0,
134 RTE_HASH_COMPARE_SSE,
135 RTE_HASH_COMPARE_AVX2,
/*
 * NOTE(review): member fragments of a hash-bucket struct whose enclosing
 * declaration is not visible in this extract.  Each bucket holds
 * RTE_HASH_BUCKET_ENTRIES (8) slots: a current signature, an index into
 * the key store, and a per-slot flag byte.  The gaps between original line
 * numbers (141/143/147) indicate further members are missing here.
 */
141 hash_sig_t sig_current[RTE_HASH_BUCKET_ENTRIES];
143 uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES];
147 uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
/*
 * NOTE(review): lone member fragment of struct queue_node — presumably the
 * back-pointer used when walking a found cuckoo path back from the BFS
 * search (cf. RTE_HASH_BFS_QUEUE_MAX_LEN above); the rest of the struct is
 * missing from this extract.  TODO: confirm against the original header.
 */
192 struct queue_node *prev;
/*
 * NOTE(review): everything below is extractor residue — member and typedef
 * declarations with their terminating ';' stripped, their original order
 * scrambled, and their enclosing declarations removed.  They appear to be
 * (a) two function-pointer typedefs (the hash function taking key/key_len/
 * init_val, and the key-equality comparator taking two keys and a length)
 * and (b) fields of the hash-table handle struct: its name, key length,
 * hash function + seed, custom comparator, signature-compare selector,
 * add-key mode, bucket array, free-slot ring, per-lcore free-slot caches,
 * multi-writer spinlock, and a hardware-transactional-memory support flag.
 * This block is not compilable as-is; restore it from the original header
 * rather than hand-repairing the fragments.
 */
uint32_t(* rte_hash_function)(const void *key, uint32_t key_len, uint32_t init_val)
enum rte_hash_sig_compare_function sig_cmp_fn
uint32_t key_len __rte_cache_aligned
rte_hash_function hash_func
/* NOTE(review): 'struct rte_mbuf __rte_cache_aligned' looks like a mis-joined
 * fragment — an mbuf type has no obvious place in a hash-table handle;
 * verify against the original before trusting it. */
struct rte_mbuf __rte_cache_aligned
/* NOTE(review): the value of RTE_HASH_NAMESIZE was lost by the extractor. */
#define RTE_HASH_NAMESIZE
char name[RTE_HASH_NAMESIZE]
struct rte_ring * free_slots
rte_spinlock_t * multiwriter_lock
int(* rte_hash_cmp_eq_t)(const void *key1, const void *key2, size_t key_len)
struct lcore_cache * local_free_slots
uint8_t hw_trans_mem_support
rte_hash_cmp_eq_t rte_hash_custom_cmp_eq
uint32_t hash_func_init_val
enum add_key_case add_key
struct rte_hash_bucket * buckets
enum cmp_jump_table_case cmp_jump_table_idx