#include <rte_compat.h>

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128 /**< Max number of bbdev devices */
#endif

uint16_t __rte_experimental
rte_bbdev_count(void);

bool __rte_experimental
rte_bbdev_is_valid(uint16_t dev_id);

uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id);
/** Iterate through all enabled bbdev devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))

int __rte_experimental
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

int __rte_experimental
rte_bbdev_intr_enable(uint16_t dev_id);
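/*
 * Illustrative usage sketch, not part of the header: enumerate the probed
 * bbdev devices with RTE_BBDEV_FOREACH and give each one a small number of
 * queues. The queue count of 4 and the use of rte_socket_id() for the NUMA
 * node are arbitrary example choices.
 */
#include <rte_bbdev.h>
#include <rte_lcore.h>

static void
example_setup_all_bbdevs(void)
{
	uint16_t dev_id;

	if (rte_bbdev_count() == 0)
		return;

	RTE_BBDEV_FOREACH(dev_id) {
		/* Allocate queue structures before any queue is configured */
		if (rte_bbdev_setup_queues(dev_id, 4, rte_socket_id()) < 0)
			continue;
		/* Optional: not every driver supports device interrupts */
		(void)rte_bbdev_intr_enable(dev_id);
	}
}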
int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);

int __rte_experimental
rte_bbdev_start(uint16_t dev_id);

int __rte_experimental
rte_bbdev_stop(uint16_t dev_id);

int __rte_experimental
rte_bbdev_close(uint16_t dev_id);

int __rte_experimental
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

int __rte_experimental
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
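/*
 * Illustrative sketch, not part of the header: configure one queue from the
 * driver's advertised defaults and bring the device up. RTE_BBDEV_OP_TURBO_ENC
 * comes from rte_bbdev_op.h; treating this queue as an encode queue is just
 * an assumption made for the example.
 */
#include <rte_bbdev.h>

static int
example_start_one_queue(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev_info info;
	struct rte_bbdev_queue_conf conf;
	int ret;

	ret = rte_bbdev_info_get(dev_id, &info);
	if (ret != 0)
		return ret;

	/* Start from the driver's default queue configuration ... */
	conf = info.drv.default_queue_conf;
	/* ... and only override the operation type this queue will serve */
	conf.op_type = RTE_BBDEV_OP_TURBO_ENC;

	ret = rte_bbdev_queue_configure(dev_id, queue_id, &conf);
	if (ret != 0)
		return ret;

	/* Queues not set up for deferred start are started with the device */
	return rte_bbdev_start(dev_id);
}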
int __rte_experimental
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);

int __rte_experimental
rte_bbdev_stats_reset(uint16_t dev_id);
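/*
 * Illustrative sketch, not part of the header: read the per-device counters
 * and clear them. Only the two error counters that appear in the symbol index
 * below are printed; the full rte_bbdev_stats layout is defined in
 * rte_bbdev.h.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_bbdev.h>

static void
example_dump_bbdev_errors(uint16_t dev_id)
{
	struct rte_bbdev_stats stats;

	if (rte_bbdev_stats_get(dev_id, &stats) != 0)
		return;

	printf("bbdev %u: enqueue errors %" PRIu64 ", dequeue errors %" PRIu64 "\n",
			dev_id, stats.enqueue_err_count, stats.dequeue_err_count);

	/* Reset so the next read reports a fresh interval */
	rte_bbdev_stats_reset(dev_id);
}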
/** Macro used at the end of a bbdev driver's list of capabilities */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

int __rte_experimental
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);

int __rte_experimental
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);
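/*
 * Illustrative sketch, not part of the header: query device information and
 * walk the driver's capability list, which is terminated by an entry created
 * with RTE_BBDEV_END_OF_CAPABILITIES_LIST(), i.e. one whose type is
 * RTE_BBDEV_OP_NONE. The rte_bbdev_op_cap type comes from rte_bbdev_op.h, and
 * the dev_name field is assumed here; max_num_queues, hardware_accelerated
 * and capabilities are the driver-info fields listed in the index below.
 */
#include <stdio.h>
#include <rte_bbdev.h>

static void
example_print_capabilities(uint16_t dev_id)
{
	struct rte_bbdev_info info;
	const struct rte_bbdev_op_cap *cap;

	if (rte_bbdev_info_get(dev_id, &info) != 0)
		return;

	printf("%s: up to %u queue(s), %s\n", info.dev_name,
			info.drv.max_num_queues,
			info.drv.hardware_accelerated ? "HW accelerated" : "software");

	/* Stop at the RTE_BBDEV_OP_NONE terminator entry */
	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
		printf("  supports op type %d\n", (int)cap->type);
}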
struct rte_bbdev_queue_data {
	/* ... */
};

typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops);

typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops);

typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops);

typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops);
#define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of a bbdev device name */

struct rte_bbdev_data {
	/* ... */
	struct rte_bbdev_queue_data *queues;
	/* ... */
};

struct rte_bbdev_callback;

/** Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
/** @internal Per-device structure dereferenced by the inline helpers below. */
struct rte_bbdev {
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/* ... */
	struct rte_bbdev_data *data;
	/* ... */
	struct rte_bbdev_cb_list list_cbs;
	/* ... */
};

/** @internal Array of all bbdev devices, indexed by device ID. */
extern struct rte_bbdev rte_bbdev_devices[];
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}

static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}

static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}

static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}
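/*
 * Illustrative sketch, not part of the header: the usual burst pattern built
 * on the inline helpers above. The ops array is assumed to have been
 * allocated from a bbdev operation mempool and populated with input and
 * output buffers elsewhere (see rte_bbdev_op.h); only the enqueue and dequeue
 * mechanics are shown.
 */
#include <rte_bbdev.h>

static void
example_process_enc_burst(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t burst)
{
	uint16_t enq = 0, deq = 0;

	/* The device may accept fewer operations than requested, so retry
	 * with the remainder until the whole burst is enqueued. */
	while (enq < burst)
		enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
				&ops[enq], burst - enq);

	/* Poll until every enqueued operation has been handed back with its
	 * status filled in by the driver. */
	while (deq < enq)
		deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
				&ops[deq], enq - deq);
}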
int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
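/*
 * Illustrative sketch, not part of the header: register an event callback
 * matching the rte_bbdev_cb_fn signature listed in the symbol index below.
 * The RTE_BBDEV_EVENT_ERROR identifier is assumed from the
 * rte_bbdev_event_type enum in rte_bbdev.h.
 */
#include <stdio.h>
#include <rte_bbdev.h>

static void
example_error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
		void *cb_arg, void *ret_param)
{
	(void)event;
	(void)cb_arg;
	(void)ret_param;
	printf("bbdev %u reported an error event\n", dev_id);
}

static int
example_register_error_cb(uint16_t dev_id)
{
	return rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
			example_error_cb, NULL);
}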
/* Index of symbols declared in rte_bbdev.h */

/* Device discovery and lifecycle */
uint16_t __rte_experimental rte_bbdev_count(void);
bool __rte_experimental rte_bbdev_is_valid(uint16_t dev_id);
uint16_t __rte_experimental rte_bbdev_find_next(uint16_t dev_id);
int __rte_experimental rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
int __rte_experimental rte_bbdev_intr_enable(uint16_t dev_id);
int __rte_experimental rte_bbdev_start(uint16_t dev_id);
int __rte_experimental rte_bbdev_stop(uint16_t dev_id);
int __rte_experimental rte_bbdev_close(uint16_t dev_id);

/* Queue management */
int __rte_experimental rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, const struct rte_bbdev_queue_conf *conf);
int __rte_experimental rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
int __rte_experimental rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
int __rte_experimental rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_queue_info *queue_info);

/* Statistics and device information */
int __rte_experimental rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
int __rte_experimental rte_bbdev_stats_reset(uint16_t dev_id);
int __rte_experimental rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);

/* Fast-path inline helpers */
static uint16_t rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops);
static uint16_t rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops);
static uint16_t rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops);
static uint16_t rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops);

/* Event callbacks and queue interrupts */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg, void *ret_param);
int __rte_experimental rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg);
int __rte_experimental rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg);
int __rte_experimental rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
int __rte_experimental rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
int __rte_experimental rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, void *data);

/* Macros, types and structure fields */
#define RTE_BBDEV_NAME_MAX_LEN
TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
struct rte_mbuf __rte_cache_aligned;
bool hardware_accelerated;
bool queue_intr_supported;
unsigned int max_num_queues;
uint8_t max_queue_priority;
uint64_t enqueue_err_count;
uint64_t dequeue_err_count;
enum rte_bbdev_op_type op_type;
enum rte_cpu_flag_t *cpu_flag_reqs;
const struct rte_bus *bus;
const struct rte_bbdev_op_cap *capabilities;
struct rte_bbdev_queue_conf conf;
struct rte_bbdev_queue_conf default_queue_conf;
struct rte_bbdev_driver_info drv;
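/*
 * Illustrative sketch, not part of the header: arm a queue's completion
 * interrupt and block on it through the EAL epoll wrapper.
 * RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD and rte_epoll_wait() are assumed
 * from rte_interrupts.h; a real application would combine this with a
 * dequeue loop.
 */
#include <rte_bbdev.h>
#include <rte_interrupts.h>

static int
example_wait_on_queue(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	int ret;

	ret = rte_bbdev_queue_intr_enable(dev_id, queue_id);
	if (ret != 0)
		return ret;

	/* Attach the queue's interrupt source to this thread's epoll instance */
	ret = rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
			RTE_INTR_EVENT_ADD, NULL);
	if (ret != 0)
		return ret;

	/* Sleep until the device raises the interrupt (no timeout) */
	return rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
}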