From mboxrd@z Thu Jan 1 00:00:00 1970
From: Fan Zhang
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, jianjay.zhou@huawei.com, jianfeng.tan@intel.com
Subject: [PATCH v4 4/8] lib/librte_vhost: add request handler
Date: Thu, 29 Mar 2018 07:52:51 -0500
Message-ID: <1522327975-28769-5-git-send-email-roy.fan.zhang@intel.com>
In-Reply-To: <1522327975-28769-1-git-send-email-roy.fan.zhang@intel.com>
References: <20180326095114.11605-1-roy.fan.zhang@intel.com>
 <1522327975-28769-1-git-send-email-roy.fan.zhang@intel.com>

This patch adds the implementation that parses a virtio crypto request
into a DPDK crypto operation.

Signed-off-by: Fan Zhang
---
 lib/librte_vhost/vhost_crypto.c | 609 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 609 insertions(+)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index c639b20..8fdb33f 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -426,3 +426,612 @@ vhost_crypto_msg_post_handler(int vid, void *msg, uint32_t *require_reply)
 	return ret;
 }
+
+static __rte_always_inline struct vring_desc *
+find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+{
+	if (desc->flags & VRING_DESC_F_WRITE)
+		return desc;
+
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		desc = &head[desc->next];
+		if (desc->flags & VRING_DESC_F_WRITE)
+			return desc;
+	}
+
+	return NULL;
+}
+
+static struct virtio_crypto_inhdr *
+reach_inhdr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc *desc)
+{
+	while (desc->flags & VRING_DESC_F_NEXT)
+		desc = &head[desc->next];
+
+	return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem, desc->addr);
+}
+
+static __rte_always_inline int
+move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+		uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	int left = size;
+
+	rte_prefetch0(&head[desc->next]);
+	left -= desc->len;
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		left -= desc->len;
+	}
+
+	if (unlikely(left < 0)) {
+		VC_LOG_ERR("Incorrect virtio descriptor");
+		return -1;
+	}
+
+	*cur_desc = &head[desc->next];
+	return 0;
+}
+
+static int
+copy_data(void *dst_data, struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint32_t to_copy;
+	uint8_t *data = dst_data;
+	uint8_t *src;
+	int left = size;
+
+	rte_prefetch0(&head[desc->next]);
+	to_copy = RTE_MIN(desc->len, (uint32_t)left);
+	src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+	rte_memcpy(data, src, to_copy);
+	left -= to_copy;
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_copy = RTE_MIN(desc->len, (uint32_t)left);
+		src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(data + size - left, src, to_copy);
+		left -= to_copy;
+	}
+
+	if (unlikely(left < 0)) {
+		VC_LOG_ERR("Incorrect virtio descriptor");
+		return -1;
+	}
+
+	*cur_desc = &head[desc->next];
+
+	return 0;
+}
+
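+/*
+ * Translate the guest physical address of the current descriptor into a
+ * host virtual address, then advance the descriptor chain by @size bytes.
+ * Returns NULL on a translation failure or a chain shorter than @size.
+ */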
+static __rte_always_inline void *
+get_data_ptr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t size)
+{
+	void *data;
+
+	data = GPA_TO_VVA(void *, mem, (*cur_desc)->addr);
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Failed to get object");
+		return NULL;
+	}
+
+	if (unlikely(move_desc(head, cur_desc, size) < 0))
+		return NULL;
+
+	return data;
+}
+
+static int
+write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
+{
+	struct rte_mbuf *mbuf = op->sym->m_dst;
+	struct vring_desc *head = vc_req->head;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	struct vring_desc *desc = vc_req->wb_desc;
+	int left = vc_req->wb_len;
+	uint32_t to_write;
+	uint8_t *src_data = mbuf->buf_addr, *dst;
+
+	rte_prefetch0(&head[desc->next]);
+	to_write = RTE_MIN(desc->len, (uint32_t)left);
+	dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+	rte_memcpy(dst, src_data, to_write);
+	left -= to_write;
+	src_data += to_write;
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	printf("desc addr %" PRIu64 " len %u:", desc->addr, desc->len);
+	rte_hexdump(stdout, "", dst, to_write);
+#endif
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_write = RTE_MIN(desc->len, (uint32_t)left);
+		dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(dst, src_data, to_write);
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+		printf("desc addr %" PRIu64 " len %u:", desc->addr, desc->len);
+		rte_hexdump(stdout, "DST:", dst, to_write);
+#endif
+		left -= to_write;
+		src_data += to_write;
+	}
+
+	if (unlikely(left < 0)) {
+		VC_LOG_ERR("Incorrect virtio descriptor");
+		return -1;
+	}
+
+	return 0;
+}
+
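+/*
+ * Translate a virtio-crypto cipher request into an rte_crypto symmetric
+ * operation: copy in the IV, set up the source and destination buffers
+ * (either a zero-copy GPA-to-HPA mapping or a data copy, depending on
+ * vcrypto->option) and locate the writable in-header for the status.
+ */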
+static uint8_t
+prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_cipher_data_req *cipher,
+		struct vring_desc *cur_desc)
+{
+	struct vring_desc *head = vc_req->head;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint8_t ret = 0;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, head, mem, &desc,
+			cipher->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "IV:", iv_data, cipher->para.iv_len);
+#endif
+
+	m_src->data_len = cipher->para.src_data_len;
+
+	switch (vcrypto->option) {
+	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
+				cipher->para.src_data_len);
+		m_src->buf_addr = get_data_ptr(head, mem, &desc,
+				cipher->para.src_data_len);
+		if (unlikely(m_src->buf_iova == 0 ||
+				m_src->buf_addr == NULL)) {
+			VC_LOG_ERR("zero_copy may fail due to cross page data");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+		break;
+	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		if (unlikely(cipher->para.src_data_len >
+				RTE_MBUF_DEFAULT_BUF_SIZE)) {
+			VC_LOG_ERR("Not enough space to do data copy");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), head,
+				mem, &desc, cipher->para.src_data_len) < 0)) {
+			ret = VIRTIO_CRYPTO_BADMSG;
+			goto error_exit;
+		}
+		break;
+	default:
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+		break;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "SRC:", rte_pktmbuf_mtod(m_src, void *),
+			cipher->para.src_data_len);
+#endif
+
+	/* dst */
+	desc = find_write_desc(head, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	switch (vcrypto->option) {
+	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
+				desc->addr, cipher->para.dst_data_len);
+		m_dst->buf_addr = get_data_ptr(head, mem, &desc,
+				cipher->para.dst_data_len);
+		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+			VC_LOG_ERR("zero_copy may fail due to cross page data");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
+		m_dst->data_len = cipher->para.dst_data_len;
+		break;
+	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		vc_req->wb_desc = desc;
+		vc_req->wb_len = cipher->para.dst_data_len;
+		break;
+	default:
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+		break;
+	}
+
+	/* src data */
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+	op->sym->cipher.data.offset = 0;
+	op->sym->cipher.data.length = cipher->para.src_data_len;
+
+	vc_req->inhdr = get_data_ptr(head, mem, &desc, INHDR_LEN);
+	if (unlikely(vc_req->inhdr == NULL)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;
+
+	return 0;
+
+error_exit:
+	vc_req->len = INHDR_LEN;
+	return ret;
+}
+
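+/*
+ * Translate a virtio-crypto cipher+hash chaining request into an
+ * rte_crypto symmetric operation. On top of the cipher setup, the digest
+ * buffer is mapped directly (zero-copy) or placed right after the
+ * destination data (copy mode) so the backend can write the hash result.
+ */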
+static uint8_t
+prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_alg_chain_data_req *chain,
+		struct vring_desc *cur_desc)
+{
+	struct vring_desc *head = vc_req->head;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint32_t digest_offset;
+	void *digest_addr;
+	uint8_t ret = 0;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, head, mem, &desc,
+			chain->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "IV:", iv_data, chain->para.iv_len);
+#endif
+	m_src->data_len = chain->para.src_data_len;
+	m_dst->data_len = chain->para.dst_data_len;
+
+	switch (vcrypto->option) {
+	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
+				chain->para.src_data_len);
+		m_src->buf_addr = get_data_ptr(head, mem, &desc,
+				chain->para.src_data_len);
+		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
+			VC_LOG_ERR("zero_copy may fail due to cross page data");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+		break;
+	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		if (unlikely(chain->para.src_data_len >
+				RTE_MBUF_DEFAULT_BUF_SIZE)) {
+			VC_LOG_ERR("Not enough space to do data copy");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), head,
+				mem, &desc, chain->para.src_data_len) < 0)) {
+			ret = VIRTIO_CRYPTO_BADMSG;
+			goto error_exit;
+		}
+		break;
+	default:
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+		break;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "SRC:", rte_pktmbuf_mtod(m_src, void *),
+			chain->para.src_data_len);
+#endif
+
+	/* dst */
+	desc = find_write_desc(head, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	switch (vcrypto->option) {
+	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
+				desc->addr, chain->para.dst_data_len);
+		m_dst->buf_addr = get_data_ptr(head, mem, &desc,
+				chain->para.dst_data_len);
+		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+			VC_LOG_ERR("zero_copy may fail due to cross page data");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
+		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
+				desc->addr, chain->para.hash_result_len);
+		op->sym->auth.digest.data = get_data_ptr(head, mem, &desc,
+				chain->para.hash_result_len);
+		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
+			VC_LOG_ERR("zero_copy may fail due to cross page data");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+		break;
+	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		digest_offset = m_dst->data_len;
+		digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *,
+				digest_offset);
+
+		vc_req->wb_desc = desc;
+		vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;
+
+		if (unlikely(move_desc(head, &desc,
+				chain->para.dst_data_len) < 0)) {
+			ret = VIRTIO_CRYPTO_BADMSG;
+			goto error_exit;
+		}
+
+		if (unlikely(copy_data(digest_addr, head, mem, &desc,
+				chain->para.hash_result_len) < 0)) {
+			ret = VIRTIO_CRYPTO_BADMSG;
+			goto error_exit;
+		}
+
+		op->sym->auth.digest.data = digest_addr;
+		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
+				digest_offset);
+		break;
+	default:
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+		break;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "Digest:", op->sym->auth.digest.data,
+			chain->para.hash_result_len);
+#endif
+
+	/* record inhdr */
+	vc_req->inhdr = get_data_ptr(head, mem, &desc, INHDR_LEN);
+	if (unlikely(vc_req->inhdr == NULL)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
+	op->sym->cipher.data.length = chain->para.src_data_len -
+			chain->para.cipher_start_src_offset;
+
+	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
+	op->sym->auth.data.length = chain->para.len_to_hash;
+
+	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
+			INHDR_LEN;
+	return 0;
+
+error_exit:
+	vc_req->len = INHDR_LEN;
+	return ret;
+}
+
+/**
+ * Process one request on a descriptor chain
+ */
+static __rte_always_inline int
+vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
+		struct vring_desc *head, uint16_t desc_idx,
+		struct rte_vhost_memory *mem)
+{
+	struct vhost_crypto_data_req *vc_req = RTE_PTR_ADD(op->sym->m_src,
+			sizeof(struct rte_mbuf));
+	struct rte_cryptodev_sym_session *session;
+	struct virtio_crypto_op_data_req *req;
+	struct virtio_crypto_inhdr *inhdr;
+	struct vring_desc *desc = NULL;
+	uint64_t session_id;
+	int err = 0;
+
+	vc_req->desc_idx = desc_idx;
+
+	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
+		head = GPA_TO_VVA(struct vring_desc *, mem, head->addr);
+		if (unlikely(!head))
+			return 0;
+		desc_idx = 0;
+	}
+
+	desc = head;
+
+	vc_req->mem = mem;
+	vc_req->head = head;
+	vc_req->vq = vq;
+
+	vc_req->zero_copy = vcrypto->option;
+
+	req = get_data_ptr(head, mem, &desc, sizeof(*req));
+	if (unlikely(req == NULL)) {
+		err = VIRTIO_CRYPTO_ERR;
+		VC_LOG_ERR("Invalid descriptor");
+		goto error_exit;
+	}
+
+	switch (req->header.opcode) {
+	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+		session_id = req->header.session_id;
+
+		/* one branch to avoid unnecessary table lookup */
+		if (vcrypto->cache_session_id != session_id) {
+			err = rte_hash_lookup_data(vcrypto->session_map,
+					&session_id, (void **)&session);
+			if (unlikely(err < 0)) {
+				err = VIRTIO_CRYPTO_ERR;
+				VC_LOG_ERR("Failed to retrieve session id %" PRIu64,
+						session_id);
+				goto error_exit;
+			}
+
+			vcrypto->cache_session = session;
+			vcrypto->cache_session_id = session_id;
+		}
+
+		session = vcrypto->cache_session;
+
+		err = rte_crypto_op_attach_sym_session(op, session);
+		if (unlikely(err < 0)) {
+			err = VIRTIO_CRYPTO_ERR;
+			VC_LOG_ERR("Failed to attach session to op");
+			goto error_exit;
+		}
+
+		switch (req->u.sym_req.op_type) {
+		case VIRTIO_CRYPTO_SYM_OP_NONE:
+			err = VIRTIO_CRYPTO_NOTSUPP;
+			break;
+		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
+					&req->u.sym_req.u.cipher, desc);
+			break;
+		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+			err = prepare_sym_chain_op(vcrypto, op, vc_req,
+					&req->u.sym_req.u.chain, desc);
+			break;
+		}
+		if (unlikely(err != 0)) {
+			VC_LOG_ERR("Failed to process sym request");
+			goto error_exit;
+		}
+		break;
+	default:
+		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
+				req->header.opcode);
+		goto error_exit;
+	}
+
+	return 0;
+
+error_exit:
+
+	inhdr = reach_inhdr(head, mem, desc);
+	if (likely(inhdr != NULL))
+		inhdr->status = (uint8_t)err;
+
+	return -1;
+}
+
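+/*
+ * Finalize one completed crypto op: write the result back to the guest in
+ * copy mode, fill the used ring entry and return the op's virtqueue. If
+ * the op belongs to a different virtqueue than @old_vq, return that queue
+ * without finalizing, so the caller can stop batching.
+ */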
+static __rte_always_inline struct vhost_virtqueue *
+vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+		struct vhost_virtqueue *old_vq)
+{
+	struct rte_mbuf *m_src = op->sym->m_src;
+	struct rte_mbuf *m_dst = op->sym->m_dst;
+	struct vhost_crypto_data_req *vc_req = RTE_PTR_ADD(m_src,
+			sizeof(struct rte_mbuf));
+	uint16_t desc_idx;
+	int ret = 0;
+
+	if (unlikely(!vc_req)) {
+		VC_LOG_ERR("Failed to retrieve vc_req");
+		return NULL;
+	}
+
+	if (old_vq && (vc_req->vq != old_vq))
+		return vc_req->vq;
+
+	desc_idx = vc_req->desc_idx;
+
+	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
+		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+	else {
+		if (vc_req->zero_copy == 0) {
+			ret = write_back_data(op, vc_req);
+			if (unlikely(ret != 0))
+				vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+		}
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "DST:", rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *),
+			m_dst->data_len);
+#endif
+
+	vc_req->vq->used->ring[desc_idx].id = desc_idx;
+	vc_req->vq->used->ring[desc_idx].len = vc_req->len;
+
+	rte_mempool_put(m_dst->pool, (void *)m_dst);
+	rte_mempool_put(m_src->pool, (void *)m_src);
+
+	return vc_req->vq;
+}
+
+static __rte_always_inline uint16_t
+vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops, int *callfd)
+{
+	uint16_t processed = 1;
+	struct vhost_virtqueue *vq, *tmp_vq;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
+	if (unlikely(vq == NULL))
+		return 0;
+	tmp_vq = vq;
+
+	while (processed < nb_ops) {
+		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
+				tmp_vq);
+
+		if (unlikely(vq != tmp_vq))
+			break;
+
+		processed++;
+	}
+
+	*callfd = vq->callfd;
+
+	*(volatile uint16_t *)&vq->used->idx += processed;
+
+	return processed;
+}
-- 
2.7.4