author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:11:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:11:27 +0000
commit     34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree       62db60558cbf089714b48daeabca82bf2b20b20e /drivers/vdpa
parent     Adding debian version 6.8.12-1. (diff)
download   linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.tar.xz
           linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.zip
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/vdpa')

-rw-r--r--  drivers/vdpa/alibaba/eni_vdpa.c        |   8
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c        |  11
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h        |   2
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c        |  15
-rw-r--r--  drivers/vdpa/pds/aux_drv.c             |   2
-rw-r--r--  drivers/vdpa/pds/vdpa_dev.c            |  20
-rw-r--r--  drivers/vdpa/pds/vdpa_dev.h            |   1
-rw-r--r--  drivers/vdpa/vdpa.c                    | 214
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c       |  12
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.c   |  27
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.h   |   8
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c     |  34
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c      |   8

13 files changed, 348 insertions, 14 deletions
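
Editor's note: a recurring theme in the hunks below is the new get_vq_size callback in
struct vdpa_config_ops, which lets the vdpa core query the size of an individual
virtqueue instead of assuming one global maximum. As a rough illustration (a hypothetical
sketch, not code from this commit), a caller could prefer the per-queue op and fall back
to get_vq_num_max() for drivers that do not implement it:

    /*
     * Hypothetical helper showing how the new op might be consumed.
     * The fallback policy here is an assumption for illustration only.
     */
    static u16 example_vq_size(struct vdpa_device *vdev, u16 qid)
    {
            const struct vdpa_config_ops *ops = vdev->config;

            if (ops->get_vq_size)
                    return ops->get_vq_size(vdev, qid);

            /* Older drivers only report a single ring size for all queues. */
            return ops->get_vq_num_max(vdev);
    }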
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index cce3d18371..ad7f3447fe 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -254,6 +254,13 @@ static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
 	return vp_legacy_get_queue_size(ldev, 0);
 }
 
+static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+	return vp_legacy_get_queue_size(ldev, qid);
+}
+
 static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
 				 struct vdpa_vq_state *state)
 {
@@ -416,6 +423,7 @@ static const struct vdpa_config_ops eni_vdpa_ops = {
 	.reset		= eni_vdpa_reset,
 	.get_vq_num_max	= eni_vdpa_get_vq_num_max,
 	.get_vq_num_min	= eni_vdpa_get_vq_num_min,
+	.get_vq_size	= eni_vdpa_get_vq_size,
 	.get_vq_state	= eni_vdpa_get_vq_state,
 	.set_vq_state	= eni_vdpa_set_vq_state,
 	.set_vq_cb	= eni_vdpa_set_vq_cb,
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 060f837a4f..472daa588a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -69,20 +69,19 @@ static int ifcvf_read_config_range(struct pci_dev *dev,
 	return 0;
 }
 
-static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
 {
 	u16 queue_size;
 
+	if (qid >= hw->nr_vring)
+		return 0;
+
 	vp_iowrite16(qid, &hw->common_cfg->queue_select);
 	queue_size = vp_ioread16(&hw->common_cfg->queue_size);
 
 	return queue_size;
 }
 
-/* This function returns the max allowed safe size for
- * all virtqueues. It is the minimal size that can be
- * suppprted by all virtqueues.
- */
 u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
 {
 	u16 queue_size, max_size, qid;
@@ -94,7 +93,7 @@ u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
 		if (!queue_size)
 			continue;
 
-		max_size = min(queue_size, max_size);
+		max_size = max(queue_size, max_size);
 	}
 
 	return max_size;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index b57849c643..0f34771702 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -28,6 +28,7 @@
 
 #define IFCVF_PCI_MAX_RESOURCE	6
 #define IFCVF_LM_BAR	4
+#define IFCVF_MIN_VQ_SIZE	64
 
 #define IFCVF_ERR(pdev, fmt, ...)	dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
 #define IFCVF_DBG(pdev, fmt, ...)	dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
@@ -131,4 +132,5 @@ void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
 void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features);
 u64 ifcvf_get_driver_features(struct ifcvf_hw *hw);
 u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw);
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
 #endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e98fa8100f..80d0a04608 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -456,6 +456,11 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
 	return ifcvf_get_max_vq_size(vf);
 }
 
+static u16 ifcvf_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
+{
+	return IFCVF_MIN_VQ_SIZE;
+}
+
 static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
 				   struct vdpa_vq_state *state)
 {
@@ -597,6 +602,14 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 		return -EINVAL;
 }
 
+static u16 ifcvf_vdpa_get_vq_size(struct vdpa_device *vdpa_dev,
+				  u16 qid)
+{
+	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+	return ifcvf_get_vq_size(vf, qid);
+}
+
 static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
 							       u16 idx)
 {
@@ -624,6 +637,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
 	.set_status	= ifcvf_vdpa_set_status,
 	.reset		= ifcvf_vdpa_reset,
 	.get_vq_num_max	= ifcvf_vdpa_get_vq_num_max,
+	.get_vq_num_min	= ifcvf_vdpa_get_vq_num_min,
 	.get_vq_state	= ifcvf_vdpa_get_vq_state,
 	.set_vq_state	= ifcvf_vdpa_set_vq_state,
 	.set_vq_cb	= ifcvf_vdpa_set_vq_cb,
@@ -632,6 +646,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
 	.set_vq_num	= ifcvf_vdpa_set_vq_num,
 	.set_vq_address	= ifcvf_vdpa_set_vq_address,
 	.get_vq_irq	= ifcvf_vdpa_get_vq_irq,
+	.get_vq_size	= ifcvf_vdpa_get_vq_size,
 	.kick_vq	= ifcvf_vdpa_kick_vq,
 	.get_generation	= ifcvf_vdpa_get_generation,
 	.get_device_id	= ifcvf_vdpa_get_device_id,
diff --git a/drivers/vdpa/pds/aux_drv.c b/drivers/vdpa/pds/aux_drv.c
index 186e9ee22e..f57330cf90 100644
--- a/drivers/vdpa/pds/aux_drv.c
+++ b/drivers/vdpa/pds/aux_drv.c
@@ -93,8 +93,8 @@ static void pds_vdpa_remove(struct auxiliary_device *aux_dev)
 	struct device *dev = &aux_dev->dev;
 
 	vdpa_mgmtdev_unregister(&vdpa_aux->vdpa_mdev);
+	pds_vdpa_release_irqs(vdpa_aux->pdsv);
 	vp_modern_remove(&vdpa_aux->vd_mdev);
-	pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);
 
 	pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
 	kfree(vdpa_aux);
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 25c0fe5ec3..301d95e085 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -426,12 +426,18 @@ err_release:
 	return err;
 }
 
-static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
 {
-	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
-	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
+	struct pds_vdpa_aux *vdpa_aux;
+	struct pci_dev *pdev;
 	int qid;
 
+	if (!pdsv)
+		return;
+
+	pdev = pdsv->vdpa_aux->padev->vf_pdev;
+	vdpa_aux = pdsv->vdpa_aux;
+
 	if (!vdpa_aux->nintrs)
 		return;
 
@@ -612,6 +618,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 	struct device *dma_dev;
 	struct pci_dev *pdev;
 	struct device *dev;
+	u8 status;
 	int err;
 	int i;
 
@@ -638,6 +645,13 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 	dma_dev = &pdev->dev;
 	pdsv->vdpa_dev.dma_dev = dma_dev;
 
+	status = pds_vdpa_get_status(&pdsv->vdpa_dev);
+	if (status == 0xff) {
+		dev_err(dev, "Broken PCI - status %#x\n", status);
+		err = -ENXIO;
+		goto err_unmap;
+	}
+
 	pdsv->supported_features = mgmt->supported_features;
 
 	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h
index d984ba24a7..84bdb45871 100644
--- a/drivers/vdpa/pds/vdpa_dev.h
+++ b/drivers/vdpa/pds/vdpa_dev.h
@@ -46,5 +46,6 @@ struct pds_vdpa_device {
 
 #define PDS_VDPA_PACKED_INVERT_IDX	0x8000
 
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv);
 int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux);
 #endif /* _VDPA_DEV_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index d0695680b2..6cb96a1e8b 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -115,7 +115,7 @@ static const struct attribute_group vdpa_dev_group = {
 };
 __ATTRIBUTE_GROUPS(vdpa_dev);
 
-static struct bus_type vdpa_bus = {
+static const struct bus_type vdpa_bus = {
 	.name  = "vdpa",
 	.dev_groups = vdpa_dev_groups,
 	.match = vdpa_dev_match,
@@ -945,6 +945,215 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
 }
 
 static int
+vdpa_dev_blk_capacity_config_fill(struct sk_buff *msg,
+				  const struct virtio_blk_config *config)
+{
+	u64 val_u64;
+
+	val_u64 = __virtio64_to_cpu(true, config->capacity);
+
+	return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_BLK_CFG_CAPACITY,
+				 val_u64, VDPA_ATTR_PAD);
+}
+
+static int
+vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
+				  const struct virtio_blk_config *config)
+{
+	u32 val_u32;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_SIZE_MAX)) == 0)
+		return 0;
+
+	val_u32 = __virtio32_to_cpu(true, config->size_max);
+
+	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
+}
+
+/* fill the block size*/
+static int
+vdpa_dev_blk_block_size_config_fill(struct sk_buff *msg, u64 features,
+				    const struct virtio_blk_config *config)
+{
+	u32 val_u32;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_BLK_SIZE)) == 0)
+		return 0;
+
+	val_u32 = __virtio32_to_cpu(true, config->blk_size);
+
+	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, val_u32);
+}
+
+static int
+vdpa_dev_blk_seg_max_config_fill(struct sk_buff *msg, u64 features,
+				 const struct virtio_blk_config *config)
+{
+	u32 val_u32;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_SEG_MAX)) == 0)
+		return 0;
+
+	val_u32 = __virtio32_to_cpu(true, config->seg_max);
+
+	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, val_u32);
+}
+
+static int vdpa_dev_blk_mq_config_fill(struct sk_buff *msg, u64 features,
+					const struct virtio_blk_config *config)
+{
+	u16 val_u16;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_MQ)) == 0)
+		return 0;
+
+	val_u16 = __virtio16_to_cpu(true, config->num_queues);
+
+	return nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, val_u16);
+}
+
+static int vdpa_dev_blk_topology_config_fill(struct sk_buff *msg, u64 features,
+					      const struct virtio_blk_config *config)
+{
+	u16 min_io_size;
+	u32 opt_io_size;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_TOPOLOGY)) == 0)
+		return 0;
+
+	min_io_size = __virtio16_to_cpu(true, config->min_io_size);
+	opt_io_size = __virtio32_to_cpu(true, config->opt_io_size);
+
+	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP,
+		       config->physical_block_exp))
+		return -EMSGSIZE;
+
+	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET,
+		       config->alignment_offset))
+		return -EMSGSIZE;
+
+	if (nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, min_io_size))
+		return -EMSGSIZE;
+
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, opt_io_size))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int vdpa_dev_blk_discard_config_fill(struct sk_buff *msg, u64 features,
+					    const struct virtio_blk_config *config)
+{
+	u32 val_u32;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_DISCARD)) == 0)
+		return 0;
+
+	val_u32 = __virtio32_to_cpu(true, config->max_discard_sectors);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, val_u32))
+		return -EMSGSIZE;
+
+	val_u32 = __virtio32_to_cpu(true, config->max_discard_seg);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, val_u32))
+		return -EMSGSIZE;
+
+	val_u32 = __virtio32_to_cpu(true, config->discard_sector_alignment);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN, val_u32))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int
+vdpa_dev_blk_write_zeroes_config_fill(struct sk_buff *msg, u64 features,
+				      const struct virtio_blk_config *config)
+{
+	u32 val_u32;
+
+	if ((features & BIT_ULL(VIRTIO_BLK_F_WRITE_ZEROES)) == 0)
+		return 0;
+
+	val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_sectors);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, val_u32))
+		return -EMSGSIZE;
+
+	val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_seg);
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, val_u32))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
+{
+	u8 ro;
+
+	ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
+	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
+{
+	u8 flush;
+
+	flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
+	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev,
+				    struct sk_buff *msg)
+{
+	struct virtio_blk_config config = {};
+	u64 features_device;
+
+	vdev->config->get_config(vdev, 0, &config, sizeof(config));
+
+	features_device = vdev->config->get_device_features(vdev);
+
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
+			      VDPA_ATTR_PAD))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_capacity_config_fill(msg, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_seg_size_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_block_size_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_seg_max_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_mq_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_topology_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_discard_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_write_zeroes_config_fill(msg, features_device, &config))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_ro_config_fill(msg, features_device))
+		return -EMSGSIZE;
+
+	if (vdpa_dev_blk_flush_config_fill(msg, features_device))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int
 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
 		     u32 seq, int flags, struct netlink_ext_ack *extack)
 {
@@ -988,6 +1197,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
 	case VIRTIO_ID_NET:
 		err = vdpa_dev_net_config_fill(vdev, msg);
 		break;
+	case VIRTIO_ID_BLOCK:
+		err = vdpa_dev_blk_config_fill(vdev, msg);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
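
Editor's note: the vdpa_dev_blk_*_config_fill() helpers above all follow one pattern:
return 0 (the attribute is simply omitted) when the device did not offer the matching
VIRTIO_BLK_F_* feature bit, otherwise convert the little-endian config field and emit a
single netlink attribute, with -EMSGSIZE signalling that the message ran out of room.
Passing true to the __virtioXX_to_cpu() helpers selects VIRTIO_F_VERSION_1
(little-endian) semantics, so the calls reduce to plain LE conversions — an illustration
based on their definitions in include/linux/virtio_byteorder.h:

    /* With little_endian == true these behave like leXX_to_cpu(). */
    u16 nq  = __virtio16_to_cpu(true, config->num_queues); /* le16_to_cpu */
    u64 cap = __virtio64_to_cpu(true, config->capacity);   /* le64_to_cpu */

Userspace sees the new attributes through the vdpa netlink config-get path, e.g. via
iproute2's `vdpa dev config show`.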
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 18584ce70b..8ffea8430f 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -311,6 +311,17 @@ static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
 	vq->num = num;
 }
 
+static u16 vdpasim_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+	if (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)
+		return vq->num;
+	else
+		return VDPASIM_QUEUE_MAX;
+}
+
 static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -775,6 +786,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
 	.get_driver_features    = vdpasim_get_driver_features,
 	.set_config_cb          = vdpasim_set_config_cb,
 	.get_vq_num_max         = vdpasim_get_vq_num_max,
+	.get_vq_size		= vdpasim_get_vq_size,
 	.get_device_id          = vdpasim_get_device_id,
 	.get_vendor_id          = vdpasim_get_vendor_id,
 	.get_status             = vdpasim_get_status,
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 5e4a77b9ba..791d38d628 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -373,6 +373,26 @@ static void vduse_domain_free_iova(struct iova_domain *iovad,
 	free_iova_fast(iovad, iova >> shift, iova_len);
 }
 
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+					 dma_addr_t dma_addr, size_t size,
+					 enum dma_data_direction dir)
+{
+	read_lock(&domain->bounce_lock);
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+		vduse_domain_bounce(domain, dma_addr, size, DMA_TO_DEVICE);
+	read_unlock(&domain->bounce_lock);
+}
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+				      dma_addr_t dma_addr, size_t size,
+				      enum dma_data_direction dir)
+{
+	read_lock(&domain->bounce_lock);
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+	read_unlock(&domain->bounce_lock);
+}
+
 dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
 				 struct page *page, unsigned long offset,
 				 size_t size, enum dma_data_direction dir,
@@ -393,7 +413,8 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
 	if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
 		goto err_unlock;
 
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 		vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
 
 	read_unlock(&domain->bounce_lock);
@@ -411,9 +432,9 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 			     enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iova_domain *iovad = &domain->stream_iovad;
-
 	read_lock(&domain->bounce_lock);
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
 
 	vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 173e979b84..f92f22a726 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -44,6 +44,14 @@ int vduse_domain_set_map(struct vduse_iova_domain *domain,
 void vduse_domain_clear_map(struct vduse_iova_domain *domain,
 			    struct vhost_iotlb *iotlb);
 
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+					 dma_addr_t dma_addr, size_t size,
+					 enum dma_data_direction dir);
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+				      dma_addr_t dma_addr, size_t size,
+				      enum dma_data_direction dir);
+
 dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
 				 struct page *page, unsigned long offset,
 				 size_t size, enum dma_data_direction dir,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 1d24da79c3..73c89701fc 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -541,6 +541,17 @@ static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
 	vq->num = num;
 }
 
+static u16 vduse_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+	struct vduse_virtqueue *vq = dev->vqs[idx];
+
+	if (vq->num)
+		return vq->num;
+	else
+		return vq->num_max;
+}
+
 static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
 					u16 idx, bool ready)
 {
@@ -773,6 +784,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.kick_vq		= vduse_vdpa_kick_vq,
 	.set_vq_cb		= vduse_vdpa_set_vq_cb,
 	.set_vq_num		= vduse_vdpa_set_vq_num,
+	.get_vq_size		= vduse_vdpa_get_vq_size,
 	.set_vq_ready		= vduse_vdpa_set_vq_ready,
 	.get_vq_ready		= vduse_vdpa_get_vq_ready,
 	.set_vq_state		= vduse_vdpa_set_vq_state,
@@ -798,6 +810,26 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.free			= vduse_vdpa_free,
 };
 
+static void vduse_dev_sync_single_for_device(struct device *dev,
+					     dma_addr_t dma_addr, size_t size,
+					     enum dma_data_direction dir)
+{
+	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_iova_domain *domain = vdev->domain;
+
+	vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
+}
+
+static void vduse_dev_sync_single_for_cpu(struct device *dev,
+					  dma_addr_t dma_addr, size_t size,
+					  enum dma_data_direction dir)
+{
+	struct vduse_dev *vdev = dev_to_vduse(dev);
+	struct vduse_iova_domain *domain = vdev->domain;
+
+	vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
+}
+
 static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction dir,
@@ -858,6 +890,8 @@ static size_t vduse_dev_max_mapping_size(struct device *dev)
 }
 
 static const struct dma_map_ops vduse_dev_dma_ops = {
+	.sync_single_for_device = vduse_dev_sync_single_for_device,
+	.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
 	.map_page = vduse_dev_map_page,
 	.unmap_page = vduse_dev_unmap_page,
 	.alloc = vduse_dev_alloc_coherent,
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 281287fae8..df5f4a3bcc 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -328,6 +328,13 @@ static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
 	vp_modern_set_queue_size(mdev, qid, num);
 }
 
+static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+	return vp_modern_get_queue_size(mdev, qid);
+}
+
 static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
 				  u64 desc_area, u64 driver_area,
 				  u64 device_area)
@@ -449,6 +456,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
 	.set_vq_ready	= vp_vdpa_set_vq_ready,
 	.get_vq_ready	= vp_vdpa_get_vq_ready,
 	.set_vq_num	= vp_vdpa_set_vq_num,
+	.get_vq_size	= vp_vdpa_get_vq_size,
 	.set_vq_address	= vp_vdpa_set_vq_address,
 	.kick_vq	= vp_vdpa_kick_vq,
 	.get_generation	= vp_vdpa_get_generation,
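
Editor's note: the VDUSE hunks above give the device's dma_map_ops real
.sync_single_for_cpu/.sync_single_for_device implementations and make map/unmap honor
DMA_ATTR_SKIP_CPU_SYNC, so a consumer can map a buffer once and bounce data explicitly
around each device access instead of on every map/unmap. A minimal sketch of that usage,
assuming a struct device *dev already backed by VDUSE (names and flow are illustrative,
not code from this commit):

    static void example_bounce_roundtrip(struct device *dev, struct page *page,
                                         size_t len)
    {
            /* Map without an implicit bounce-in copy. */
            dma_addr_t dma = dma_map_page_attrs(dev, page, 0, len,
                                                DMA_BIDIRECTIONAL,
                                                DMA_ATTR_SKIP_CPU_SYNC);
            if (dma_mapping_error(dev, dma))
                    return;

            /* CPU filled the buffer: copy into the bounce page for the device. */
            dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);

            /* ... device DMA runs here ... */

            /* Copy the device's results back before the CPU reads them. */
            dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

            dma_unmap_page_attrs(dev, dma, len, DMA_BIDIRECTIONAL,
                                 DMA_ATTR_SKIP_CPU_SYNC);
    }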