Commit 7e642c02 authored by Stephen Rothwell

Merge remote-tracking branch 'vhost/linux-next'

parents 6c005216 b03f1d91
......@@ -691,6 +691,18 @@ case what's actually required is:
p = READ_ONCE(b);
}
Alternatively, a control dependency can be converted to a data dependency,
e.g.:
q = READ_ONCE(a);
if (q) {
b = dependent_ptr_mb(b, q);
p = READ_ONCE(b);
}
Note that the result of dependent_ptr_mb() must be used by the subsequent
accesses for the ordering to take effect.
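For example (a hypothetical misuse), discarding the returned pointer and
continuing to dereference the original variable leaves no data dependency
for the hardware to honor:

	q = READ_ONCE(a);
	if (q) {
		dependent_ptr_mb(b, q);	/* BUG: result discarded */
		p = READ_ONCE(b);	/* not ordered after the load of a */
	}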
However, stores are not speculated. This means that ordering -is- provided
for load-store control dependencies, as in the following example:
......@@ -836,6 +848,12 @@ out-guess your code. More generally, although READ_ONCE() does force
the compiler to actually emit code for a given load, it does not force
the compiler to use the results.
Converting to a data dependency helps with this too:
q = READ_ONCE(a);
b = dependent_ptr_mb(b, q);
WRITE_ONCE(b, 1);
In addition, control dependencies apply only to the then-clause and
else-clause of the if-statement in question. In particular, they do not
necessarily apply to code following the if-statement:
......@@ -875,6 +893,8 @@ to the CPU containing it. See the section on "Multicopy atomicity"
for more information.
In summary:
(*) Control dependencies can order prior loads against later stores.
......
......@@ -59,6 +59,7 @@
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
#define ARCH_NEEDS_READ_BARRIER_DEPENDS 1
#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
......
......@@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
struct _vop_vdev *vdev = to_vopvdev(dev);
struct vop_device *vpdev = vdev->vpdev;
struct mic_device_ctrl __iomem *dc = vdev->dc;
int i, err, retry;
int i, err, retry, queue_idx = 0;
/* We must have this many virtqueues. */
if (nvqs > ioread8(&vdev->desc->num_vq))
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
__func__, i, names[i]);
vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
......
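The same NULL-name handling recurs in every transport's find_vqs in this
merge (vop, remoteproc, ccw, mmio, pci): a NULL entry in names[] means the
caller does not want that virtqueue, so the transport skips creating it and
hands out device queue indexes only for the virtqueues it actually sets up.
A minimal sketch of the shared pattern (setup_vq stands in for the
per-transport constructor):

	int i, err, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;	/* caller opted out of this vq */
			continue;
		}
		/* only created vqs consume a device queue index */
		vqs[i] = setup_vq(vdev, queue_idx++, callbacks[i], names[i]);
		if (IS_ERR(vqs[i]))
			return PTR_ERR(vqs[i]);
	}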
......@@ -1330,7 +1330,8 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}
static void free_old_xmit_skbs(struct send_queue *sq)
static void free_old_xmit_skbs(struct send_queue *sq, struct netdev_queue *txq,
bool use_napi)
{
struct sk_buff *skb;
unsigned int len;
......@@ -1343,7 +1344,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
dev_consume_skb_any(skb);
napi_consume_skb(skb, use_napi);
}
/* Avoid overhead when no packets have been processed
......@@ -1352,6 +1353,9 @@ static void free_old_xmit_skbs(struct send_queue *sq)
if (!packets)
return;
if (use_napi)
netdev_tx_completed_queue(txq, packets, bytes);
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.bytes += bytes;
sq->stats.packets += packets;
......@@ -1369,7 +1373,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, txq, true);
__netif_tx_unlock(txq);
}
......@@ -1445,7 +1449,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
__netif_tx_lock(txq, raw_smp_processor_id());
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, txq, true);
__netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
......@@ -1510,13 +1514,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
bool kick = !skb->xmit_more;
bool more = skb->xmit_more;
bool use_napi = sq->napi.weight;
unsigned int bytes = skb->len;
bool kick;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, txq, use_napi);
if (use_napi && kick)
if (use_napi && !more)
virtqueue_enable_cb_delayed(sq->vq);
/* timestamp packet in software */
......@@ -1557,7 +1563,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
free_old_xmit_skbs(sq, txq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
......@@ -1565,7 +1571,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
if (kick || netif_xmit_stopped(txq)) {
if (use_napi)
kick = __netdev_tx_sent_queue(txq, bytes, more);
else
kick = !more || netif_xmit_stopped(txq);
if (kick) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.kicks++;
......
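Taken together, the virtio_net hunks hook the napi TX path into byte queue
limits (BQL): bytes are registered at submit time and drained at completion
time, and the submit-side helper also decides whether the device needs a
doorbell. A condensed sketch of the pairing, reusing the txq, sq, packets
and bytes variables from the hunks above:

	/* submit path: account bytes; returns true when a kick is needed */
	kick = __netdev_tx_sent_queue(txq, skb->len, skb->xmit_more);

	/* completion path (under the tx lock, napi mode only): */
	netdev_tx_completed_queue(txq, packets, bytes);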
......@@ -741,6 +741,16 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
struct resource *b_res;
b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
/*
* Don't re-check after this function has been called once already;
* this matters because the bridge might already be in use.
* Note: this is only reliable because, per the spec, all PCI-to-PCI
* bridges support memory unconditionally, so IORESOURCE_MEM is set.
*/
if (b_res[1].flags & IORESOURCE_MEM)
return;
b_res[1].flags |= IORESOURCE_MEM;
pci_read_config_word(bridge, PCI_IO_BASE, &io);
......
......@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
const bool * ctx,
struct irq_affinity *desc)
{
int i, ret;
int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
......
......@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
int ret, i;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
......@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
ctx ? ctx[i] : false, ccw);
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
names[i], ctx ? ctx[i] : false,
ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
......
......@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
struct virtio_scsi_ctrl_tmf_resp __user *resp;
struct virtio_scsi_ctrl_tmf_resp rsp;
struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
resp = vq->iov[vc->out].iov_base;
ret = __copy_to_user(resp, &rsp, sizeof(rsp));
if (!ret)
iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
......@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
struct virtio_scsi_ctrl_an_resp __user *resp;
struct virtio_scsi_ctrl_an_resp rsp;
struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
rsp.response = VIRTIO_SCSI_S_OK;
resp = vq->iov[vc->out].iov_base;
ret = __copy_to_user(resp, &rsp, sizeof(rsp));
if (!ret)
iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
......
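Both responders above switch from writing through iov[out].iov_base with
__copy_to_user() to building an iov_iter over the writable descriptors, so
a response that the guest scattered across several iovecs is still copied
out correctly. The idiom, as a sketch reusing the vq, vc and rsp variables
from above:

	struct iov_iter iov_iter;

	/* the writable ("in") iovecs start right after the "out" ones */
	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
	if (copy_to_iter(&rsp, sizeof(rsp), &iov_iter) != sizeof(rsp))
		pr_err("short copy of response\n");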
......@@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type))
if (ret != sizeof(type)) {
ret = -EINVAL;
goto done;
}
switch (type) {
case VHOST_IOTLB_MSG:
......@@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
if (ret != sizeof(msg))
if (ret != sizeof(msg)) {
ret = -EINVAL;
goto done;
}
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
......
......@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_MAX
};
enum virtio_balloon_config_read {
VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
......@@ -77,14 +81,20 @@ struct virtio_balloon {
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
bool stop_update;
/* Bitmap to indicate whether reading the related config fields is needed */
unsigned long config_read_bitmap;
/* The list of allocated free pages, waiting to be given back to mm */
struct list_head free_page_list;
spinlock_t free_page_list_lock;
/* The number of free page blocks on the above list */
unsigned long num_free_page_blocks;
/* The cmd id received from host */
u32 cmd_id_received;
/*
* The cmd id received from the host.
* Read it via virtio_balloon_cmd_id_received() to get the latest value
* sent by the host.
*/
u32 cmd_id_received_cache;
/* The cmd id that is actively in use */
__virtio32 cmd_id_active;
/* Buffer to store the stop sign */
......@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
return num_returned;
}
static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
return;
/* No need to queue the work if the bit was already set. */
if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap))
return;
queue_work(vb->balloon_wq, &vb->report_free_page_work);
}
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
unsigned long flags;
s64 diff = towards_target(vb);
if (diff) {
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update)
queue_work(system_freezable_wq,
&vb->update_balloon_size_work);
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
virtio_cread(vdev, struct virtio_balloon_config,
free_page_report_cmd_id, &vb->cmd_id_received);
if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
/* Pass ULONG_MAX to give back all the free pages */
return_free_pages_to_mm(vb, ULONG_MAX);
} else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
vb->cmd_id_received !=
virtio32_to_cpu(vdev, vb->cmd_id_active)) {
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update) {
queue_work(vb->balloon_wq,
&vb->report_free_page_work);
}
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update) {
queue_work(system_freezable_wq,
&vb->update_balloon_size_work);
virtio_balloon_queue_free_page_work(vb);
}
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static void update_balloon_size(struct virtio_balloon *vb)
......@@ -453,9 +457,12 @@ static void update_balloon_size_func(struct work_struct *work)
update_balloon_size_work);
diff = towards_target(vb);
if (!diff)
return;
if (diff > 0)
diff -= fill_balloon(vb, diff);
else if (diff < 0)
else
diff += leak_balloon(vb, -diff);
update_balloon_size(vb);
......@@ -527,6 +534,17 @@ static int init_vqs(struct virtio_balloon *vb)
return 0;
}
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap))
virtio_cread(vb->vdev, struct virtio_balloon_config,
free_page_report_cmd_id,
&vb->cmd_id_received_cache);
return vb->cmd_id_received_cache;
}
static int send_cmd_id_start(struct virtio_balloon *vb)
{
struct scatterlist sg;
......@@ -537,7 +555,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
while (virtqueue_get_buf(vq, &unused))
;
vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
if (!err)
......@@ -620,7 +639,8 @@ static int send_free_pages(struct virtio_balloon *vb)
* stop the reporting.
*/
cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
if (cmd_id_active != vb->cmd_id_received)
if (unlikely(cmd_id_active !=
virtio_balloon_cmd_id_received(vb)))
break;
/*
......@@ -637,11 +657,9 @@ static int send_free_pages(struct virtio_balloon *vb)
return 0;
}
static void report_free_page_func(struct work_struct *work)
static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
int err;
struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
report_free_page_work);
struct device *dev = &vb->vdev->dev;
/* Start by sending the received cmd id to host with an outbuf. */
......@@ -659,6 +677,23 @@ static void report_free_page_func(struct work_struct *work)
dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}
static void report_free_page_func(struct work_struct *work)
{
struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
report_free_page_work);
u32 cmd_id_received;
cmd_id_received = virtio_balloon_cmd_id_received(vb);
if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
/* Pass ULONG_MAX to give back all the free pages */
return_free_pages_to_mm(vb, ULONG_MAX);
} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
cmd_id_received !=
virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
virtio_balloon_report_free_page(vb);
}
}
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
......@@ -885,12 +920,11 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
vb->num_free_page_blocks = 0;
spin_lock_init(&vb->free_page_list_lock);
INIT_LIST_HEAD(&vb->free_page_list);
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
......
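The config_read_bitmap introduced above is a one-shot latch between the
config-changed callback, which must not sleep and so cannot issue a
virtio_cread() on every transport, and the workqueue that can. Reduced to
its essentials (READ_CMD_ID, bitmap, wq, work and cached are hypothetical
names):

	/* producer, config interrupt: note that a read is pending */
	if (!test_and_set_bit(READ_CMD_ID, &bitmap))
		queue_work(wq, &work);

	/* consumer, work item: perform the (possibly sleeping) read once */
	if (test_and_clear_bit(READ_CMD_ID, &bitmap))
		virtio_cread(vdev, struct virtio_balloon_config,
			     free_page_report_cmd_id, &cached);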
......@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
int i, err;
int i, err, queue_idx = 0;
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vm_dev);
......@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return err;
for (i = 0; i < nvqs; ++i) {
vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
......
......@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
int i, err, nvectors, allocated_vectors;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
......@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
msix_vec);
if (IS_ERR(vqs[i])) {
......@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err;
int i, err, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
......@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
vqs[i] = NULL;
continue;
}
vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
......
......@@ -676,6 +676,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
void *ret;
unsigned int i;
u16 last_used;
bool nomore;
START_USE(vq);
......@@ -684,14 +685,15 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
return NULL;
}
if (!more_used_split(vq)) {
nomore = !more_used_split(vq);
if (nomore) {
pr_debug("No more buffers in queue\n");
END_USE(vq);
return NULL;
}
/* Only get used array entries after they have been exposed by host. */
virtio_rmb(vq->weak_barriers);
vq = dependent_ptr_mb(vq, nomore);
last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
i = virtio32_to_cpu(_vq->vdev,
......@@ -1339,8 +1341,10 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used, id;
bool nomore;
void *ret;
START_USE(vq);
if (unlikely(vq->broken)) {
......@@ -1348,14 +1352,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
return NULL;
}
if (!more_used_packed(vq)) {
nomore = !more_used_packed(vq);
if (nomore) {
pr_debug("No more buffers in queue\n");
END_USE(vq);
return NULL;
}
/* Only get used elements after they have been exposed by host. */
virtio_rmb(vq->weak_barriers);
vq = dependent_ptr_mb(vq, nomore);
last_used = vq->last_used_idx;
id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
......
......@@ -70,6 +70,24 @@
#define __smp_read_barrier_depends() read_barrier_depends()
#endif
#if defined(COMPILER_HAS_OPTIMIZER_HIDE_VAR) && \
!defined(ARCH_NEEDS_READ_BARRIER_DEPENDS)
#define dependent_ptr_mb(ptr, val) ({ \
long dependent_ptr_mb_val = (long)(val); \
long dependent_ptr_mb_ptr = (long)(ptr) - dependent_ptr_mb_val; \
\
BUILD_BUG_ON(sizeof(val) > sizeof(long)); \
OPTIMIZER_HIDE_VAR(dependent_ptr_mb_val); \
(typeof(ptr))(dependent_ptr_mb_ptr + dependent_ptr_mb_val); \
})
#else
#define dependent_ptr_mb(ptr, val) ({ mb(); (ptr); })
#endif
#ifdef CONFIG_SMP
#ifndef smp_mb
......
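The virtio_ring changes above show the intended use of this macro: the load
of the used index (via more_used_*()) is ordered before later loads through
vq by threading the flag through dependent_ptr_mb(), assuming callers touch
the ring only via the returned pointer:

	bool nomore = !more_used_split(vq);

	if (nomore)
		return NULL;
	/* all later vq-> loads now carry a dependency on the index load */
	vq = dependent_ptr_mb(vq, nomore);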
......@@ -161,9 +161,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
__asm__ ("" : "=rm" (var) : "0" (var))
#define COMPILER_HAS_OPTIMIZER_HIDE_VAR 1
#endif
/* Not-quite-unique ID. */
......
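dependent_ptr_mb() leans on this barrier: hiding the value operand keeps the
compiler from proving that (ptr - val) + val is just ptr and deleting the
dependency. The trick in isolation, with hypothetical names:

	long v = (long)flag;		/* value the pointer must depend on */
	long p = (long)ptr - v;

	OPTIMIZER_HIDE_VAR(v);		/* v is now opaque: p + v won't fold */
	ptr = (typeof(ptr))(p + v);	/* carries a dependency on flag */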
......@@ -12,6 +12,11 @@ struct irq_affinity;
/**
* virtio_config_ops - operations for configuring a virtio device
* Note: Do not assume that a transport implements all of the operations, or
* that getting/setting a value is a simple read/write! Generally speaking,
* any of @get/@set, @get_status/@set_status, or @get_features/
* @finalize_features are NOT safe to be called from an atomic
* context.
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
......@@ -22,7 +27,7 @@ struct irq_affinity;
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
* @generation: config generation counter
* @generation: config generation counter (optional)
* vdev: the virtio_device
* Returns the config generation counter
* @get_status: read the status byte
......@@ -48,17 +53,17 @@ struct irq_affinity;
* @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 32 feature bits (all we currently need).
* Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
* Returns 0 on success or error status
* @bus_name: return the bus name associated with the device
* @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
* the caller can then copy.
* @set_vq_affinity: set the affinity for a virtqueue.
* @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
*/
typedef void vq_callback_t(struct virtqueue *);
......
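Because @generation (and @bus_name, @set_vq_affinity, @get_vq_affinity) may
be left NULL, callers test the pointer before the call; the config-read
retry loop in virtio_cread_many(), for instance, is built on a guard of
roughly this shape (the helper name is hypothetical):

	static u32 config_generation(struct virtio_device *vdev)
	{
		/* optional op: transports without a counter report 0 */
		return vdev->config->generation ?
		       vdev->config->generation(vdev) : 0;
	}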