Merge "Merge 479174d402
("Merge tag 'platform-drivers-x86-v6.1-5' of git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86") into android-mainline" into android-mainline
This commit is contained in:
commit
6fe5cd619c
@@ -7213,14 +7213,13 @@ veto the transition.
:Parameters: args[0] is the maximum poll time in nanoseconds
:Returns: 0 on success; -1 on error

This capability overrides the kvm module parameter halt_poll_ns for the
target VM.
KVM_CAP_HALT_POLL overrides the kvm.halt_poll_ns module parameter to set the
maximum halt-polling time for all vCPUs in the target VM. This capability can
be invoked at any time and any number of times to dynamically change the
maximum halt-polling time.

VCPU polling allows a VCPU to poll for wakeup events instead of immediately
scheduling during guest halts. The maximum time a VCPU can spend polling is
controlled by the kvm module parameter halt_poll_ns. This capability allows
the maximum halt time to specified on a per-VM basis, effectively overriding
the module parameter for the target VM.
See Documentation/virt/kvm/halt-polling.rst for more information on halt
polling.

7.21 KVM_CAP_X86_USER_SPACE_MSR
-------------------------------
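As context for the capability documented above: userspace enables KVM_CAP_HALT_POLL with the regular KVM_ENABLE_CAP ioctl on the VM file descriptor, passing the maximum poll time in args[0]. A minimal sketch, assuming a freshly created VM; the 200 microsecond value and the bare-bones error handling are illustrative only:

/* Illustrative userspace sketch: cap per-VM halt polling at 200us. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vm_halt_poll_ns(int vm_fd, __u64 max_poll_ns)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HALT_POLL;
	cap.args[0] = max_poll_ns;	/* maximum poll time in nanoseconds */

	/* Returns 0 on success; -1 on error, as documented above. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm_fd = kvm_fd < 0 ? -1 : ioctl(kvm_fd, KVM_CREATE_VM, 0);

	if (vm_fd < 0 || set_vm_halt_poll_ns(vm_fd, 200000))
		perror("KVM_CAP_HALT_POLL");
	return 0;
}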
@@ -119,6 +119,19 @@ These module parameters can be set from the debugfs files in:
Note: that these module parameters are system wide values and are not able to
be tuned on a per vm basis.

Any changes to these parameters will be picked up by new and existing vCPUs the
next time they halt, with the notable exception of VMs using KVM_CAP_HALT_POLL
(see next section).

KVM_CAP_HALT_POLL
=================

KVM_CAP_HALT_POLL is a VM capability that allows userspace to override halt_poll_ns
on a per-VM basis. VMs using KVM_CAP_HALT_POLL ignore halt_poll_ns completely (but
still obey halt_poll_ns_grow, halt_poll_ns_grow_start, and halt_poll_ns_shrink).

See Documentation/virt/kvm/api.rst for more information on this capability.

Further Notes
=============
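The interaction described above boils down to a simple selection rule for the polling ceiling: a VM that has used KVM_CAP_HALT_POLL takes its ceiling from the per-VM value instead of halt_poll_ns, while the grow/shrink parameters keep applying. A rough C sketch of that rule follows; it is illustrative only, not kernel source, and the struct, field names, and parameter values are made up for the example:

/* Illustrative sketch of the documented behaviour -- not kernel code. */
struct vm_halt_poll {
	int override_set;              /* KVM_CAP_HALT_POLL has been used */
	unsigned int max_halt_poll_ns; /* args[0] from KVM_ENABLE_CAP */
};

/* Stand-ins for the kvm module parameters (values here are arbitrary). */
static unsigned int halt_poll_ns = 200000;
static unsigned int halt_poll_ns_grow = 2;
static unsigned int halt_poll_ns_grow_start = 10000;
static unsigned int halt_poll_ns_shrink;

static unsigned int effective_limit(const struct vm_halt_poll *vm)
{
	/* A VM that used KVM_CAP_HALT_POLL ignores halt_poll_ns entirely. */
	return vm->override_set ? vm->max_halt_poll_ns : halt_poll_ns;
}

static unsigned int adjust_poll_ns(unsigned int cur, int poll_was_too_short,
				   const struct vm_halt_poll *vm)
{
	unsigned int limit = effective_limit(vm);

	if (poll_was_too_short)
		/* grow: still driven by the module parameters */
		cur = cur ? cur * halt_poll_ns_grow : halt_poll_ns_grow_start;
	else
		/* shrink: a divisor of 0 means reset to 0 */
		cur = halt_poll_ns_shrink ? cur / halt_poll_ns_shrink : 0;

	return cur > limit ? limit : cur;
}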
@@ -17,4 +17,5 @@ KVM

   locking
   vcpu-requests
   halt-polling
   review-checklist

@@ -10,7 +10,6 @@ KVM for x86 systems
   amd-memory-encryption
   cpuid
   errata
   halt-polling
   hypercalls
   mmu
   msr
@@ -37,7 +37,22 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long start = (unsigned long)page_address(page);

	dcache_clean_poc(start, start + size);
	/*
	 * The architecture only requires a clean to the PoC here in order to
	 * meet the requirements of the DMA API. However, some vendors (i.e.
	 * Qualcomm) abuse the DMA API for transferring buffers from the
	 * non-secure to the secure world, resetting the system if a non-secure
	 * access shows up after the buffer has been transferred:
	 *
	 * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
	 *
	 * Using clean+invalidate appears to make this issue less likely, but
	 * the drivers themselves still need fixing as the CPU could issue a
	 * speculative read from the buffer via the linear mapping irrespective
	 * of the cache maintenance we use. Once the drivers are fixed, we can
	 * relax this to a clean operation.
	 */
	dcache_clean_inval_poc(start, start + size);
}

#ifdef CONFIG_IOMMU_DMA
@@ -546,8 +546,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
	if (test_kvm_facility(vcpu->kvm, 139)) {
		scb_s->ecd |= scb_o->ecd & ECD_MEF;
		scb_s->epdx = scb_o->epdx;
	}

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
@@ -10574,8 +10574,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
				vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
				vcpu->mmio_needed = 0;
				r = 0;
				goto out;
			}
			goto out;
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
@@ -813,7 +813,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/*
	 * Set this now to ensure that drivers see the correct q->memory value
	 * in the queue_setup op.
	 */
	mutex_lock(&q->mmap_lock);
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
	set_queue_coherency(q, non_coherent_mem);

	/*

@@ -823,22 +829,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		return ret;
		goto error;

	/* Check that driver has set sane values */
	if (WARN_ON(!num_planes))
		return -EINVAL;
	if (WARN_ON(!num_planes)) {
		ret = -EINVAL;
		goto error;
	}

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i]))
			return -EINVAL;
		if (WARN_ON(!plane_sizes[i])) {
			ret = -EINVAL;
			goto error;
		}

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		return -ENOMEM;
		ret = -ENOMEM;
		goto error;
	}

	/*

@@ -879,7 +890,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 * from q->num_buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);

@@ -895,6 +907,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
	q->waiting_for_buffers = !q->is_output;

	return 0;

error:
	mutex_lock(&q->mmap_lock);
	q->memory = VB2_MEMORY_UNKNOWN;
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);

@@ -906,6 +924,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	bool no_previous_buffers = !q->num_buffers;
	int ret;

	if (q->num_buffers == VB2_MAX_FRAME) {

@@ -913,13 +932,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
	if (no_previous_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		/*
		 * Set this now to ensure that drivers see the correct q->memory
		 * value in the queue_setup op.
		 */
		mutex_lock(&q->mmap_lock);
		q->memory = memory;
		mutex_unlock(&q->mmap_lock);
		q->waiting_for_buffers = !q->is_output;
		set_queue_coherency(q, non_coherent_mem);
	} else {

@@ -945,14 +970,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		return ret;
		goto error;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
					      num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		return -ENOMEM;
		ret = -ENOMEM;
		goto error;
	}

	/*

@@ -983,7 +1009,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 * from q->num_buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);

@@ -998,6 +1025,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
	*count = allocated_buffers;

	return 0;

error:
	if (no_previous_buffers) {
		mutex_lock(&q->mmap_lock);
		q->memory = VB2_MEMORY_UNKNOWN;
		mutex_unlock(&q->mmap_lock);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
@@ -2164,6 +2199,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
	 * used and fileio isn't active.
	 */
	lockdep_assert_held(&q->mmap_lock);

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,

@@ -2265,11 +2316,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
	int ret;
	unsigned long length;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */

@@ -2291,14 +2337,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)

	mutex_lock(&q->mmap_lock);

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "mmap: file io in progress\n");
		ret = -EBUSY;
		goto unlock;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)

@@ -2351,22 +2392,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
	void *vaddr;
	int ret;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}
	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;
		goto unlock;

	vb = q->bufs[buffer];

	vaddr = vb2_plane_vaddr(vb, plane);
	mutex_unlock(&q->mmap_lock);
	return vaddr ? (unsigned long)vaddr : -EINVAL;

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
@@ -386,7 +386,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);
@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
@@ -332,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,


struct xenvif_tx_cb {
	u16 pending_idx;
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,

@@ -370,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
	return skb;
}

static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							 struct sk_buff *skb,
							 struct xen_netif_tx_request *txp,
							 struct gnttab_map_grant_ref *gop,
							 unsigned int frag_overflow,
							 struct sk_buff *nskb)
static void xenvif_get_requests(struct xenvif_queue *queue,
				struct sk_buff *skb,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txfrags,
				unsigned *copy_ops,
				unsigned *map_ops,
				unsigned int frag_overflow,
				struct sk_buff *nskb,
				unsigned int extra_count,
				unsigned int data_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	u16 pending_idx;
	pending_ring_idx_t index;
	unsigned int nr_slots;
	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
	struct xen_netif_tx_request *txp = first;

	nr_slots = shinfo->nr_frags;
	nr_slots = shinfo->nr_frags + 1;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
	copy_count(skb) = 0;

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
	/* Create copy ops for exactly data_len bytes into the skb head. */
	__skb_put(skb, data_len);
	while (data_len > 0) {
		int amount = data_len > txp->size ? txp->size : data_len;

		cop->source.u.ref = txp->gref;
		cop->source.domid = queue->vif->domid;
		cop->source.offset = txp->offset;

		cop->dest.domid = DOMID_SELF;
		cop->dest.offset = (offset_in_page(skb->data +
						   skb_headlen(skb) -
						   data_len)) & ~XEN_PAGE_MASK;
		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
				       - data_len);

		cop->len = amount;
		cop->flags = GNTCOPY_source_gref;

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];
		callback_param(queue, pending_idx).ctx = NULL;
		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
		copy_count(skb)++;

		cop++;
		data_len -= amount;

		if (amount == txp->size) {
			/* The copy op covered the full tx_request */

			memcpy(&queue->pending_tx_info[pending_idx].req,
			       txp, sizeof(*txp));
			queue->pending_tx_info[pending_idx].extra_count =
				(txp == first) ? extra_count : 0;

			if (txp == first)
				txp = txfrags;
			else
				txp++;
			queue->pending_cons++;
			nr_slots--;
		} else {
			/* The copy op partially covered the tx_request.
			 * The remainder will be mapped.
			 */
			txp->offset += amount;
			txp->size -= amount;
		}
	}

	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
		xenvif_tx_create_map_op(queue, pending_idx, txp,
					txp == first ? extra_count : 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);

		if (txp == first)
			txp = txfrags;
		else
			txp++;
	}

	if (frag_overflow) {

@@ -415,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
	(*copy_ops) = cop - queue->tx_copy_ops;
	(*map_ops) = gop - queue->tx_map_ops;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -451,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	u16 pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */

@@ -462,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
				frag_get_pending_idx(&shinfo->frags[0]) ==
					copy_pending_idx(skb, copy_count(skb) - 1);
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	for (i = 0; i < copy_count(skb); i++) {
		int newerr;

		/* Check status of header. */
		pending_idx = copy_pending_idx(skb, i);

		newerr = (*gopp_copy)->status;
		if (likely(!newerr)) {
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
		} else {
			err = newerr;
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
					   (*gopp_copy)->status,
					   pending_idx,
					   (*gopp_copy)->source.u.ref);
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_ERROR);
		}
		(*gopp_copy)++;
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {

@@ -526,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
		if (err)
			continue;

		/* First error: if the header haven't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -803,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

@@ -885,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
			continue;
		}

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);

		if (unlikely(ret < 0))
			break;

@@ -912,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;
		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
			data_len = txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {

@@ -925,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */

@@ -988,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
					type);
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);
		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
				    map_ops, frag_overflow, nskb, extra_count,
				    data_len);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

@@ -1114,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		pending_idx = copy_pending_idx(skb, 0);
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */

@@ -1135,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)

@@ -1332,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	unsigned nr_mops = 0, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
	return false;
}

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
		ret = false;
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

	return ret;
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
@@ -739,8 +739,14 @@ static void amd_pmc_s2idle_prepare(void)
static void amd_pmc_s2idle_check(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	struct smu_metrics table;
	int rc;

	/* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
	if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
	    table.s0i3_last_entry_status)
		usleep_range(10000, 20000);

	/* Dump the IdleMask before we add to the STB */
	amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
@@ -605,6 +605,14 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
			set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
			queue = true;
		}
		/*
		 * We could race with cookie_lru which may set LRU_DISCARD bit
		 * but has yet to run the cookie state machine. If this happens
		 * and another thread tries to use the cookie, clear LRU_DISCARD
		 * so we don't end up withdrawing the cookie while in use.
		 */
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
			fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
		break;

	case FSCACHE_COOKIE_STATE_FAILED:

@@ -66,6 +66,7 @@ enum fscache_cookie_trace {
	fscache_cookie_put_work,
	fscache_cookie_see_active,
	fscache_cookie_see_lru_discard,
	fscache_cookie_see_lru_discard_clear,
	fscache_cookie_see_lru_do_one,
	fscache_cookie_see_relinquish,
	fscache_cookie_see_withdraw,

@@ -149,6 +150,7 @@ enum fscache_access_trace {
	EM(fscache_cookie_put_work, "PQ work ") \
	EM(fscache_cookie_see_active, "- activ") \
	EM(fscache_cookie_see_lru_discard, "- x-lru") \
	EM(fscache_cookie_see_lru_discard_clear,"- lrudc") \
	EM(fscache_cookie_see_lru_do_one, "- lrudo") \
	EM(fscache_cookie_see_relinquish, "- x-rlq") \
	EM(fscache_cookie_see_withdraw, "- x-wth") \
@@ -2179,14 +2179,15 @@ long __do_semtimedop(int semid, struct sembuf *sops,
	 * scenarios where we were awakened externally, during the
	 * window between wake_q_add() and wake_up_q().
	 */
	rcu_read_lock();
	error = READ_ONCE(queue.status);
	if (error != -EINTR) {
		/* see SEM_BARRIER_2 for purpose/pairing */
		smp_acquire__after_ctrl_dep();
		rcu_read_unlock();
		goto out;
	}

	rcu_read_lock();
	locknum = sem_lock(sma, sops, nsops);

	if (!ipc_valid_object(&sma->sem_perm))
@@ -267,13 +267,14 @@ int proc_dostring(struct ctl_table *table, int write,
			       ppos);
}

static size_t proc_skip_spaces(char **buf)
static void proc_skip_spaces(char **buf, size_t *size)
{
	size_t ret;
	char *tmp = skip_spaces(*buf);
	ret = tmp - *buf;
	*buf = tmp;
	return ret;
	while (*size) {
		if (!isspace(**buf))
			break;
		(*size)--;
		(*buf)++;
	}
}

static void proc_skip_char(char **buf, size_t *size, const char v)

@@ -342,13 +343,12 @@ static int proc_get_long(char **buf, size_t *size,
			 unsigned long *val, bool *neg,
			 const char *perm_tr, unsigned perm_tr_len, char *tr)
{
	int len;
	char *p, tmp[TMPBUFLEN];
	ssize_t len = *size;

	if (!*size)
	if (len <= 0)
		return -EINVAL;

	len = *size;
	if (len > TMPBUFLEN - 1)
		len = TMPBUFLEN - 1;

@@ -521,7 +521,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
		bool neg;

		if (write) {
			left -= proc_skip_spaces(&p);
			proc_skip_spaces(&p, &left);

			if (!left)
				break;

@@ -548,7 +548,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
	if (!write && !first && left && !err)
		proc_put_char(&buffer, &left, '\n');
	if (write && !err && left)
		left -= proc_skip_spaces(&p);
		proc_skip_spaces(&p, &left);
	if (write && first)
		return err ? : -EINVAL;
	*lenp -= left;

@@ -590,7 +590,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
	if (left > PAGE_SIZE - 1)
		left = PAGE_SIZE - 1;

	left -= proc_skip_spaces(&p);
	proc_skip_spaces(&p, &left);
	if (!left) {
		err = -EINVAL;
		goto out_free;

@@ -610,7 +610,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
	}

	if (!err && left)
		left -= proc_skip_spaces(&p);
		proc_skip_spaces(&p, &left);

out_free:
	if (err)

@@ -1075,7 +1075,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
		if (write) {
			bool neg;

			left -= proc_skip_spaces(&p);
			proc_skip_spaces(&p, &left);
			if (!left)
				break;

@@ -1104,7 +1104,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
	if (!write && !first && left && !err)
		proc_put_char(&buffer, &left, '\n');
	if (write && !err)
		left -= proc_skip_spaces(&p);
		proc_skip_spaces(&p, &left);
	if (write && first)
		return err ? : -EINVAL;
	*lenp -= left;