commit 19725496da
Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
@@ -9074,7 +9074,7 @@ S: Maintained
 F: drivers/usb/mtu3/
 
 MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
-M: Peter Senna Tschudin <peter.senna@collabora.com>
+M: Peter Senna Tschudin <peter.senna@gmail.com>
 M: Martin Donnelly <martin.donnelly@ge.com>
 M: Martyn Welch <martyn.welch@collabora.co.uk>
 S: Maintained
Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 		struct rusage32 __user *, ur)
 {
-	unsigned int status = 0;
 	struct rusage r;
-	long err = kernel_wait4(pid, &status, options, &r);
+	long err = kernel_wait4(pid, ustatus, options, &r);
 	if (err <= 0)
 		return err;
-	if (put_user(status, ustatus))
-		return -EFAULT;
 	if (!ur)
 		return err;
 	if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
@@ -692,7 +692,7 @@ switch: switch@0 {
 			dsa,member = <0 0>;
 			eeprom-length = <512>;
 			interrupt-parent = <&gpio6>;
-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
@@ -159,13 +159,7 @@ soundcard {
 
 		dais = <&mcbsp2_port>, <&mcbsp3_port>;
 	};
-};
-
-&dss {
-	status = "okay";
-};
 
-&gpio6 {
 	pwm8: dmtimer-pwm-8 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&vibrator_direction_pin>;
@@ -192,7 +186,10 @@ vibrator {
 		pwm-names = "enable", "direction";
 		direction-duty-cycle-ns = <10000000>;
 	};
+};
 
+&dss {
+	status = "okay";
 };
 
 &dsi1 {
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(mm);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
 	/*
 	 * partially initialize the vma for the sampling buffer
 	 */
-	vma->vm_mm = mm;
 	vma->vm_file = get_file(filp);
 	vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
 	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
@@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	return 0;
 
 error:
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 error_kmem:
 	pfm_rvfree(smpl_buf, size);
 
@@ -114,10 +114,8 @@ ia64_init_addr_space (void)
	 * the problem. When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(current->mm);
 	if (vma) {
-		INIT_LIST_HEAD(&vma->anon_vma_chain);
-		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -125,7 +123,7 @@ ia64_init_addr_space (void)
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, vma);
+			vm_area_free(vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
@@ -133,10 +131,8 @@ ia64_init_addr_space (void)
 
	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+		vma = vm_area_alloc(current->mm);
		if (vma) {
-			INIT_LIST_HEAD(&vma->anon_vma_chain);
-			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -144,7 +140,7 @@ ia64_init_addr_space (void)
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
-				kmem_cache_free(vm_area_cachep, vma);
+				vm_area_free(vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-	void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+	void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
 	/* Flush the DDR write buffer. */
 	__raw_writel(0x1, flush_reg);
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
	phys_addr_t size = resource_size(rsrc);
 
	*start = fixup_bigphys_addr(rsrc->start, size);
-	*end = rsrc->start + size;
+	*end = rsrc->start + size - 1;
 }
@@ -243,6 +243,7 @@ endif
 cpu-as-$(CONFIG_4xx) += -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200) += -Wa,-me200
+cpu-as-$(CONFIG_E500) += -Wa,-me500
 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
 cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
 
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
@@ -144,7 +144,9 @@ power9_restore_additional_sprs:
	mtspr	SPRN_MMCR1, r4
 
	ld	r3, STOP_MMCR2(r13)
+	ld	r4, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_MMCR2, r3
+	mtspr	SPRN_SPRG3, r4
	blr
 
 /*
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_HARDWARE;
 
	if (mm_iommu_mapped_inc(mem))
@@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
	if (!mem)
		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+			&hpa)))
		return H_HARDWARE;
 
	pua = (void *) vmalloc_to_phys(pua);
@@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
-			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}
 
	if (!prereg) {
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
+	unsigned int pageshift;
	u64 ua;		/* userspace address */
	u64 entries;	/* number of entries in hpas[] */
	u64 *hpas;	/* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
+	unsigned int pageshift;
+	unsigned long flags;
	struct page *page = NULL;
 
	mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		goto unlock_exit;
	}
 
+	/*
+	 * For a starting point for a maximum page size calculation
+	 * we use @ua and @entries natural alignment to allow IOMMU pages
+	 * smaller than huge pages but still bigger than PAGE_SIZE.
+	 */
+	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		}
	}
 populate:
+	pageshift = PAGE_SHIFT;
+	if (PageCompound(page)) {
+		pte_t *pte;
+		struct page *head = compound_head(page);
+		unsigned int compshift = compound_order(head);
+
+		local_irq_save(flags); /* disables as well */
+		pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+		local_irq_restore(flags);
+
+		/* Double check it is still the same pinned page */
+		if (pte && pte_page(*pte) == head &&
+				pageshift == compshift)
+			pageshift = max_t(unsigned int, pageshift,
+					PAGE_SHIFT);
+	}
+	mem->pageshift = min(mem->pageshift, pageshift);
	mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}
 
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
	if (entry >= mem->entries)
		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
	*hpa = *va | (ua & ~PAGE_MASK);
 
	return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
	if (entry >= mem->entries)
		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
	u64 imm64;
	u8 *func;
	u32 true_cond;
+	u32 tmp_idx;
 
	/*
	 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
@@ -649,32 +646,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
 
		/*
@@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 {
	int nr, dotted;
	unsigned long first_adr;
-	unsigned long inst, last_inst = 0;
+	unsigned int inst, last_inst = 0;
	unsigned char val[4];
 
	dotted = 0;
@@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
			dotted = 0;
		last_inst = inst;
		if (praddr)
-			printf(REG" %.8lx", adr, inst);
+			printf(REG" %.8x", adr, inst);
		printf("\t");
		dump_func(inst, adr);
		printf("\n");
@@ -140,7 +140,7 @@ config S390
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FUNCTION_TRACER
	select HAVE_FUTEX_CMPXCHG if FUTEX
-	select HAVE_GCC_PLUGINS
+	select HAVE_GCC_PLUGINS if BROKEN
	select HAVE_KERNEL_BZIP2
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZ4
@@ -63,7 +63,7 @@ config X86
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_REFCOUNT
	select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
-	select ARCH_HAS_UACCESS_MCSAFE if X86_64
+	select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_SG_CHAIN
	select ARCH_HAS_STRICT_KERNEL_RWX
@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
-	max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
-	ds->bts_absolute_maximum = ds->bts_buffer_base + max;
-	ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
+	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+	ds->bts_absolute_maximum = ds->bts_buffer_base +
+					max * BTS_RECORD_SIZE;
+	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+					(max / 16) * BTS_RECORD_SIZE;
	return 0;
 }
 
@@ -7,8 +7,6 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
-#include <asm/nospec-branch.h>
-
 #ifdef APM_ZERO_SEGS
 # define APM_DO_ZERO_SEGS \
	"pushl %%ds\n\t" \
@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
	 * N.B. We do NOT need a cld after the BIOS call
	 * because we always save and restore the flags.
	 */
-	firmware_restrict_branch_speculation_start();
	__asm__ __volatile__(APM_DO_ZERO_SEGS
		"pushl %%edi\n\t"
		"pushl %%ebp\n\t"
@@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
		"=S" (*esi)
		: "a" (func), "b" (ebx_in), "c" (ecx_in)
		: "memory", "cc");
-	firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
	 * N.B. We do NOT need a cld after the BIOS call
	 * because we always save and restore the flags.
	 */
-	firmware_restrict_branch_speculation_start();
	__asm__ __volatile__(APM_DO_ZERO_SEGS
		"pushl %%edi\n\t"
		"pushl %%ebp\n\t"
@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
		"=S" (si)
		: "a" (func), "b" (ebx_in), "c" (ecx_in)
		: "memory", "cc");
-	firmware_restrict_branch_speculation_end();
	return error;
 }
 
@@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
	unsigned long ret;
 
	__uaccess_begin();
-	ret = memcpy_mcsafe(to, from, len);
+	/*
+	 * Note, __memcpy_mcsafe() is explicitly used since it can
+	 * handle exceptions / faults. memcpy_mcsafe() may fall back to
+	 * memcpy() which lacks this handling.
+	 */
+	ret = __memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
 }
@@ -240,6 +240,7 @@
 #include <asm/olpc.h>
 #include <asm/paravirt.h>
 #include <asm/reboot.h>
+#include <asm/nospec-branch.h>
 
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
 extern int (*console_blank_hook)(int);
@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
	gdt[0x40 / 8] = bad_bios_desc;
 
	apm_irq_save(flags);
+	firmware_restrict_branch_speculation_start();
	APM_DO_SAVE_SEGS;
	apm_bios_call_asm(call->func, call->ebx, call->ecx,
			  &call->eax, &call->ebx, &call->ecx, &call->edx,
			  &call->esi);
	APM_DO_RESTORE_SEGS;
+	firmware_restrict_branch_speculation_end();
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
	gdt[0x40 / 8] = bad_bios_desc;
 
	apm_irq_save(flags);
+	firmware_restrict_branch_speculation_start();
	APM_DO_SAVE_SEGS;
	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
					 &call->eax);
	APM_DO_RESTORE_SEGS;
+	firmware_restrict_branch_speculation_end();
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
@@ -2165,9 +2165,6 @@ static ssize_t store_int_with_restart(struct device *s,
	if (check_interval == old_check_interval)
		return ret;
 
-	if (check_interval < 1)
-		check_interval = 1;
-
	mutex_lock(&mce_sysfs_mutex);
	mce_restart();
	mutex_unlock(&mce_sysfs_mutex);
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
	/* Clone the lessor file to create a new file for us */
	DRM_DEBUG_LEASE("Allocating lease file\n");
-	path_get(&lessor_file->f_path);
-	lessee_file = alloc_file(&lessor_file->f_path,
-				 lessor_file->f_mode,
-				 fops_get(lessor_file->f_inode->i_fop));
-
+	lessee_file = filp_clone_open(lessor_file);
	if (IS_ERR(lessee_file)) {
		ret = PTR_ERR(lessee_file);
		goto out_lessee;
	}
 
-	/* Initialize the new file for DRM */
-	DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
-	ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
-	if (ret)
-		goto out_lessee_file;
-
	lessee_priv = lessee_file->private_data;
-
	/* Change the file to a master one */
	drm_master_put(&lessee_priv->master);
	lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
	return 0;
 
-out_lessee_file:
-	fput(lessee_file);
-
 out_lessee:
	drm_master_put(&lessee);
 
@@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
	d_instantiate(path.dentry, inode);
 
	file = alloc_file(&path, OPEN_FMODE(flags), fops);
-	if (IS_ERR(file))
-		goto err_dput;
+	if (IS_ERR(file)) {
+		path_put(&path);
+		goto err_fs;
+	}
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = priv;
 
	return file;
 
-err_dput:
-	path_put(&path);
 err_inode:
	iput(inode);
 err_fs:
@@ -743,16 +743,21 @@ const struct bond_option *bond_opt_get(unsigned int option)
 static int bond_option_mode_set(struct bonding *bond,
				const struct bond_opt_value *newval)
 {
-	if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
+	if (!bond_mode_uses_arp(newval->value)) {
+		if (bond->params.arp_interval) {
		netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
			   newval->string);
		/* disable arp monitoring */
		bond->params.arp_interval = 0;
+		}
+
+		if (!bond->params.miimon) {
		/* set miimon to default value */
		bond->params.miimon = BOND_DEFAULT_MIIMON;
		netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
			   bond->params.miimon);
+		}
	}
 
	if (newval->value == BOND_MODE_ALB)
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
	int err;
 
	err = pm_runtime_get_sync(priv->device);
-	if (err)
+	if (err < 0) {
		pm_runtime_put_noidle(priv->device);
-
		return err;
+	}
+
+	return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 
	} else {
	/* Version 3.1.x or 3.2.x */
-		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+			  CCCR_NISO);
 
		/* Only 3.2.x has NISO Bit implemented */
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
	priv->can.clock.freq = clk_get_rate(cclk);
	priv->mram_base = mram_addr;
 
-	m_can_of_parse_mram(priv, mram_config_vals);
-
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
		goto clk_disable;
	}
 
+	m_can_of_parse_mram(priv, mram_config_vals);
+
	devm_can_led_init(dev);
 
	of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
	return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
	struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 
	pinctrl_pm_select_default_state(dev);
 
-	m_can_init_ram(priv);
-
	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
	if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
		if (ret)
			return ret;
 
+		m_can_init_ram(priv);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
		return 0;
	}
	cdm = of_iomap(np_cdm, 0);
+	if (!cdm) {
+		of_node_put(np_cdm);
+		dev_err(&ofdev->dev, "can't map clock node!\n");
+		return 0;
+	}
 
	if (in_8(&cdm->ipb_clk_sel) & 0x1)
		freq *= 2;
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
 #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
+				    ((u32)(y) << 16) | \
+				    ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
		 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
		 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
+	 * 64-bit logical addresses: this workaround forces usage of 32-bit
+	 * DMA addresses only when such a fw is detected.
+	 */
+	if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+	    PCIEFD_FW_VERSION(3, 3, 0)) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			dev_warn(&pdev->dev,
+				 "warning: can't set DMA mask %llxh (err %d)\n",
+				 DMA_BIT_MASK(32), err);
+	}
+#endif
+
	/* stop system clock */
	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
			    PCIEFD_REG_SYS_CTL_CLR);
@@ -2,6 +2,7 @@
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
+* Copyright (C) 2017 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
		 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
		 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-		 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+		 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
 * struct xcan_priv - This definition define CAN driver instance
 * @can: CAN private data structure.
+* @tx_lock: Lock for synchronizing TX interrupt handling
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sended on the queue
 * @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
 */
 struct xcan_priv {
	struct can_priv can;
+	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
	.brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK 0x0001
+struct xcan_devtype_data {
+	unsigned int caps;
+};
+
 /**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
		usleep_range(500, 10000);
	}
 
+	/* reset clears FIFOs */
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+
	return 0;
 }
 
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 id, dlc, data[2] = {0, 0};
+	unsigned long flags;
 
	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
	priv->tx_head++;
 
	/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
		stats->tx_bytes += cf->can_dlc;
	}
 
+	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+	if (priv->tx_max > 1)
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);
 
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
	return NETDEV_TX_OK;
 }
 
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
	return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:	Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+		return CAN_STATE_ERROR_PASSIVE;
+	else if (status & XCAN_SR_ERRWRN_MASK)
+		return CAN_STATE_ERROR_WARNING;
+	else
+		return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:	Pointer to net_device structure
+ * @new_state:	The new CAN state to be set
+ * @cf:	Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+				 enum can_state new_state,
+				 struct can_frame *cf)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+	priv->can.state = new_state;
+
+	if (cf) {
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[6] = txerr;
+		cf->data[7] = rxerr;
+	}
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_PASSIVE:
+		priv->can.can_stats.error_passive++;
+		if (cf)
+			cf->data[1] = (rxerr > 127) ?
+					CAN_ERR_CRTL_RX_PASSIVE :
+					CAN_ERR_CRTL_TX_PASSIVE;
+		break;
+	case CAN_STATE_ERROR_WARNING:
+		priv->can.can_stats.error_warning++;
+		if (cf)
+			cf->data[1] |= (txerr > rxerr) ?
+					CAN_ERR_CRTL_TX_WARNING :
+					CAN_ERR_CRTL_RX_WARNING;
+		break;
+	case CAN_STATE_ERROR_ACTIVE:
+		if (cf)
+			cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+		break;
+	default:
+		/* non-ERROR states are handled elsewhere */
+		WARN_ON(1);
+		break;
+	}
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:	Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	enum can_state old_state = priv->can.state;
+	enum can_state new_state;
+
+	/* changing error state due to successful frame RX/TX can only
+	 * occur from these states
+	 */
+	if (old_state != CAN_STATE_ERROR_WARNING &&
+	    old_state != CAN_STATE_ERROR_PASSIVE)
+		return;
+
+	new_state = xcan_current_error_state(ndev);
+
+	if (new_state != old_state) {
+		struct sk_buff *skb;
+		struct can_frame *cf;
+
+		skb = alloc_can_err_skb(ndev, &cf);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+		if (skb) {
+			struct net_device_stats *stats = &ndev->stats;
+
+			stats->rx_packets++;
+			stats->rx_bytes += cf->can_dlc;
+			netif_rx(skb);
+		}
+	}
+}
+
 /**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
-	u32 err_status, status, txerr = 0, rxerr = 0;
+	u32 err_status;
 
	skb = alloc_can_err_skb(ndev, &cf);
 
	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-	txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-	rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-	status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
-	} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-		priv->can.state = CAN_STATE_ERROR_PASSIVE;
-		priv->can.can_stats.error_passive++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] = (rxerr > 127) ?
-					CAN_ERR_CRTL_RX_PASSIVE :
-					CAN_ERR_CRTL_TX_PASSIVE;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
-	} else if (status & XCAN_SR_ERRWRN_MASK) {
-		priv->can.state = CAN_STATE_ERROR_WARNING;
-		priv->can.can_stats.error_warning++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] |= (txerr > rxerr) ?
-					CAN_ERR_CRTL_TX_WARNING :
-					CAN_ERR_CRTL_RX_WARNING;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
+	} else {
+		enum can_state new_state = xcan_current_error_state(ndev);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
	}
 
	/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
-		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-		if (isr & XCAN_IXR_RXOK_MASK) {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-				XCAN_IXR_RXOK_MASK);
		work_done += xcan_rx(ndev);
-		} else {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-				XCAN_IXR_RXNEMP_MASK);
-			break;
-		}
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}
 
-	if (work_done)
+	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
+		xcan_update_error_state_after_rxtx(ndev);
+	}
 
	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+		ier |= XCAN_IXR_RXNEMP_MASK;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
+	unsigned int frames_in_fifo;
+	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+	unsigned long flags;
+	int retries = 0;
 
-	while ((priv->tx_head - priv->tx_tail > 0) &&
-			(isr & XCAN_IXR_TXOK_MASK)) {
+	/* Synchronize with xmit as we need to know the exact number
+	 * of frames in the FIFO to stay in sync due to the TXFEMP
+	 * handling.
+	 * This also prevents a race between netif_wake_queue() and
+	 * netif_stop_queue().
+	 */
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
+	frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		return;
+	}
+
+	/* Check if 2 frames were sent (TXOK only means that at least 1
+	 * frame was sent).
+	 */
+	if (frames_in_fifo > 1) {
+		WARN_ON(frames_in_fifo > priv->tx_max);
+
+		/* Synchronize TXOK and isr so that after the loop:
+		 * (1) isr variable is up-to-date at least up to TXOK clear
+		 *     time. This avoids us clearing a TXOK of a second frame
+		 *     but not noticing that the FIFO is now empty and thus
+		 *     marking only a single frame as sent.
+		 * (2) No TXOK is left. Having one could mean leaving a
+		 *     stray TXOK as we might process the associated frame
+		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
+		 *     clear to satisfy (1).
+		 */
+		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+		}
+
+		if (isr & XCAN_IXR_TXFEMP_MASK) {
+			/* nothing in FIFO anymore */
+			frames_sent = frames_in_fifo;
+		}
+	} else {
+		/* single frame in fifo, just clear TXOK */
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+	}
+
+	while (frames_sent--) {
		can_get_echo_skb(ndev, priv->tx_tail %
				priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
-		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}
-	can_led_event(ndev, CAN_LED_EVENT_TX);
+
	netif_wake_queue(ndev);
+
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	can_led_event(ndev, CAN_LED_EVENT_TX);
+	xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 struct net_device *ndev = (struct net_device *)dev_id;
 struct xcan_priv *priv = netdev_priv(ndev);
 u32 isr, ier;
+u32 isr_errors;

 /* Get the interrupt status from Xilinx CAN */
 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 xcan_tx_interrupt(ndev, isr);

 /* Check for the type of error interrupt and Processing it */
-if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-XCAN_IXR_ARBLST_MASK));
+isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+if (isr_errors) {
+priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
 xcan_err_interrupt(ndev, isr);
 }

 /* Check for the type of receive interrupt and Processing it */
-if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+if (isr & XCAN_IXR_RXNEMP_MASK) {
 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ier &= ~XCAN_IXR_RXNEMP_MASK;
 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 napi_schedule(&priv->napi);
 }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 static void xcan_chip_stop(struct net_device *ndev)
 {
 struct xcan_priv *priv = netdev_priv(ndev);
-u32 ier;

 /* Disable interrupts and leave the can in configuration mode */
-ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-ier &= ~XCAN_INTR_ALL;
-priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+set_reset_mode(ndev);
 priv->can.state = CAN_STATE_STOPPED;
 }

@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
 */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-if (!device_may_wakeup(dev))
-return pm_runtime_force_suspend(dev);
+struct net_device *ndev = dev_get_drvdata(dev);

-return 0;
+if (netif_running(ndev)) {
+netif_stop_queue(ndev);
+netif_device_detach(ndev);
+xcan_chip_stop(ndev);
+}
+
+return pm_runtime_force_suspend(dev);
 }

 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
 */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-if (!device_may_wakeup(dev))
-return pm_runtime_force_resume(dev);
+struct net_device *ndev = dev_get_drvdata(dev);
+int ret;

+ret = pm_runtime_force_resume(dev);
+if (ret) {
+dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+return ret;
+}
+
+if (netif_running(ndev)) {
+ret = xcan_chip_start(ndev);
+if (ret) {
+dev_err(dev, "xcan_chip_start failed on resume\n");
+return ret;
+}
+
+netif_device_attach(ndev);
+netif_start_queue(ndev);
+}
+
 return 0;
 }

 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
 struct net_device *ndev = dev_get_drvdata(dev);
 struct xcan_priv *priv = netdev_priv(ndev);

-if (netif_running(ndev)) {
-netif_stop_queue(ndev);
-netif_device_detach(ndev);
-}
-
-priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-priv->can.state = CAN_STATE_SLEEPING;
-
 clk_disable_unprepare(priv->bus_clk);
 clk_disable_unprepare(priv->can_clk);

@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
 struct net_device *ndev = dev_get_drvdata(dev);
 struct xcan_priv *priv = netdev_priv(ndev);
 int ret;
-u32 isr, status;

 ret = clk_prepare_enable(priv->bus_clk);
 if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
 return ret;
 }

-priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-if (netif_running(ndev)) {
-if (isr & XCAN_IXR_BSOFF_MASK) {
-priv->can.state = CAN_STATE_BUS_OFF;
-priv->write_reg(priv, XCAN_SRR_OFFSET,
-XCAN_SRR_RESET_MASK);
-} else if ((status & XCAN_SR_ESTAT_MASK) ==
-XCAN_SR_ESTAT_MASK) {
-priv->can.state = CAN_STATE_ERROR_PASSIVE;
-} else if (status & XCAN_SR_ERRWRN_MASK) {
-priv->can.state = CAN_STATE_ERROR_WARNING;
-} else {
-priv->can.state = CAN_STATE_ERROR_ACTIVE;
-}
-netif_device_attach(ndev);
-netif_start_queue(ndev);
-}
-
 return 0;
 }

@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };

+static const struct xcan_devtype_data xcan_zynq_data = {
+.caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+{ .compatible = "xlnx,axi-can-1.00.a", },
+{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
 struct resource *res; /* IO mem resources */
 struct net_device *ndev;
 struct xcan_priv *priv;
+const struct of_device_id *of_id;
+int caps = 0;
 void __iomem *addr;
-int ret, rx_max, tx_max;
+int ret, rx_max, tx_max, tx_fifo_depth;

 /* Get the virtual base address for the device */
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
 goto err;
 }

-ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+&tx_fifo_depth);
 if (ret < 0)
 goto err;

@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
 if (ret < 0)
 goto err;

+of_id = of_match_device(xcan_of_match, &pdev->dev);
+if (of_id) {
+const struct xcan_devtype_data *devtype_data = of_id->data;
+
+if (devtype_data)
+caps = devtype_data->caps;
+}
+
+/* There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If watermark programming
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ */
+if (caps & XCAN_CAP_WATERMARK)
+tx_max = min(tx_fifo_depth, 2);
+else
+tx_max = 1;
+
 /* Create a CAN device instance */
 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
 if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
 CAN_CTRLMODE_BERR_REPORTING;
 priv->reg_base = addr;
 priv->tx_max = tx_max;
+spin_lock_init(&priv->tx_lock);

 /* Get IRQ for the device */
 ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)

 pm_runtime_put(&pdev->dev);

-netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
 priv->reg_base, ndev->irq, priv->can.clock.freq,
-priv->tx_max);
+tx_fifo_depth, priv->tx_max);

 return 0;

@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
 return 0;
 }

-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-{ .compatible = "xlnx,zynq-can-1.0", },
-{ .compatible = "xlnx,axi-can-1.00.a", },
-{ /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
 .probe = xcan_probe,
 .remove = xcan_remove,
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
 .xlate = irq_domain_xlate_twocell,
 };

+/* To be called with reg_lock held */
 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 {
 int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)

 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
 {
-mv88e6xxx_g1_irq_free_common(chip);
+/*
+ * free_irq must be called without reg_lock taken because the irq
+ * handler takes this lock, too.
+ */
 free_irq(chip->irq, chip);
+
+mutex_lock(&chip->reg_lock);
+mv88e6xxx_g1_irq_free_common(chip);
+mutex_unlock(&chip->reg_lock);
 }

 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)

 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
 {
-mv88e6xxx_g1_irq_free_common(chip);
-
 kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
 kthread_destroy_worker(chip->kworker);
+
+mutex_lock(&chip->reg_lock);
+mv88e6xxx_g1_irq_free_common(chip);
+mutex_unlock(&chip->reg_lock);
 }

 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -4523,12 +4532,10 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 if (chip->info->g2_irqs > 0)
 mv88e6xxx_g2_irq_free(chip);
 out_g1_irq:
-mutex_lock(&chip->reg_lock);
 if (chip->irq > 0)
 mv88e6xxx_g1_irq_free(chip);
 else
 mv88e6xxx_irq_poll_free(chip);
-mutex_unlock(&chip->reg_lock);
 out:
 if (pdata)
 dev_put(pdata->netdev);
@@ -4556,12 +4563,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
 if (chip->info->g2_irqs > 0)
 mv88e6xxx_g2_irq_free(chip);

-mutex_lock(&chip->reg_lock);
 if (chip->irq > 0)
 mv88e6xxx_g1_irq_free(chip);
 else
 mv88e6xxx_irq_poll_free(chip);
-mutex_unlock(&chip->reg_lock);
 }

 static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -32,7 +32,7 @@ config EL3

 config 3C515
 tristate "3c515 ISA \"Fast EtherLink\""
-depends on ISA && ISA_DMA_API
+depends on ISA && ISA_DMA_API && !PPC32
 ---help---
 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
 network card, say Y here.
@@ -44,7 +44,7 @@ config AMD8111_ETH

 config LANCE
 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-depends on ISA && ISA_DMA_API && !ARM
+depends on ISA && ISA_DMA_API && !ARM && !PPC32
 ---help---
 If you have a network (Ethernet) card of this type, say Y here.
 Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN

 config NI65
 tristate "NI6510 support"
-depends on ISA && ISA_DMA_API && !ARM
+depends on ISA && ISA_DMA_API && !ARM && !PPC32
 ---help---
 If you have a network (Ethernet) card of this type, say Y here.

@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
 skb = build_skb(page_address(page) + adapter->rx_page_offset,
 adapter->rx_frag_size);
 if (likely(skb)) {
+skb_reserve(skb, NET_SKB_PAD);
 adapter->rx_page_offset += adapter->rx_frag_size;
 if (adapter->rx_page_offset >= PAGE_SIZE)
 adapter->rx_page = NULL;
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 DP(BNX2X_MSG_ETHTOOL,
 "rss re-configured, UDP 4-tupple %s\n",
 udp_rss_requested ? "enabled" : "disabled");
-return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+if (bp->state == BNX2X_STATE_OPEN)
+return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+true);
 } else if ((info->flow_type == UDP_V6_FLOW) &&
 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
 DP(BNX2X_MSG_ETHTOOL,
 "rss re-configured, UDP 4-tupple %s\n",
 udp_rss_requested ? "enabled" : "disabled");
-return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+if (bp->state == BNX2X_STATE_OPEN)
+return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+true);
 }
 return 0;

@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
 }

+if (bp->state == BNX2X_STATE_OPEN)
 return bnx2x_config_rss_eth(bp, false);
+
+return 0;
 }

 /**
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
 config CS89x0
 tristate "CS89x0 support"
 depends on ISA || EISA || ARM
+depends on !PPC32
 ---help---
 Support for CS89x0 chipset based Ethernet cards. If you have a
 network (Ethernet) card of this type, say Y and read the file
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 txq->txq_stats.tx_busy++;
 u64_stats_update_end(&txq->txq_stats.syncp);
 err = NETDEV_TX_BUSY;
+wqe_size = 0;
 goto flush_skbs;
 }

@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
 struct res_srq *srq;
-int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+int local_qpn = vhcr->in_modifier & 0xffffff;

 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
 if (err)
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 int i;

 buf->size = size;
-buf->npages = 1 << get_order(size);
+buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
 buf->page_shift = PAGE_SHIFT;
 buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
 GFP_KERNEL);
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 HLIST_HEAD(del_list);
 spin_lock_bh(&priv->fs.arfs.arfs_lock);
 mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-break;
 if (!work_pending(&arfs_rule->arfs_work) &&
 rps_may_expire_flow(priv->netdev,
 arfs_rule->rxq, arfs_rule->flow_id,
 arfs_rule->filter_id)) {
 hlist_del_init(&arfs_rule->hlist);
 hlist_add_head(&arfs_rule->hlist, &del_list);
+if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+break;
 }
 }
 spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 skb->protocol != htons(ETH_P_IPV6))
 return -EPROTONOSUPPORT;

+if (skb->encapsulation)
+return -EPROTONOSUPPORT;
+
 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 if (!arfs_t)
 return -EPROTONOSUPPORT;
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }

 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-struct ieee_ets *ets)
+struct ieee_ets *ets,
+bool zero_sum_allowed)
 {
 bool have_ets_tc = false;
 int bw_sum = 0;
@@ -300,6 +301,7 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 }

 if (have_ets_tc && bw_sum != 100) {
+if (bw_sum || (!bw_sum && !zero_sum_allowed))
 netdev_err(netdev,
 "Failed to validate ETS: BW sum is illegal\n");
 return -EINVAL;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 if (!MLX5_CAP_GEN(priv->mdev, ets))
 return -EOPNOTSUPP;

-err = mlx5e_dbcnl_validate_ets(netdev, ets);
+err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
 if (err)
 return err;

@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 ets.prio_tc[i]);
 }

-err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-if (err) {
-netdev_err(netdev,
-"%s, Failed to validate ETS: %d\n", __func__, err);
+err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+if (err)
 goto out;
-}

 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
 if (err) {
@@ -2004,6 +2004,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 else
 actions = flow->nic_attr->action;

+if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+!(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+return false;
+
 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 return modify_header_match_supported(&parse_attr->spec, exts);

@@ -2216,6 +2216,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,

 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-return esw->mode;
+return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1889,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 if (!fwd_next_prio_supported(ft))
 return ERR_PTR(-EOPNOTSUPP);
-if (dest)
+if (dest_num)
 return ERR_PTR(-EINVAL);
 mutex_lock(&root->chain_lock);
 next_ft = find_next_chained_ft(prio);
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
 struct mlx5_clock *clock = &mdev->clock;
+u64 overflow_cycles;
 u64 ns;
 u64 frac = 0;
 u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)

 /* Calculate period in seconds to call the overflow watchdog - to make
 * sure counter is checked at least once every wrap around.
+ * The period is calculated as the minimum between max HW cycles count
+ * (The clock source mask) and max amount of cycles that can be
+ * multiplied by clock multiplier where the result doesn't exceed
+ * 64bits.
 */
-ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
 frac, &frac);
-do_div(ns, NSEC_PER_SEC / 2 / HZ);
+do_div(ns, NSEC_PER_SEC / HZ);
 clock->overflow_period = ns;

 mdev->clock_info_page = alloc_page(GFP_KERNEL);
@@ -113,34 +113,44 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 return err;
 }

-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
 struct mlx5_wq_qp *qp)
 {
+struct mlx5_frag_buf_ctrl *sq_fbc;
 struct mlx5_frag_buf *rqb, *sqb;

 rqb = &qp->rq.fbc.frag_buf;
 *rqb = *buf;
 rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-rqb->npages = 1 << get_order(rqb->size);
+rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);

-sqb = &qp->sq.fbc.frag_buf;
+sq_fbc = &qp->sq.fbc;
+sqb = &sq_fbc->frag_buf;
 *sqb = *buf;
-sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-sqb->npages = 1 << get_order(sqb->size);
+sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
 sqb->frags += rqb->npages; /* first part is for the rq */
+if (sq_fbc->strides_offset)
+sqb->frags--;
 }

 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 void *qpc, struct mlx5_wq_qp *wq,
 struct mlx5_wq_ctrl *wq_ctrl)
 {
+u32 sq_strides_offset;
 int err;

 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
 MLX5_GET(qpc, qpc, log_rq_size),
 &wq->rq.fbc);
-mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
+sq_strides_offset =
+((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
 MLX5_GET(qpc, qpc, log_sq_size),
+sq_strides_offset,
 &wq->sq.fbc);

 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 goto err_db_free;
 }

-mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);

 wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
 wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 payload.dst_ipv4 = flow->daddr;

 /* If entry has expired send dst IP with all other fields 0. */
-if (!(neigh->nud_state & NUD_VALID)) {
+if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
 nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
 /* Trigger ARP to verify invalid neighbour state. */
 neigh_event_send(neigh, NULL);
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,

 p_ramrod->common.update_approx_mcast_flg = 1;
 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-u32 *p_bins = (u32 *)p_params->bins;
+u32 *p_bins = p_params->bins;

 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
 }
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 enum spq_mode comp_mode,
 struct qed_spq_comp_cb *p_comp_data)
 {
-unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 struct vport_update_ramrod_data *p_ramrod = NULL;
+u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 struct qed_spq_entry *p_ent = NULL;
 struct qed_sp_init_data init_data;
 u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 /* explicitly clear out the entire vector */
 memset(&p_ramrod->approx_mcast.bins, 0,
 sizeof(p_ramrod->approx_mcast.bins));
-memset(bins, 0, sizeof(unsigned long) *
-ETH_MULTICAST_MAC_BINS_IN_REGS);
+memset(bins, 0, sizeof(bins));
 /* filter ADD op is explicit set op and it removes
 * any existing filters for the vport
 */
 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-u32 bit;
+u32 bit, nbits;

 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-__set_bit(bit, bins);
+nbits = sizeof(u32) * BITS_PER_BYTE;
+bins[bit / nbits] |= 1 << (bit % nbits);
 }

 /* Convert to correct endianity */
 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
 struct vport_update_ramrod_mcast *p_ramrod_bins;
-u32 *p_bins = (u32 *)bins;

 p_ramrod_bins = &p_ramrod->approx_mcast;
-p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
 }
 }

@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
 u8 anti_spoofing_en;
 u8 update_accept_any_vlan_flg;
 u8 accept_any_vlan;
-unsigned long bins[8];
+u32 bins[8];
 struct qed_rss_params *rss_params;
 struct qed_filter_accept_flags accept_flags;
 struct qed_sge_tpa_params *sge_tpa_params;
@@ -1211,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 break;
 default:
 p_link->speed = 0;
+p_link->link_up = 0;
 }

 if (p_link->link_up && p_link->speed)
@@ -1308,8 +1309,14 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
 phy_cfg.adv_speed = params->speed.advertised_speeds;
 phy_cfg.loopback_mode = params->loopback_mode;
-if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
-if (params->eee.enable)
+/* There are MFWs that share this capability regardless of whether
+ * this is feasible or not. And given that at the very least adv_caps
+ * would be set internally by qed, we want to make sure LFA would
+ * still work.
+ */
+if ((p_hwfn->mcp_info->capabilities &
+FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
 if (params->eee.tx_lpi_enable)
 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,

 p_data->update_approx_mcast_flg = 1;
 memcpy(p_data->bins, p_mcast_tlv->bins,
-sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
 }

@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
 resp_size += sizeof(struct pfvf_def_resp_tlv);

 memcpy(p_mcast_tlv->bins, p_params->bins,
-sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 }

 update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
 u32 bit;

 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-__set_bit(bit, sp_params.bins);
+sp_params.bins[bit / 32] |= 1 << (bit % 32);
 }
 }

@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
 struct channel_tlv tl;
 u8 padding[4];

-u64 bins[8];
+/* There are only 256 approx bins, and in HSI they're divided into
+ * 32-bit values. As old VFs used to set-bit to the values on its side,
+ * the upper half of the array is never expected to contain any data.
+ */
+u64 bins[4];
+u64 obsolete_bins[4];
 };

 struct vfpf_vport_update_accept_param_tlv {
@@ -7396,8 +7396,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 return rc;
 }

-/* override BIOS settings, use userspace tools to enable WOL */
-__rtl8169_set_wol(tp, 0);
+tp->saved_wolopts = __rtl8169_get_wol(tp);

 mutex_init(&tp->wk.mutex);
 u64_stats_init(&tp->rx_stats.syncp);
@@ -218,6 +218,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 ret = of_mdiobus_register(bus, np1);
 if (ret) {
 mdiobus_free(bus);
+lp->mii_bus = NULL;
 return ret;
 }
 return 0;
@@ -519,7 +519,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 * negotiation may already be done and aneg interrupt may not be
 * generated.
 */
-if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
 err = phy_aneg_done(phydev);
 if (err > 0) {
 trigger = true;
@@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = {
 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
-{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
+{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+/*
+ * Revalidating a dead namespace sets capacity to 0. This will end
+ * buffered writers dirtying pages that can't be synced.
+ */
+if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+return;
+revalidate_disk(ns->disk);
+blk_set_queue_dying(ns->queue);
+/* Forcibly unquiesce queues to avoid blocking dispatch */
+blk_mq_unquiesce_queue(ns->queue);
+}

 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);

 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-u32 result;
+u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
 int status;

-status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+if (!supported_aens)
+return;
+
+status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+NULL, 0, &result);
 if (status)
 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-ctrl->oaes & NVME_AEN_SUPPORTED);
+supported_aens);
 }

 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,

 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-struct nvme_ns *ns, *next;
-LIST_HEAD(rm_list);
+struct nvme_ns *ns;

-down_write(&ctrl->namespaces_rwsem);
-list_for_each_entry(ns, &ctrl->namespaces, list) {
-if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-list_move_tail(&ns->list, &rm_list);
-}
-}
-up_write(&ctrl->namespaces_rwsem);
+down_read(&ctrl->namespaces_rwsem);
+list_for_each_entry(ns, &ctrl->namespaces, list)
+if (ns->disk && nvme_revalidate_disk(ns->disk))
+nvme_set_queue_dying(ns);
+up_read(&ctrl->namespaces_rwsem);

-list_for_each_entry_safe(ns, next, &rm_list, list)
-nvme_ns_remove(ns);
+nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }

 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
 0, &cmd.result, timeout);
 nvme_passthru_end(ctrl, effects);

@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,

 down_write(&ctrl->namespaces_rwsem);
 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-if (ns->head->ns_id > nsid)
+if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 list_move_tail(&ns->list, &rm_list);
 }
 up_write(&ctrl->namespaces_rwsem);
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 if (ctrl->admin_q)
 blk_mq_unquiesce_queue(ctrl->admin_q);

-list_for_each_entry(ns, &ctrl->namespaces, list) {
-/*
- * Revalidating a dead namespace sets capacity to 0. This will
- * end buffered writers dirtying pages that can't be synced.
- */
-if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-continue;
-revalidate_disk(ns->disk);
-blk_set_queue_dying(ns->queue);
-
-/* Forcibly unquiesce queues to avoid blocking dispatch */
-blk_mq_unquiesce_queue(ns->queue);
-}
+list_for_each_entry(ns, &ctrl->namespaces, list)
+nvme_set_queue_dying(ns);
 up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)

 quirks |= check_vendor_combination_bug(pdev);

-result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-quirks);
-if (result)
-goto release_pools;
-
 /*
 * Double check that our mempool alloc size will cover the biggest
 * command we support.
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 goto release_pools;
 }

+result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+quirks);
+if (result)
+goto release_mempool;
+
 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

 nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)

 return 0;

+release_mempool:
+mempool_destroy(dev->iod_mempool);
 release_pools:
 nvme_release_prp_pools(dev);
 unmap:
@@ -134,15 +134,14 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
 rc = PTR_ERR(file);
 dev_err(dev, "%s: alloc_file failed rc=%d\n",
 __func__, rc);
-goto err5;
+path_put(&path);
+goto err3;
 }

 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
 file->private_data = priv;
 out:
 return file;
-err5:
-path_put(&path);
 err4:
 iput(inode);
 err3:
@@ -27,9 +27,16 @@
 #define GPC_PGC_SW2ISO_SHIFT 0x8
 #define GPC_PGC_SW_SHIFT 0x0

+#define GPC_PGC_PCI_PDN 0x200
+#define GPC_PGC_PCI_SR 0x20c
+
 #define GPC_PGC_GPU_PDN 0x260
 #define GPC_PGC_GPU_PUPSCR 0x264
 #define GPC_PGC_GPU_PDNSCR 0x268
+#define GPC_PGC_GPU_SR 0x26c
+
+#define GPC_PGC_DISP_PDN 0x240
+#define GPC_PGC_DISP_SR 0x24c

 #define GPU_VPU_PUP_REQ BIT(1)
 #define GPU_VPU_PDN_REQ BIT(0)
@@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
 { }
 };

+static const struct regmap_range yes_ranges[] = {
+regmap_reg_range(GPC_CNTR, GPC_CNTR),
+regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+.yes_ranges = yes_ranges,
+.n_yes_ranges = ARRAY_SIZE(yes_ranges),
+};
+
 static const struct regmap_config imx_gpc_regmap_config = {
 .reg_bits = 32,
 .val_bits = 32,
 .reg_stride = 4,
+.rd_table = &access_table,
+.wr_table = &access_table,
 .max_register = 0x2ac,
 };

@@ -11,7 +11,6 @@
 * (at your option) any later version.
 */

-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>

+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"

@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }

 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-unsigned long tce, unsigned long size,
+unsigned long tce, unsigned long shift,
 unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 long ret = 0;
 struct mm_iommu_table_group_mem_t *mem;

-mem = mm_iommu_lookup(container->mm, tce, size);
+mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
 if (!mem)
 return -EINVAL;

-ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
 if (ret)
 return -EINVAL;

@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 if (!pua)
 return;

-ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
 &hpa, &mem);
 if (ret)
 pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 entry + i);

 ret = tce_iommu_prereg_ua_to_hpa(container,
-tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+tce, tbl->it_page_shift, &hpa, &mem);
 if (ret)
 break;

fs/aio.c
@@ -1896,6 +1896,11 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
 return ret;
 }

+struct __aio_sigset {
+const sigset_t __user *sigmask;
+size_t sigsetsize;
+};
+
 SYSCALL_DEFINE6(io_pgetevents,
 aio_context_t, ctx_id,
 long, min_nr,
@@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
 struct extent_map *em;
 u64 start = page_offset(page);
 u64 end = start + PAGE_SIZE - 1;
-struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
-struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
+struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+struct extent_io_tree *tree = &btrfs_inode->io_tree;
+struct extent_map_tree *map = &btrfs_inode->extent_tree;

 if (gfpflags_allow_blocking(mask) &&
 page->mapping->host->i_size > SZ_16M) {
@@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
 extent_map_end(em) - 1,
 EXTENT_LOCKED | EXTENT_WRITEBACK,
 0, NULL)) {
+set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+&btrfs_inode->runtime_flags);
 remove_extent_mapping(map, em);
 /* once for the rb tree */
 free_extent_map(em);
@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	struct vm_area_struct *vma = NULL;
 	struct mm_struct *mm = bprm->mm;
 
-	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	bprm->vma = vma = vm_area_alloc(mm);
 	if (!vma)
 		return -ENOMEM;
 
@@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 		err = -EINTR;
 		goto err_free;
 	}
-	vma->vm_mm = mm;
 
 	/*
 	 * Place the stack at the largest stack address the architecture
@@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
 	err = insert_vm_struct(mm, vma);
 	if (err)
@@ -326,7 +324,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	up_write(&mm->mmap_sem);
 err_free:
 	bprm->vma = NULL;
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return err;
 }
 
@@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb,
 	brelse(bh);
 }
 
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+	if (opts->iocharset != fat_default_iocharset) {
+		/* Note: opts->iocharset can be NULL here */
+		kfree(opts->iocharset);
+		opts->iocharset = fat_default_iocharset;
+	}
+}
+
 static void delayed_free(struct rcu_head *p)
 {
 	struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
 	unload_nls(sbi->nls_disk);
 	unload_nls(sbi->nls_io);
-	if (sbi->options.iocharset != fat_default_iocharset)
-		kfree(sbi->options.iocharset);
+	fat_reset_iocharset(&sbi->options);
 	kfree(sbi);
 }
 
@@ -1132,7 +1140,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 	opts->fs_fmask = opts->fs_dmask = current_umask();
 	opts->allow_utime = -1;
 	opts->codepage = fat_default_codepage;
-	opts->iocharset = fat_default_iocharset;
+	fat_reset_iocharset(opts);
 	if (is_vfat) {
 		opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
 		opts->rodir = 0;
@@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 
 		/* vfat specific */
 		case Opt_charset:
-			if (opts->iocharset != fat_default_iocharset)
-				kfree(opts->iocharset);
+			fat_reset_iocharset(opts);
 			iocharset = match_strdup(&args[0]);
 			if (!iocharset)
 				return -ENOMEM;
@@ -1881,8 +1888,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
 	iput(fat_inode);
 	unload_nls(sbi->nls_io);
 	unload_nls(sbi->nls_disk);
-	if (sbi->options.iocharset != fat_default_iocharset)
-		kfree(sbi->options.iocharset);
+	fat_reset_iocharset(&sbi->options);
 	sb->s_fs_info = NULL;
 	kfree(sbi);
 	return error;
@@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
 
 extern int open_check_o_direct(struct file *f);
 extern int vfs_open(const struct path *, struct file *, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
 
 /*
  * inode.c
@@ -5,10 +5,10 @@
 #include <uapi/linux/bpfilter.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
 			    unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
-			    int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+			    int __user *optlen);
 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
 				       char __user *optval,
 				       unsigned int optlen, bool is_set);
|
|||||||
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
|
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
|
||||||
const char *, int, umode_t);
|
const char *, int, umode_t);
|
||||||
extern struct file * dentry_open(const struct path *, int, const struct cred *);
|
extern struct file * dentry_open(const struct path *, int, const struct cred *);
|
||||||
|
extern struct file *filp_clone_open(struct file *);
|
||||||
extern int filp_close(struct file *, fl_owner_t id);
|
extern int filp_close(struct file *, fl_owner_t id);
|
||||||
|
|
||||||
extern struct filename *getname_flags(const char __user *, int, int *);
|
extern struct filename *getname_flags(const char __user *, int, int *);
|
||||||
|
@ -363,6 +363,7 @@ struct mlx5_frag_buf_ctrl {
|
|||||||
struct mlx5_frag_buf frag_buf;
|
struct mlx5_frag_buf frag_buf;
|
||||||
u32 sz_m1;
|
u32 sz_m1;
|
||||||
u32 frag_sz_m1;
|
u32 frag_sz_m1;
|
||||||
|
u32 strides_offset;
|
||||||
u8 log_sz;
|
u8 log_sz;
|
||||||
u8 log_stride;
|
u8 log_stride;
|
||||||
u8 log_frag_strides;
|
u8 log_frag_strides;
|
||||||
@ -991,7 +992,8 @@ static inline u32 mlx5_base_mkey(const u32 key)
|
|||||||
return key & 0xffffff00u;
|
return key & 0xffffff00u;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
|
static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
|
||||||
|
u32 strides_offset,
|
||||||
struct mlx5_frag_buf_ctrl *fbc)
|
struct mlx5_frag_buf_ctrl *fbc)
|
||||||
{
|
{
|
||||||
fbc->log_stride = log_stride;
|
fbc->log_stride = log_stride;
|
||||||
@ -999,6 +1001,13 @@ static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
|
|||||||
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
|
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
|
||||||
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
|
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
|
||||||
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
|
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
|
||||||
|
fbc->strides_offset = strides_offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
|
||||||
|
struct mlx5_frag_buf_ctrl *fbc)
|
||||||
|
{
|
||||||
|
mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
|
static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
|
||||||
@ -1012,7 +1021,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
|
|||||||
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
|
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
|
||||||
u32 ix)
|
u32 ix)
|
||||||
{
|
{
|
||||||
unsigned int frag = (ix >> fbc->log_frag_strides);
|
unsigned int frag;
|
||||||
|
|
||||||
|
ix += fbc->strides_offset;
|
||||||
|
frag = ix >> fbc->log_frag_strides;
|
||||||
|
|
||||||
return fbc->frag_buf.frags[frag].buf +
|
return fbc->frag_buf.frags[frag].buf +
|
||||||
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
|
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
|
||||||
|
@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
|
|||||||
* mmap() functions).
|
* mmap() functions).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern struct kmem_cache *vm_area_cachep;
|
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
|
||||||
|
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
|
||||||
|
void vm_area_free(struct vm_area_struct *);
|
||||||
|
|
||||||
#ifndef CONFIG_MMU
|
#ifndef CONFIG_MMU
|
||||||
extern struct rb_root nommu_region_tree;
|
extern struct rb_root nommu_region_tree;
|
||||||
|
@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
|
|||||||
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
|
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
|
||||||
struct task_struct *fork_idle(int);
|
struct task_struct *fork_idle(int);
|
||||||
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
|
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
|
||||||
extern long kernel_wait4(pid_t, int *, int, struct rusage *);
|
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
|
||||||
|
|
||||||
extern void free_task(struct task_struct *tsk);
|
extern void free_task(struct task_struct *tsk);
|
||||||
|
|
||||||
|
@@ -11,6 +11,7 @@
 #ifndef _LINUX_SYSCALLS_H
 #define _LINUX_SYSCALLS_H
 
+struct __aio_sigset;
 struct epoll_event;
 struct iattr;
 struct inode;
|
|||||||
/**
|
/**
|
||||||
* cfg80211_rx_control_port - notification about a received control port frame
|
* cfg80211_rx_control_port - notification about a received control port frame
|
||||||
* @dev: The device the frame matched to
|
* @dev: The device the frame matched to
|
||||||
* @buf: control port frame
|
* @skb: The skbuf with the control port frame. It is assumed that the skbuf
|
||||||
* @len: length of the frame data
|
* is 802.3 formatted (with 802.3 header). The skb can be non-linear.
|
||||||
* @addr: The peer from which the frame was received
|
* This function does not take ownership of the skb, so the caller is
|
||||||
* @proto: frame protocol, typically PAE or Pre-authentication
|
* responsible for any cleanup. The caller must also ensure that
|
||||||
|
* skb->protocol is set appropriately.
|
||||||
* @unencrypted: Whether the frame was received unencrypted
|
* @unencrypted: Whether the frame was received unencrypted
|
||||||
*
|
*
|
||||||
* This function is used to inform userspace about a received control port
|
* This function is used to inform userspace about a received control port
|
||||||
@ -5953,8 +5954,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
|
|||||||
* Return: %true if the frame was passed to userspace
|
* Return: %true if the frame was passed to userspace
|
||||||
*/
|
*/
|
||||||
bool cfg80211_rx_control_port(struct net_device *dev,
|
bool cfg80211_rx_control_port(struct net_device *dev,
|
||||||
const u8 *buf, size_t len,
|
struct sk_buff *skb, bool unencrypted);
|
||||||
const u8 *addr, u16 proto, bool unencrypted);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
|
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
|
||||||
|
@@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
 	atomic_inc(&f6i->fib6_ref);
 }
 
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+	return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
 	if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
  * @portid: netlink portID of the original message
  * @seq: netlink sequence number
  * @family: protocol family
+ * @level: depth of the chains
  * @report: notify via unicast netlink message
  */
 struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
 	u32			portid;
 	u32			seq;
 	u8			family;
+	u8			level;
 	bool			report;
 };
 
@@ -865,7 +867,6 @@ enum nft_chain_flags {
  * @table: table that this chain belongs to
  * @handle: chain handle
  * @use: number of jump references to this chain
- * @level: length of longest path to this chain
 * @flags: bitmask of enum nft_chain_flags
 * @name: name of the chain
 */
@@ -878,7 +879,6 @@ struct nft_chain {
 	struct nft_table	*table;
 	u64			handle;
 	u32			use;
-	u16			level;
 	u8			flags:6,
 				genmask:2;
 	char			*name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
 	u32			genmask:2,
 				use:30;
 	u64			handle;
-	char			*dev_name[NFT_FLOWTABLE_DEVICE_MAX];
 	/* runtime data below here */
 	struct nf_hook_ops	*ops ____cacheline_aligned;
 	struct nf_flowtable	data;
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -540,6 +541,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -839,6 +841,11 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
  * as TCP moves IP6CB into a different location in skb->cb[]
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
+{
+	return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
 {
 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 
@@ -29,7 +29,6 @@
 
 #include <linux/types.h>
 #include <linux/fs.h>
-#include <linux/signal.h>
 #include <asm/byteorder.h>
 
 typedef __kernel_ulong_t aio_context_t;
@@ -110,10 +109,5 @@ struct iocb {
 #undef IFBIG
 #undef IFLITTLE
 
-struct __aio_sigset {
-	const sigset_t __user	*sigmask;
-	size_t		sigsetsize;
-};
-
 #endif /* __LINUX__AIO_ABI_H */
 
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)	(((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)	(((VAL & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)	((VAL) & 0x0000ffff)
+#define BTF_INT_BITS(VAL)	((VAL) & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED	(1 << 0)
|
|||||||
*/
|
*/
|
||||||
static bool btf_type_int_is_regular(const struct btf_type *t)
|
static bool btf_type_int_is_regular(const struct btf_type *t)
|
||||||
{
|
{
|
||||||
u16 nr_bits, nr_bytes;
|
u8 nr_bits, nr_bytes;
|
||||||
u32 int_data;
|
u32 int_data;
|
||||||
|
|
||||||
int_data = btf_type_int(t);
|
int_data = btf_type_int(t);
|
||||||
@ -993,12 +993,16 @@ static void btf_int_bits_seq_show(const struct btf *btf,
|
|||||||
{
|
{
|
||||||
u16 left_shift_bits, right_shift_bits;
|
u16 left_shift_bits, right_shift_bits;
|
||||||
u32 int_data = btf_type_int(t);
|
u32 int_data = btf_type_int(t);
|
||||||
u16 nr_bits = BTF_INT_BITS(int_data);
|
u8 nr_bits = BTF_INT_BITS(int_data);
|
||||||
u16 total_bits_offset;
|
u8 total_bits_offset;
|
||||||
u16 nr_copy_bytes;
|
u8 nr_copy_bytes;
|
||||||
u16 nr_copy_bits;
|
u8 nr_copy_bits;
|
||||||
u64 print_num;
|
u64 print_num;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* bits_offset is at most 7.
|
||||||
|
* BTF_INT_OFFSET() cannot exceed 64 bits.
|
||||||
|
*/
|
||||||
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
|
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
|
||||||
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
|
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
|
||||||
bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
|
bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
|
||||||
@ -1028,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
|
|||||||
u32 int_data = btf_type_int(t);
|
u32 int_data = btf_type_int(t);
|
||||||
u8 encoding = BTF_INT_ENCODING(int_data);
|
u8 encoding = BTF_INT_ENCODING(int_data);
|
||||||
bool sign = encoding & BTF_INT_SIGNED;
|
bool sign = encoding & BTF_INT_SIGNED;
|
||||||
u32 nr_bits = BTF_INT_BITS(int_data);
|
u8 nr_bits = BTF_INT_BITS(int_data);
|
||||||
|
|
||||||
if (bits_offset || BTF_INT_OFFSET(int_data) ||
|
if (bits_offset || BTF_INT_OFFSET(int_data) ||
|
||||||
BITS_PER_BYTE_MASKED(nr_bits)) {
|
BITS_PER_BYTE_MASKED(nr_bits)) {
|
||||||
|
@ -303,11 +303,38 @@ struct kmem_cache *files_cachep;
|
|||||||
struct kmem_cache *fs_cachep;
|
struct kmem_cache *fs_cachep;
|
||||||
|
|
||||||
/* SLAB cache for vm_area_struct structures */
|
/* SLAB cache for vm_area_struct structures */
|
||||||
struct kmem_cache *vm_area_cachep;
|
static struct kmem_cache *vm_area_cachep;
|
||||||
|
|
||||||
/* SLAB cache for mm_struct structures (tsk->mm) */
|
/* SLAB cache for mm_struct structures (tsk->mm) */
|
||||||
static struct kmem_cache *mm_cachep;
|
static struct kmem_cache *mm_cachep;
|
||||||
|
|
||||||
|
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
|
||||||
|
{
|
||||||
|
struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
||||||
|
|
||||||
|
if (vma) {
|
||||||
|
vma->vm_mm = mm;
|
||||||
|
INIT_LIST_HEAD(&vma->anon_vma_chain);
|
||||||
|
}
|
||||||
|
return vma;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
|
||||||
|
{
|
||||||
|
struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
||||||
|
|
||||||
|
if (new) {
|
||||||
|
*new = *orig;
|
||||||
|
INIT_LIST_HEAD(&new->anon_vma_chain);
|
||||||
|
}
|
||||||
|
return new;
|
||||||
|
}
|
||||||
|
|
||||||
|
void vm_area_free(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
kmem_cache_free(vm_area_cachep, vma);
|
||||||
|
}
|
||||||
|
|
||||||
static void account_kernel_stack(struct task_struct *tsk, int account)
|
static void account_kernel_stack(struct task_struct *tsk, int account)
|
||||||
{
|
{
|
||||||
void *stack = task_stack_page(tsk);
|
void *stack = task_stack_page(tsk);
|
||||||
@ -455,11 +482,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
|||||||
goto fail_nomem;
|
goto fail_nomem;
|
||||||
charge = len;
|
charge = len;
|
||||||
}
|
}
|
||||||
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
tmp = vm_area_dup(mpnt);
|
||||||
if (!tmp)
|
if (!tmp)
|
||||||
goto fail_nomem;
|
goto fail_nomem;
|
||||||
*tmp = *mpnt;
|
|
||||||
INIT_LIST_HEAD(&tmp->anon_vma_chain);
|
|
||||||
retval = vma_dup_policy(mpnt, tmp);
|
retval = vma_dup_policy(mpnt, tmp);
|
||||||
if (retval)
|
if (retval)
|
||||||
goto fail_nomem_policy;
|
goto fail_nomem_policy;
|
||||||
@ -539,7 +564,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
|||||||
fail_nomem_anon_vma_fork:
|
fail_nomem_anon_vma_fork:
|
||||||
mpol_put(vma_policy(tmp));
|
mpol_put(vma_policy(tmp));
|
||||||
fail_nomem_policy:
|
fail_nomem_policy:
|
||||||
kmem_cache_free(vm_area_cachep, tmp);
|
vm_area_free(tmp);
|
||||||
fail_nomem:
|
fail_nomem:
|
||||||
retval = -ENOMEM;
|
retval = -ENOMEM;
|
||||||
vm_unacct_memory(charge);
|
vm_unacct_memory(charge);
|
||||||
|
@ -2290,8 +2290,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
|
|||||||
if (task_on_rq_queued(p) && p->dl.dl_runtime)
|
if (task_on_rq_queued(p) && p->dl.dl_runtime)
|
||||||
task_non_contending(p);
|
task_non_contending(p);
|
||||||
|
|
||||||
if (!task_on_rq_queued(p))
|
if (!task_on_rq_queued(p)) {
|
||||||
|
/*
|
||||||
|
* Inactive timer is armed. However, p is leaving DEADLINE and
|
||||||
|
* might migrate away from this rq while continuing to run on
|
||||||
|
* some other class. We need to remove its contribution from
|
||||||
|
* this rq running_bw now, or sub_rq_bw (below) will complain.
|
||||||
|
*/
|
||||||
|
if (p->dl.dl_non_contending)
|
||||||
|
sub_running_bw(&p->dl, &rq->dl);
|
||||||
sub_rq_bw(&p->dl, &rq->dl);
|
sub_rq_bw(&p->dl, &rq->dl);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We cannot use inactive_task_timer() to invoke sub_running_bw()
|
* We cannot use inactive_task_timer() to invoke sub_running_bw()
|
||||||
|
@ -270,7 +270,11 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
|
|||||||
goto retry;
|
goto retry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!err) {
|
||||||
|
preempt_disable();
|
||||||
wake_up_q(&wakeq);
|
wake_up_q(&wakeq);
|
||||||
|
preempt_enable();
|
||||||
|
}
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
|
||||||
|
struct iov_iter *i)
|
||||||
|
{
|
||||||
|
struct pipe_inode_info *pipe = i->pipe;
|
||||||
|
size_t n, off, xfer = 0;
|
||||||
|
int idx;
|
||||||
|
|
||||||
|
if (!sanity(i))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
bytes = n = push_pipe(i, bytes, &idx, &off);
|
||||||
|
if (unlikely(!n))
|
||||||
|
return 0;
|
||||||
|
for ( ; n; idx = next_idx(idx, pipe), off = 0) {
|
||||||
|
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
|
||||||
|
unsigned long rem;
|
||||||
|
|
||||||
|
rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
|
||||||
|
chunk);
|
||||||
|
i->idx = idx;
|
||||||
|
i->iov_offset = off + chunk - rem;
|
||||||
|
xfer += chunk - rem;
|
||||||
|
if (rem)
|
||||||
|
break;
|
||||||
|
n -= chunk;
|
||||||
|
addr += chunk;
|
||||||
|
}
|
||||||
|
i->count -= xfer;
|
||||||
|
return xfer;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* _copy_to_iter_mcsafe - copy to user with source-read error exception handling
|
||||||
|
* @addr: source kernel address
|
||||||
|
* @bytes: total transfer length
|
||||||
|
* @iter: destination iterator
|
||||||
|
*
|
||||||
|
* The pmem driver arranges for filesystem-dax to use this facility via
|
||||||
|
* dax_copy_to_iter() for protecting read/write to persistent memory.
|
||||||
|
* Unless / until an architecture can guarantee identical performance
|
||||||
|
* between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
|
||||||
|
* performance regression to switch more users to the mcsafe version.
|
||||||
|
*
|
||||||
|
* Otherwise, the main differences between this and typical _copy_to_iter().
|
||||||
|
*
|
||||||
|
* * Typical tail/residue handling after a fault retries the copy
|
||||||
|
* byte-by-byte until the fault happens again. Re-triggering machine
|
||||||
|
* checks is potentially fatal so the implementation uses source
|
||||||
|
* alignment and poison alignment assumptions to avoid re-triggering
|
||||||
|
* hardware exceptions.
|
||||||
|
*
|
||||||
|
* * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
|
||||||
|
* Compare to copy_to_iter() where only ITER_IOVEC attempts might return
|
||||||
|
* a short copy.
|
||||||
|
*
|
||||||
|
* See MCSAFE_TEST for self-test.
|
||||||
|
*/
|
||||||
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
|
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
|
||||||
{
|
{
|
||||||
const char *from = addr;
|
const char *from = addr;
|
||||||
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
|
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
|
||||||
|
|
||||||
if (unlikely(i->type & ITER_PIPE)) {
|
if (unlikely(i->type & ITER_PIPE))
|
||||||
WARN_ON(1);
|
return copy_pipe_to_iter_mcsafe(addr, bytes, i);
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
if (iter_is_iovec(i))
|
if (iter_is_iovec(i))
|
||||||
might_fault();
|
might_fault();
|
||||||
iterate_and_advance(i, bytes, v,
|
iterate_and_advance(i, bytes, v,
|
||||||
@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
|
|||||||
EXPORT_SYMBOL(_copy_from_iter_nocache);
|
EXPORT_SYMBOL(_copy_from_iter_nocache);
|
||||||
|
|
||||||
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
|
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
|
||||||
|
/**
|
||||||
|
* _copy_from_iter_flushcache - write destination through cpu cache
|
||||||
|
* @addr: destination kernel address
|
||||||
|
* @bytes: total transfer length
|
||||||
|
* @iter: source iterator
|
||||||
|
*
|
||||||
|
* The pmem driver arranges for filesystem-dax to use this facility via
|
||||||
|
* dax_copy_from_iter() for ensuring that writes to persistent memory
|
||||||
|
* are flushed through the CPU cache. It is differentiated from
|
||||||
|
* _copy_from_iter_nocache() in that guarantees all data is flushed for
|
||||||
|
* all iterator types. The _copy_from_iter_nocache() only attempts to
|
||||||
|
* bypass the cache for the ITER_IOVEC case, and on some archs may use
|
||||||
|
* instructions that strand dirty-data in the cache.
|
||||||
|
*/
|
||||||
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
|
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
|
||||||
{
|
{
|
||||||
char *to = addr;
|
char *to = addr;
|
||||||
|
@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
|
|||||||
if (vma_is_dax(vma))
|
if (vma_is_dax(vma))
|
||||||
return;
|
return;
|
||||||
page = pmd_page(_pmd);
|
page = pmd_page(_pmd);
|
||||||
|
if (!PageDirty(page) && pmd_dirty(_pmd))
|
||||||
|
set_page_dirty(page);
|
||||||
if (!PageReferenced(page) && pmd_young(_pmd))
|
if (!PageReferenced(page) && pmd_young(_pmd))
|
||||||
SetPageReferenced(page);
|
SetPageReferenced(page);
|
||||||
page_remove_rmap(page, true);
|
page_remove_rmap(page, true);
|
||||||
|
@@ -20,6 +20,7 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
@@ -1225,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
+#if defined(CONFIG_NO_BOOTMEM)
 /**
  * memblock_virt_alloc_internal - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
@@ -1432,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid(
 		      (u64)max_addr);
 	return NULL;
 }
+#endif
 
 /**
  * __memblock_free_early - free boot memory block
|
|||||||
int nid;
|
int nid;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
while ((memcg = parent_mem_cgroup(memcg))) {
|
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
|
||||||
for_each_node(nid) {
|
for_each_node(nid) {
|
||||||
mz = mem_cgroup_nodeinfo(memcg, nid);
|
mz = mem_cgroup_nodeinfo(memcg, nid);
|
||||||
for (i = 0; i <= DEF_PRIORITY; i++) {
|
for (i = 0; i <= DEF_PRIORITY; i++) {
|
||||||
mm/mmap.c
@@ -182,7 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return next;
 }
 
@@ -911,7 +911,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		anon_vma_merge(vma, next);
 		mm->map_count--;
 		mpol_put(vma_policy(next));
-		kmem_cache_free(vm_area_cachep, next);
+		vm_area_free(next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
 		 * we must remove another next too. It would clutter
@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * specific mapper. the address has already been validated, but
 	 * not unmapped, but the maps are removed from the list.
 	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(mm);
 	if (!vma) {
 		error = -ENOMEM;
 		goto unacct_error;
 	}
 
-	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
 	if (file) {
 		if (vm_flags & VM_DENYWRITE) {
@@ -1832,7 +1830,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	if (vm_flags & VM_DENYWRITE)
 		allow_write_access(file);
 free_vma:
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
@@ -2620,15 +2618,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 		return err;
 	}
 
-	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	new = vm_area_dup(vma);
 	if (!new)
 		return -ENOMEM;
 
-	/* most fields are the same, copy all, and then fixup */
-	*new = *vma;
-
-	INIT_LIST_HEAD(&new->anon_vma_chain);
-
 	if (new_below)
 		new->vm_end = addr;
 	else {
@@ -2669,7 +2662,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 out_free_mpol:
 	mpol_put(vma_policy(new));
 out_free_vma:
-	kmem_cache_free(vm_area_cachep, new);
+	vm_area_free(new);
 	return err;
 }
 
@@ -2984,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	/*
 	 * create a vma struct for an anonymous mapping
 	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(mm);
 	if (!vma) {
 		vm_unacct_memory(len >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
@@ -3202,16 +3193,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		}
 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		new_vma = vm_area_dup(vma);
 		if (!new_vma)
 			goto out;
-		*new_vma = *vma;
 		new_vma->vm_start = addr;
 		new_vma->vm_end = addr + len;
 		new_vma->vm_pgoff = pgoff;
 		if (vma_dup_policy(vma, new_vma))
 			goto out_free_vma;
-		INIT_LIST_HEAD(&new_vma->anon_vma_chain);
 		if (anon_vma_clone(new_vma, vma))
 			goto out_free_mempol;
 		if (new_vma->vm_file)
@@ -3226,7 +3215,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_mempol:
 	mpol_put(vma_policy(new_vma));
 out_free_vma:
-	kmem_cache_free(vm_area_cachep, new_vma);
+	vm_area_free(new_vma);
 out:
 	return NULL;
 }
@@ -3350,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping(
 	int ret;
 	struct vm_area_struct *vma;
 
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(mm);
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 
@@ -3376,7 +3363,7 @@ static struct vm_area_struct *__install_special_mapping(
 	return vma;
 
 out:
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return ERR_PTR(ret);
 }
 
mm/nommu.c
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	put_nommu_region(vma->vm_region);
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 }
 
 /*
@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file,
 	if (!region)
 		goto error_getting_region;
 
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc(current->mm);
 	if (!vma)
 		goto error_getting_vma;
 
@@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file,
 	region->vm_flags = vm_flags;
 	region->vm_pgoff = pgoff;
 
-	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	vma->vm_flags = vm_flags;
 	vma->vm_pgoff = pgoff;
 
@@ -1368,7 +1367,7 @@ unsigned long do_mmap(struct file *file,
 	kmem_cache_free(vm_region_jar, region);
 	if (vma->vm_file)
 		fput(vma->vm_file);
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return ret;
 
 sharing_violation:
@@ -1469,14 +1468,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!region)
 		return -ENOMEM;
 
-	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	new = vm_area_dup(vma);
 	if (!new) {
 		kmem_cache_free(vm_region_jar, region);
 		return -ENOMEM;
 	}
 
 	/* most fields are the same, copy all, and then fixup */
-	*new = *vma;
 	*region = *vma->vm_region;
 	new->vm_region = region;
 
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
 	caifd = caif_get(skb->dev);
 
 	WARN_ON(caifd == NULL);
-	if (caifd == NULL)
+	if (!caifd) {
+		rcu_read_unlock();
 		return;
+	}
 
 	caifd_hold(caifd);
 	rcu_read_unlock();
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
 	struct page *page;
 
 	/* Empty recycle ring */
-	while ((page = ptr_ring_consume(&pool->ring))) {
+	while ((page = ptr_ring_consume_bh(&pool->ring))) {
 		/* Verify the refcnt invariant of cached pages */
 		if (!(page_ref_count(page) == 1))
 			pr_crit("%s() page_pool refcnt %d violation\n",
@@ -3720,6 +3720,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 			net_warn_ratelimited(
 				"skb_segment: too many frags: %u %u\n",
 				pos, mss);
+			err = -EINVAL;
 			goto err;
 		}
 
@@ -3753,11 +3754,10 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 
 perform_csum_check:
 	if (!csum) {
-		if (skb_has_shared_frag(nskb)) {
-			err = __skb_linearize(nskb);
-			if (err)
+		if (skb_has_shared_frag(nskb) &&
+		    __skb_linearize(nskb))
 			goto err;
-		}
+
 		if (!nskb->remcsum_offload)
 			nskb->ip_summed = CHECKSUM_NONE;
 		SKB_GSO_CB(nskb)->csum =
@@ -2316,9 +2316,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
 		pfrag->offset += use;
 
 		sge = sg + sg_curr - 1;
-		if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
-		    sg->offset + sg->length == orig_offset) {
-			sg->length += use;
+		if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+		    sge->offset + sge->length == orig_offset) {
+			sge->length += use;
 		} else {
 			sge = sg + sg_curr;
 			sg_unmark_end(sge);
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	spin_lock_bh(&im->lock);
 	if (pmc) {
 		im->interface = pmc->interface;
-		im->sfmode = pmc->sfmode;
-		if (pmc->sfmode == MCAST_INCLUDE) {
+		if (im->sfmode == MCAST_INCLUDE) {
 			im->tomb = pmc->tomb;
 			im->sources = pmc->sources;
 			for (psf = im->sources; psf; psf = psf->sf_next)
@@ -524,6 +524,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->dev = from->dev;
 	to->mark = from->mark;
 
+	skb_copy_hash(to, from);
+
 	/* Copy the flags to each fragment. */
 	IPCB(to)->flags = IPCB(from)->flags;
 
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct sockaddr_in sin;
 	const struct iphdr *iph = ip_hdr(skb);
-	__be16 *ports = (__be16 *)skb_transport_header(skb);
+	__be16 *ports;
+	int end;
 
-	if (skb_transport_offset(skb) + 4 > (int)skb->len)
+	end = skb_transport_offset(skb) + 4;
+	if (end > 0 && !pskb_may_pull(skb, end))
 		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
+	ports = (__be16 *)skb_transport_header(skb);
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = iph->daddr;
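The change above makes the port lookup safe for non-linear skbs: skb_transport_header() may only be dereferenced once the four port bytes are known to sit in the linear area, and pskb_may_pull() pulls them in when necessary. A condensed view of the resulting pattern (sketch of the hunk, not the full function):

	end = skb_transport_offset(skb) + 4;
	if (end > 0 && !pskb_may_pull(skb, end))
		return;					/* transport header not available */
	ports = (__be16 *)skb_transport_header(skb);	/* now safe to read both ports */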
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=0 to CE=1 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (!ca->ce_state &&
-	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=0. */
-		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (!ca->ce_state) {
+		/* State has changed from CE=0 to CE=1, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=1 to CE=0 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (ca->ce_state &&
-	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=1. */
-		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (ca->ce_state) {
+		/* State has changed from CE=1 to CE=0, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
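Both DCTCP hunks drop the save/patch/restore dance on tp->rcv_nxt and instead use __tcp_send_ack(), declared in the include/net/tcp.h hunk earlier, which takes the ACK sequence to advertise as an argument. A condensed view of the new control flow (sketch only; the CE=1 to CE=0 case mirrors it):

	if (!ca->ce_state) {
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
			__tcp_send_ack(sk, ca->prior_rcv_nxt);	/* flush the delayed ACK first */
		tcp_enter_quickack_mode(sk, 1);			/* then ACK the CE change promptly */
	}
	ca->prior_rcv_nxt = tp->rcv_nxt;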
@@ -216,7 +216,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 		icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -224,6 +224,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -4366,6 +4367,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from,
+			     bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
@@ -4489,7 +4507,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, tp->ooo_last_skb,
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
 			     skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
@@ -4518,7 +4536,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
@@ -4537,10 +4555,10 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 						 TCP_SKB_CB(skb1)->end_seq);
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb1);
+				tcp_drop(sk, skb1);
 				goto merge_right;
 			}
-		} else if (tcp_try_coalesce(sk, skb1,
+		} else if (tcp_ooo_try_coalesce(sk, skb1,
 					    skb, &fragstolen)) {
 			goto coalesce_done;
 		}
@@ -4924,6 +4942,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
 	u32 start, end;
 
@@ -4935,6 +4954,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
+	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
 		skb = skb_rb_next(skb);
@@ -4945,11 +4965,20 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (!skb ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
+			/* Do not attempt collapsing tiny skbs */
+			if (range_truesize != head->truesize ||
+			    end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
 			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
 				     head, skb, start, end);
+			} else {
+				sum_tiny += range_truesize;
+				if (sum_tiny > sk->sk_rcvbuf >> 3)
+					return;
+			}
 			goto new_range;
 		}
 
+		range_truesize += skb->truesize;
 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
 			start = TCP_SKB_CB(skb)->seq;
 		if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4964,6 +4993,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *     freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4971,20 +5001,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct rb_node *node, *prev;
+	int goal;
 
 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		return false;
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+	goal = sk->sk_rcvbuf >> 3;
 	node = &tp->ooo_last_skb->rbnode;
 	do {
 		prev = rb_prev(node);
 		rb_erase(node, &tp->out_of_order_queue);
+		goal -= rb_to_skb(node)->truesize;
 		tcp_drop(sk, rb_to_skb(node));
+		if (!prev || goal <= 0) {
 			sk_mem_reclaim(sk);
 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 			    !tcp_under_memory_pressure(sk))
 				break;
+			goal = sk->sk_rcvbuf >> 3;
+		}
 		node = prev;
 	} while (node);
 	tp->ooo_last_skb = rb_to_skb(prev);
@@ -5019,6 +5055,9 @@ static int tcp_prune_queue(struct sock *sk)
 	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+		return 0;
+
 	tcp_collapse_ofo_queue(sk);
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
Some files were not shown because too many files have changed in this diff.