Merge android12-5.10.19 (e1a763a) into msm-5.10

* refs/heads/tmp-e1a763a
  Linux 5.10.19
  scripts/recordmcount.pl: support big endian for ARCH sh
  kbuild: fix CONFIG_TRIM_UNUSED_KSYMS build for ppc64
  cifs: Set CIFS_MOUNT_USE_PREFIX_PATH flag on setting cifs_sb->prepath.
  cxgb4: Add new T6 PCI device id 0x6092
  NET: usb: qmi_wwan: Adding support for Cinterion MV31
  drm/xlnx: fix kmemleak by sending vblank_event in atomic_disable
  KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped()
  mm: provide a saner PTE walking API for modules
  KVM: do not assume PTE is writable after follow_pfn
  mm: simplify follow_pte{,pmd}
  mm: unexport follow_pte_pmd
  KVM: x86: Zap the oldest MMU pages, not the newest
  hwmon: (dell-smm) Add XPS 15 L502X to fan control blacklist
  arm64: tegra: Add power-domain for Tegra210 HDA
  Bluetooth: btusb: Some Qualcomm Bluetooth adapters stop working
  ntfs: check for valid standard information attribute
  ceph: downgrade warning from mdsmap decode to debug
  usb: quirks: add quirk to start video capture on ELMO L-12F document camera reliable
  USB: quirks: sort quirk entries
  nvme-rdma: Use ibdev_to_node instead of dereferencing ->dma_device
  RDMA: Lift ibdev_to_node from rds to common code
  HID: make arrays usage and value to be the same
  bpf: Fix truncation handling for mod32 dst reg wrt zero
  FROMLIST: f2fs: support direct I/O with fscrypt using blk-crypto
  FROMLIST: ext4: support direct I/O with fscrypt using blk-crypto
  FROMLIST: iomap: support direct I/O with fscrypt using blk-crypto
  FROMLIST: direct-io: add support for fscrypt using blk-crypto
  ANDROID: export fscrypt_limit_io_blocks()
  FROMLIST: fscrypt: Add functions for direct I/O support
  ANDROID: revert fscrypt direct I/O support
  ANDROID: driver core: Set fw_devlink.strict=true by default
  UPSTREAM: of: property: fw_devlink: Ignore interrupts property for some configs
  UPSTREAM: of: irq: Fix the return value for of_irq_parse_one() stub
  UPSTREAM: of: irq: make a stub for of_irq_parse_one()
  UPSTREAM: clk: Mark fwnodes when their clock provider is added/removed
  UPSTREAM: PM: domains: Mark fwnodes when their powerdomain is added/removed
  UPSTREAM: irqdomain: Mark fwnodes when their irqdomain is added/removed
  UPSTREAM: driver core: fw_devlink: Handle suppliers that don't use driver core
  UPSTREAM: of: property: Add fw_devlink support for optional properties
  UPSTREAM: driver core: Add fw_devlink.strict kernel param
  UPSTREAM: of: property: Don't add links to absent suppliers
  UPSTREAM: driver core: fw_devlink: Detect supplier devices that will never be added
  UPSTREAM: of: property: Fix fw_devlink handling of interrupts/interrupts-extended
  UPSTREAM: gpiolib: Don't probe gpio_device if it's not the primary device
  UPSTREAM: gpiolib: Bind gpio_device to a driver to enable fw_devlink=on by default
  UPSTREAM: of: property: Add fw_devlink support for interrupts
  UPSTREAM: of: property: Add fw_devlink support for "gpio" and "gpios" binding
  UPSTREAM: driver core: Handle cycles in device links created by fw_devlink
  UPSTREAM: driver core: Have fw_devlink use DL_FLAG_INFERRED
  UPSTREAM: driver core: Add device link support for INFERRED flag
  UPSTREAM: driver core: Add debug logs for device link related probe deferrals
  ANDROID: GKI: add fields required to enable CONFIG_TRANSPARENT_HUGEPAGE
  UPSTREAM: lib/vsprintf: no_hash_pointers prints all addresses as unhashed
  UPSTREAM: kselftest: add support for skipped tests
  UPSTREAM: lib: use KSTM_MODULE_GLOBALS macro in kselftest drivers
  ANDROID: dma-buf: Export is_dma_buf_file
  ANDROID: mm: export zone_watermark_ok
  UPSTREAM: HID: playstation: add DualSense player LED support.
  UPSTREAM: HID: playstation: add microphone mute support for DualSense.
  UPSTREAM: HID: playstation: add initial DualSense lightbar support.
  UPSTREAM: HID: playstation: fix array size comparison (off-by-one)
  UPSTREAM: HID: playstation: fix unused variable in ps_battery_get_property.
  UPSTREAM: HID: playstation: report DualSense hardware and firmware version.
  UPSTREAM: HID: playstation: add DualSense classic rumble support.
  UPSTREAM: HID: playstation: add DualSense Bluetooth support.
  UPSTREAM: HID: playstation: track devices in list.
  UPSTREAM: HID: playstation: add DualSense accelerometer and gyroscope support.
  UPSTREAM: HID: playstation: add DualSense touchpad support.
  UPSTREAM: HID: playstation: add DualSense battery support.
  UPSTREAM: HID: playstation: use DualSense MAC address as unique identifier.
  UPSTREAM: HID: playstation: initial DualSense USB support.
  ANDROID: sched/rt: Add support for rt sync wakeups
  ANDROID: abi_gki_aarch64_qcom: Add strncpy_from_user to symbol list
  ANDROID: gki_defconfig: Ensure KVM is configured in "protected" mode
  FROMGIT: arm64: VHE: Enable EL2 MMU from the idmap
  FROMGIT: KVM: arm64: make the hyp vector table entries local
  UPSTREAM: printk: avoid prb_first_valid_seq() where possible

Change-Id: I7587948a3bc8564fc342789a51752a453008c1a6
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Blagovest Kolenichev 2021-02-26 18:32:28 -08:00
commit 385aa081f1
64 changed files with 4469 additions and 2582 deletions


@ -1451,6 +1451,11 @@
to enforce probe and suspend/resume ordering.
rpm -- Like "on", but also use to order runtime PM.
fw_devlink.strict=<bool>
[KNL] Treat all inferred dependencies as mandatory
dependencies. This only applies for fw_devlink=on|rpm.
Format: <bool>
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
@ -3331,6 +3336,21 @@
in certain environments such as networked servers or
real-time systems.
no_hash_pointers
Force pointers printed to the console or buffers to be
unhashed. By default, when a pointer is printed via %p
format string, that pointer is "hashed", i.e. obscured
by hashing the pointer value. This is a security feature
that hides actual kernel addresses from unprivileged
users, but it also makes debugging the kernel more
difficult since unequal pointers can no longer be
compared. However, if this command-line option is
specified, then all normal pointers will have their true
value printed. Pointers printed via %pK may still be
hashed. This option should only be specified when
debugging the kernel. Please do not use on production
kernels.
nohibernate [HIBERNATION] Disable hibernation and resume.
nohz= [KNL] Boottime enable/disable dynamic ticks
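
A minimal sketch of what the no_hash_pointers parameter documented above changes for a kernel-side %p user (module and symbol names are illustrative, not part of this patch):

#include <linux/module.h>

static int demo_target;

static int __init ptr_hash_demo_init(void)
{
	/*
	 * By default this prints a hashed identifier; when the kernel is
	 * booted with no_hash_pointers it prints the real address of
	 * demo_target.  %pK output may still be hashed, as noted above.
	 */
	pr_info("demo_target at %p\n", &demo_target);
	return 0;
}
module_init(ptr_hash_demo_init);

static void __exit ptr_hash_demo_exit(void)
{
}
module_exit(ptr_hash_demo_exit);

MODULE_LICENSE("GPL");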


@ -7864,6 +7864,12 @@ F: drivers/hid/
F: include/linux/hid*
F: include/uapi/linux/hid*
HID PLAYSTATION DRIVER
M: Roderick Colenbrander <roderick.colenbrander@sony.com>
L: linux-input@vger.kernel.org
S: Supported
F: drivers/hid/hid-playstation.c
HID SENSOR HUB DRIVERS
M: Jiri Kosina <jikos@kernel.org>
M: Jonathan Cameron <jic23@kernel.org>


@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 18
SUBLEVEL = 19
EXTRAVERSION =
NAME = Kleptomaniac Octopus
NAME = Dare mighty things
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

File diff suppressed because it is too large


@ -2196,6 +2196,7 @@
strnchr
strncmp
strncpy
strncpy_from_user
strnlen
strnstr
strpbrk


@ -997,6 +997,7 @@ hda@70030000 {
<&tegra_car 128>, /* hda2hdmi */
<&tegra_car 111>; /* hda2codec_2x */
reset-names = "hda", "hda2hdmi", "hda2codec_2x";
power-domains = <&pd_sor>;
status = "disabled";
};


@ -56,7 +56,7 @@ CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off"
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off kvm-arm.mode=protected"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_DMI is not set
CONFIG_PM_WAKELOCKS=y
@ -98,6 +98,8 @@ CONFIG_GKI_HACKS_TO_FIX=y
CONFIG_BINFMT_MISC=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y


@ -75,9 +75,6 @@ SYM_CODE_END(el1_sync)
// nVHE? No way! Give me the real thing!
SYM_CODE_START_LOCAL(mutate_to_vhe)
// Be prepared to fail
mov_q x0, HVC_STUB_ERR
// Sanity check: MMU *must* be off
mrs x1, sctlr_el2
tbnz x1, #0, 1f
@ -96,8 +93,11 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
cmp x1, xzr
and x2, x2, x1
csinv x2, x2, xzr, ne
cbz x2, 1f
cbnz x2, 2f
1: mov_q x0, HVC_STUB_ERR
eret
2:
// Engage the VHE magic!
mov_q x0, HCR_HOST_VHE_FLAGS
msr hcr_el2, x0
@ -131,6 +131,24 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
msr mair_el1, x0
isb
// Hack the exception return to stay at EL2
mrs x0, spsr_el1
and x0, x0, #~PSR_MODE_MASK
mov x1, #PSR_MODE_EL2h
orr x0, x0, x1
msr spsr_el1, x0
b enter_vhe
SYM_CODE_END(mutate_to_vhe)
// At the point where we reach enter_vhe(), we run with
// the MMU off (which is enforced by mutate_to_vhe()).
// We thus need to be in the idmap, or everything will
// explode when enabling the MMU.
.pushsection .idmap.text, "ax"
SYM_CODE_START_LOCAL(enter_vhe)
// Invalidate TLBs before enabling the MMU
tlbi vmalle1
dsb nsh
@ -143,17 +161,12 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
msr_s SYS_SCTLR_EL12, x0
// Hack the exception return to stay at EL2
mrs x0, spsr_el1
and x0, x0, #~PSR_MODE_MASK
mov x1, #PSR_MODE_EL2h
orr x0, x0, x1
msr spsr_el1, x0
mov x0, xzr
1: eret
SYM_CODE_END(mutate_to_vhe)
eret
SYM_CODE_END(enter_vhe)
.popsection
.macro invalid_vector label
SYM_CODE_START_LOCAL(\label)


@ -119,7 +119,7 @@ el2_error:
.macro invalid_vector label, target = __guest_exit_panic
.align 2
SYM_CODE_START(\label)
SYM_CODE_START_LOCAL(\label)
b \target
SYM_CODE_END(\label)
.endm


@ -80,6 +80,8 @@ CONFIG_GKI_HACKS_TO_FIX=y
CONFIG_BINFMT_MISC=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y


@ -2409,7 +2409,7 @@ static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
return 0;
restart:
list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
/*
* Don't zap active root pages, the page itself can't be freed
* and zapping it will just force vCPUs to realloc and reload.


@ -148,6 +148,21 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
fwnode_links_purge_consumers(fwnode);
}
static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
/* Don't purge consumer links of an added child */
if (fwnode->dev)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_links_purge_consumers(fwnode);
fwnode_for_each_available_child_node(fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
@ -244,7 +259,8 @@ int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
if ((link->flags & ~DL_FLAG_INFERRED) ==
(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
if (link->consumer == target)
@ -317,7 +333,8 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
if ((link->flags & ~DL_FLAG_INFERRED) ==
(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@ -565,7 +582,8 @@ postcore_initcall(devlink_class_init);
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY)
DL_FLAG_SYNC_STATE_ONLY | \
DL_FLAG_INFERRED)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@ -634,7 +652,7 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_SYNC_STATE_ONLY &&
flags != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@ -690,6 +708,10 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
if (link->flags & DL_FLAG_INFERRED &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
@ -949,6 +971,10 @@ int device_links_check_suppliers(struct device *dev)
mutex_lock(&fwnode_link_lock);
if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
!fw_devlink_is_permissive()) {
dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
list_first_entry(&dev->fwnode->suppliers,
struct fwnode_link,
c_hook)->supplier);
mutex_unlock(&fwnode_link_lock);
return -EPROBE_DEFER;
}
@ -963,6 +989,8 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
dev_dbg(dev, "probe deferral - supplier %s not ready\n",
dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
@ -1141,12 +1169,22 @@ void device_links_driver_bound(struct device *dev)
LIST_HEAD(sync_list);
/*
* If a device probes successfully, it's expected to have created all
* If a device binds successfully, it's expected to have created all
* the device links it needs to or make new device links as it needs
* them. So, it no longer needs to wait on any suppliers.
* them. So, fw_devlink no longer needs to create device links to any
* of the device's suppliers.
*
* Also, if a child firmware node of this bound device is not added as
* a device by now, assume it is never going to be added and make sure
* other devices don't defer probe indefinitely by waiting for such a
* child device.
*/
if (dev->fwnode && dev->fwnode->dev == dev)
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
fwnode_for_each_available_child_node(dev->fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
@ -1457,7 +1495,14 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
static u32 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
DL_FLAG_PM_RUNTIME)
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
@ -1466,17 +1511,23 @@ static int __init fw_devlink_setup(char *arg)
if (strcmp(arg, "off") == 0) {
fw_devlink_flags = 0;
} else if (strcmp(arg, "permissive") == 0) {
fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
} else if (strcmp(arg, "on") == 0) {
fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
} else if (strcmp(arg, "rpm") == 0) {
fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
DL_FLAG_PM_RUNTIME;
fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
}
return 0;
}
early_param("fw_devlink", fw_devlink_setup);
static bool fw_devlink_strict = true;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
u32 fw_devlink_get_flags(void)
{
return fw_devlink_flags;
@ -1484,7 +1535,12 @@ u32 fw_devlink_get_flags(void)
static bool fw_devlink_is_permissive(void)
{
return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
bool fw_devlink_is_strict(void)
{
return fw_devlink_strict && !fw_devlink_is_permissive();
}
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
@ -1506,6 +1562,53 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
fw_devlink_parse_fwtree(child);
}
/**
* fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
* @con: Device to check dependencies for.
* @sup: Device to check against.
*
* Check if @sup depends on @con or any device dependent on it (its child or
* its consumer etc). When such a cyclic dependency is found, convert all
* device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
* This is the equivalent of doing fw_devlink=permissive just between the
* devices in the cycle. We need to do this because, at this point, fw_devlink
* can't tell which of these dependencies is not a real dependency.
*
* Return 1 if a cycle is found. Otherwise, return 0.
*/
int fw_devlink_relax_cycle(struct device *con, void *sup)
{
struct device_link *link;
int ret;
if (con == sup)
return 1;
ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
if (ret)
return ret;
list_for_each_entry(link, &con->links.consumers, s_node) {
if ((link->flags & ~DL_FLAG_INFERRED) ==
(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
if (!fw_devlink_relax_cycle(link->consumer, sup))
continue;
ret = 1;
if (!(link->flags & DL_FLAG_INFERRED))
continue;
pm_runtime_drop_link(link);
link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
dev_dbg(link->consumer, "Relaxing link with %s\n",
dev_name(link->supplier));
}
return ret;
}
/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con - Consumer device for the device link
@ -1533,16 +1636,40 @@ static int fw_devlink_create_devlink(struct device *con,
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
* If it's one of those drivers that don't actually bind to
* their device using driver core, then don't wait on this
* supplier device indefinitely.
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
ret = -EINVAL;
goto out;
}
/*
* If this fails, it is due to cycles in device links. Just
* give up on this link and treat it as invalid.
*/
if (!device_link_add(con, sup_dev, flags))
if (!device_link_add(con, sup_dev, flags) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
dev_info(con, "Fixing up cyclic dependency with %s\n",
dev_name(sup_dev));
device_links_write_lock();
fw_devlink_relax_cycle(con, sup_dev);
device_links_write_unlock();
device_link_add(con, sup_dev,
FW_DEVLINK_FLAGS_PERMISSIVE);
ret = -EINVAL;
}
goto out;
}
/* Supplier that's already initialized without a struct device. */
if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
return -EINVAL;
/*
* DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
* cycles. So cycle detection isn't necessary and shouldn't be
@ -1631,7 +1758,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
con_dev = NULL;
} else {
own_link = false;
dl_flags = DL_FLAG_SYNC_STATE_ONLY;
dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
}
}
@ -1686,7 +1813,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
if (own_link)
dl_flags = fw_devlink_get_flags();
else
dl_flags = DL_FLAG_SYNC_STATE_ONLY;
dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
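
A hedged sketch of the flag semantics introduced in this file (the devices and calling layer are illustrative): fw_devlink now tags the links it creates with DL_FLAG_INFERRED, and a later device_link_add() for the same pair without that flag upgrades the link to a driver-requested one.

#include <linux/device.h>

/* Firmware-inferred link, equivalent to FW_DEVLINK_FLAGS_ON above. */
static struct device_link *link_inferred(struct device *con, struct device *sup)
{
	return device_link_add(con, sup,
			       DL_FLAG_INFERRED | DL_FLAG_AUTOPROBE_CONSUMER);
}

/* A driver-requested link for the same pair clears DL_FLAG_INFERRED. */
static struct device_link *link_explicit(struct device *con, struct device *sup)
{
	return device_link_add(con, sup, DL_FLAG_AUTOPROBE_CONSUMER);
}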


@ -2223,6 +2223,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
cp->node = of_node_get(np);
cp->data = data;
cp->xlate = xlate;
fwnode_dev_initialized(&np->fwnode, true);
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
@ -2412,6 +2413,7 @@ void of_genpd_del_provider(struct device_node *np)
}
}
fwnode_dev_initialized(&cp->node->fwnode, false);
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);


@ -3689,6 +3689,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
info = &qca_devices_table[i];
}
if (!info) {
/* If the rom_version is not matched in the qca_devices_table
* and the high ROM version is not zero, we assume this chip no
* need to load the rampatch and nvm.
*/
if (ver_rom & ~0xffffU)
return 0;
bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
return -ENODEV;
}
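
A worked reading of the new high-ROM-word check (the version value below is illustrative):

/*
 * Example: ver_rom = 0x00130201 is not listed in qca_devices_table, but
 * ver_rom & ~0xffffU = 0x00130000 is non-zero, so btusb_setup_qca() now
 * returns 0 and skips the rampatch/NVM download instead of failing the
 * probe with -ENODEV.
 */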


@ -4625,6 +4625,8 @@ int of_clk_add_provider(struct device_node *np,
if (ret < 0)
of_clk_del_provider(np);
fwnode_dev_initialized(&np->fwnode, true);
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
@ -4742,6 +4744,7 @@ void of_clk_del_provider(struct device_node *np)
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
fwnode_dev_initialized(&np->fwnode, false);
of_node_put(cp->node);
kfree(cp);
break;


@ -31,8 +31,6 @@
#include "dma-buf-sysfs-stats.h"
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
struct list_head head;
struct mutex lock;
@ -488,10 +486,11 @@ static const struct file_operations dma_buf_fops = {
/*
* is_dma_buf_file - Check if struct file* is associated with dma_buf
*/
static inline int is_dma_buf_file(struct file *file)
int is_dma_buf_file(struct file *file)
{
return file->f_op == &dma_buf_fops;
}
EXPORT_SYMBOL_GPL(is_dma_buf_file);
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
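
A hedged sketch of how an out-of-tree consumer might use the newly exported helper (the function name and error policy are illustrative):

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/file.h>

/* Reject user-supplied fds that are not dma-bufs before importing them. */
static int demo_check_dmabuf_fd(int fd)
{
	struct file *file = fget(fd);
	int ret;

	if (!file)
		return -EBADF;
	ret = is_dma_buf_file(file) ? 0 : -EINVAL;
	fput(file);
	return ret;
}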


@ -1039,3 +1039,14 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(chip->of_node);
}
void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
{
/* If the gpiochip has an assigned OF node this takes precedence */
if (gc->of_node)
gdev->dev.of_node = gc->of_node;
else
gc->of_node = gdev->dev.of_node;
if (gdev->dev.of_node)
gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
}


@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
int of_gpio_get_count(struct device *dev, const char *con_id);
bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
#else
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
return false;
}
static inline void of_gpio_dev_init(struct gpio_chip *gc,
struct gpio_device *gdev)
{
}
#endif /* CONFIG_OF_GPIO */
extern struct notifier_block gpio_of_notifier;


@ -55,8 +55,10 @@
static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
static int gpio_bus_match(struct device *dev, struct device_driver *drv);
static struct bus_type gpio_bus_type = {
.name = "gpio",
.match = gpio_bus_match,
};
/*
@ -589,13 +591,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->dev.of_node = gc->parent->of_node;
}
#ifdef CONFIG_OF_GPIO
/* If the gpiochip has an assigned OF node this takes precedence */
if (gc->of_node)
gdev->dev.of_node = gc->of_node;
else
gc->of_node = gdev->dev.of_node;
#endif
of_gpio_dev_init(gc, gdev);
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@ -4342,6 +4338,41 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
static int gpio_bus_match(struct device *dev, struct device_driver *drv)
{
/*
* Only match if the fwnode doesn't already have a proper struct device
* created for it.
*/
if (dev->fwnode && dev->fwnode->dev != dev)
return 0;
return 1;
}
static int gpio_stub_drv_probe(struct device *dev)
{
/*
* The DT node of some GPIO chips have a "compatible" property, but
* never have a struct device added and probed by a driver to register
* the GPIO chip with gpiolib. In such cases, fw_devlink=on will cause
* the consumers of the GPIO chip to get probe deferred forever because
* they will be waiting for a device associated with the GPIO chip
* firmware node to get added and bound to a driver.
*
* To allow these consumers to probe, we associate the struct
* gpio_device of the GPIO chip with the firmware node and then simply
* bind it to this stub driver.
*/
return 0;
}
static struct device_driver gpio_stub_drv = {
.name = "gpio_stub_drv",
.bus = &gpio_bus_type,
.probe = gpio_stub_drv_probe,
};
static int __init gpiolib_dev_init(void)
{
int ret;
@ -4353,9 +4384,16 @@ static int __init gpiolib_dev_init(void)
return ret;
}
if (driver_register(&gpio_stub_drv) < 0) {
pr_err("gpiolib: could not register GPIO stub driver\n");
bus_unregister(&gpio_bus_type);
return ret;
}
ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
driver_unregister(&gpio_stub_drv);
bus_unregister(&gpio_bus_type);
return ret;
}


@ -1398,19 +1398,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
static void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
struct drm_crtc *crtc = &disp->crtc;
zynqmp_disp_audio_disable(&disp->audio);
zynqmp_disp_avbuf_disable_audio(&disp->avbuf);
zynqmp_disp_avbuf_disable_channels(&disp->avbuf);
zynqmp_disp_avbuf_disable(&disp->avbuf);
/* Mark the flip is done as crtc is disabled anyway */
if (crtc->state->event) {
complete_all(crtc->state->event->base.completion);
crtc->state->event = NULL;
}
}
static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
@ -1499,6 +1491,13 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(&disp->crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
clk_disable_unprepare(disp->pclk);
pm_runtime_put_sync(disp->dev);
}


@ -864,6 +864,24 @@ config HID_PLANTRONICS
Say M here if you may ever plug in a Plantronics USB audio device.
config HID_PLAYSTATION
tristate "PlayStation HID Driver"
depends on HID
select CRC32
select POWER_SUPPLY
help
Provides support for Sony PS5 controllers including support for
its special functionalities e.g. touchpad, lights and motion
sensors.
config PLAYSTATION_FF
bool "PlayStation force feedback support"
depends on HID_PLAYSTATION
select INPUT_FF_MEMLESS
help
Say Y here if you would like to enable force feedback support for
PlayStation game controllers.
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
depends on HID


@ -95,6 +95,7 @@ hid-picolcd-$(CONFIG_HID_PICOLCD_CIR) += hid-picolcd_cir.o
hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
obj-$(CONFIG_HID_PLAYSTATION) += hid-playstation.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o
obj-$(CONFIG_HID_RETRODE) += hid-retrode.o


@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
* Register a new field for this report.
*/
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
struct hid_field *field;
@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field = kzalloc((sizeof(struct hid_field) +
usages * sizeof(struct hid_usage) +
values * sizeof(unsigned)), GFP_KERNEL);
usages * sizeof(unsigned)), GFP_KERNEL);
if (!field)
return NULL;
@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
usages = max_t(unsigned, parser->local.usage_index,
parser->global.report_count);
field = hid_register_field(report, usages, parser->global.report_count);
field = hid_register_field(report, usages);
if (!field)
return 0;


@ -1073,6 +1073,7 @@
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002

File diff suppressed because it is too large


@ -1159,6 +1159,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
},
},
{
.ident = "Dell XPS 15 L502X",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L502X"),
},
},
{ }
};


@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */


@ -1332,6 +1332,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */


@ -860,7 +860,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return error;
ctrl->device = ctrl->queues[0].device;
ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
/* T10-PI support */
if (ctrl->device->dev->attrs.device_cap_flags &


@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
@ -1102,7 +1103,9 @@ static int of_link_to_phandle(struct device_node *con_np,
* created for them.
*/
sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
if (!sup_dev && of_node_check_flag(sup_np, OF_POPULATED)) {
if (!sup_dev &&
(of_node_check_flag(sup_np, OF_POPULATED) ||
sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) {
pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
con_np, sup_np);
of_node_put(sup_np);
@ -1232,6 +1235,7 @@ static struct device_node *parse_##fname(struct device_node *np, \
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
bool optional;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
@ -1244,8 +1248,6 @@ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
DEFINE_SIMPLE_PROP(interrupts_extended, "interrupts-extended",
"#interrupt-cells")
DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", NULL)
DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells")
DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL)
@ -1271,19 +1273,55 @@ static struct device_node *parse_iommu_maps(struct device_node *np,
return of_parse_phandle(np, prop_name, (index * 4) + 1);
}
static struct device_node *parse_gpio_compat(struct device_node *np,
const char *prop_name, int index)
{
struct of_phandle_args sup_args;
if (strcmp(prop_name, "gpio") && strcmp(prop_name, "gpios"))
return NULL;
/*
* Ignore node with gpio-hog property since its gpios are all provided
* by its parent.
*/
if (of_find_property(np, "gpio-hog", NULL))
return NULL;
if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index,
&sup_args))
return NULL;
return sup_args.np;
}
static struct device_node *parse_interrupts(struct device_node *np,
const char *prop_name, int index)
{
struct of_phandle_args sup_args;
if (!IS_ENABLED(CONFIG_OF_IRQ) || IS_ENABLED(CONFIG_PPC))
return NULL;
if (strcmp(prop_name, "interrupts") &&
strcmp(prop_name, "interrupts-extended"))
return NULL;
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
{ .parse_prop = parse_iommus, },
{ .parse_prop = parse_iommu_maps, },
{ .parse_prop = parse_iommus, .optional = true, },
{ .parse_prop = parse_iommu_maps, .optional = true, },
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, },
{ .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
{ .parse_prop = parse_extcon, },
{ .parse_prop = parse_interrupts_extended, },
{ .parse_prop = parse_nvmem_cells, },
{ .parse_prop = parse_phys, },
{ .parse_prop = parse_wakeup_parent, },
@ -1296,6 +1334,8 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
@ -1332,6 +1372,11 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
/* Do not stop at first failed link, link all available suppliers. */
while (!matched && s->parse_prop) {
if (s->optional && !fw_devlink_is_strict()) {
s++;
continue;
}
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
matched = true;
i++;


@ -391,6 +391,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
/* ELMO L-12F document camera */
{ USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG },
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
@ -415,6 +418,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
@ -495,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};


@ -3734,6 +3734,7 @@ include/trace/events/filelock.h
include/trace/events/filemap.h
include/trace/events/gpio.h
include/trace/events/gpu_mem.h
include/trace/events/huge_memory.h
include/trace/events/i2c.h
include/trace/events/initcall.h
include/trace/events/iommu.h
@ -4835,6 +4836,7 @@ mm/filemap.c
mm/frame_vector.c
mm/gup.c
mm/highmem.c
mm/huge_memory.c
mm/init-mm.c
mm/internal.h
mm/interval_tree.c
@ -4847,6 +4849,7 @@ mm/kasan/report_hw_tags.c
mm/kfence/core.c
mm/kfence/kfence.h
mm/kfence/report.c
mm/khugepaged.c
mm/list_lru.c
mm/maccess.c
mm/madvise.c


@ -243,8 +243,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
}
if (state <= 0) {
pr_warn("mdsmap_decode got incorrect state(%s)\n",
ceph_mds_state_name(state));
dout("mdsmap_decode got incorrect state(%s)\n",
ceph_mds_state_name(state));
continue;
}


@ -4007,6 +4007,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
if (cifs_sb->prepath == NULL)
return -ENOMEM;
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
}
return 0;


@ -73,7 +73,7 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page);
* Generate the IV for the given logical block number within the given file.
* For filenames encryption, lblk_num == 0.
*
* Keep this in sync with fscrypt_limit_dio_pages(). fscrypt_limit_dio_pages()
* Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
* needs to know about any IV generation methods where the low bits of IV don't
* simply contain the lblk_num (e.g., IV_INO_LBLK_32).
*/


@ -434,40 +434,43 @@ bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
/**
* fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs
* fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
* @inode: the file on which I/O is being done
* @pos: the file position (in bytes) at which the I/O is being done
* @nr_pages: the number of pages we want to submit starting at @pos
* @lblk: the block at which the I/O is being started from
* @nr_blocks: the number of blocks we want to submit starting at @pos
*
* For direct I/O: limit the number of pages that will be submitted in the bio
* targeting @pos, in order to avoid crossing a data unit number (DUN)
* discontinuity. This is only needed for certain IV generation methods.
* Determine the limit to the number of blocks that can be submitted in the bio
* targeting @pos without causing a data unit number (DUN) discontinuity.
*
* This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported().
* This is normally just @nr_blocks, as normally the DUNs just increment along
* with the logical blocks. (Or the file is not encrypted.)
*
* Return: the actual number of pages that can be submitted
* In rare cases, fscrypt can be using an IV generation method that allows the
* DUN to wrap around within logically continuous blocks, and that wraparound
* will occur. If this happens, a value less than @nr_blocks will be returned
* so that the wraparound doesn't occur in the middle of the bio.
*
* Return: the actual number of blocks that can be submitted
*/
int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages)
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
const struct fscrypt_info *ci = inode->i_crypt_info;
u32 dun;
if (!fscrypt_inode_uses_inline_crypto(inode))
return nr_pages;
return nr_blocks;
if (nr_pages <= 1)
return nr_pages;
if (nr_blocks <= 1)
return nr_blocks;
if (!(fscrypt_policy_flags(&ci->ci_policy) &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
return nr_pages;
if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE))
return 1;
return nr_blocks;
/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits);
dun = ci->ci_hashed_ino + lblk;
return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun);
return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
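
A worked example of the wraparound case described in the comment above (the inode hash and block numbers are illustrative):

/*
 * With IV_INO_LBLK_32, ci_hashed_ino = 0xfffffff0 and lblk = 0x8 give
 * dun = 0xfffffff8, so (U32_MAX + 1) - dun = 8.  A request for 16 blocks
 * therefore returns 8: the first bio stops just before the DUN wraps to
 * 0, and the remaining 8 blocks go into a second bio.
 */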


@ -810,12 +810,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
address = pgoff_address(index, vma);
/*
* Note because we provide range to follow_pte_pmd it will
* call mmu_notifier_invalidate_range_start() on our behalf
* before taking any lock.
* follow_invalidate_pte() will use the range to call
* mmu_notifier_invalidate_range_start() on our behalf before
* taking any lock.
*/
if (follow_pte_pmd(vma->vm_mm, address, &range,
&ptep, &pmdp, &ptl))
if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
&pmdp, &ptl))
continue;
/*


@ -3506,6 +3506,14 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (ret < 0)
return ret;
out:
/*
* When inline encryption is enabled, sometimes I/O to an encrypted file
* has to be broken up to guarantee DUN contiguity. Handle this by
* limiting the length of the mapping returned.
*/
map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
ext4_set_iomap(inode, iomap, &map, offset, length);
return 0;


@ -259,7 +259,6 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
ret = nr_pages;
goto out;
}
nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
@ -316,7 +315,6 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
copied += n;
nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);
iomap_dio_submit_bio(dio, iomap, bio, pos);
pos += n;
} while (nr_pages);


@ -629,6 +629,12 @@ static int ntfs_read_locked_inode(struct inode *vi)
}
a = ctx->attr;
/* Get the standard information attribute value. */
if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
+ le32_to_cpu(a->data.resident.value_length) >
(u8 *)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
goto unm_err_out;
}
si = (STANDARD_INFORMATION*)((u8*)a +
le16_to_cpu(a->data.resident.value_offset));


@ -323,6 +323,7 @@ enum device_link_state {
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
* INFERRED: Inferred from data (eg: firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@ -332,6 +333,7 @@ enum device_link_state {
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
#define DL_FLAG_INFERRED BIT(8)
/**
* enum dl_dev_state - Device driver presence tracking information.


@ -588,6 +588,7 @@ dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
return !!attach->importer_ops;
}
int is_dma_buf_file(struct file *file);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *


@ -611,8 +611,7 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);
int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
int nr_pages);
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
@ -651,10 +650,10 @@ static inline bool fscrypt_dio_supported(struct kiocb *iocb,
return !fscrypt_needs_contents_encryption(inode);
}
static inline int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
int nr_pages)
static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
u64 nr_blocks)
{
return nr_pages;
return nr_blocks;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */


@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/err.h>
struct fwnode_operations;
struct device;
@ -18,9 +19,13 @@ struct device;
/*
* fwnode link flags
*
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
* INITIALIZED: The hardware corresponding to fwnode has been initialized.
*/
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
#define FWNODE_FLAG_NOT_DEVICE BIT(1)
#define FWNODE_FLAG_INITIALIZED BIT(2)
struct fwnode_handle {
struct fwnode_handle *secondary;
@ -159,7 +164,20 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
bool initialized)
{
if (IS_ERR_OR_NULL(fwnode))
return;
if (initialized)
fwnode->flags |= FWNODE_FLAG_INITIALIZED;
else
fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
}
extern u32 fw_devlink_get_flags(void);
extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);


@ -1705,9 +1705,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range, pte_t **ptepp,
pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,


@ -33,8 +33,6 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
@ -42,6 +40,8 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
extern void of_irq_init(const struct of_device_id *matches);
#ifdef CONFIG_OF_IRQ
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@ -57,6 +57,11 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
}
static inline int of_irq_count(struct device_node *dev)
{
return 0;


@ -4642,6 +4642,19 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
return coredev->owner;
}
/**
* ibdev_to_node - return the NUMA node for a given ib_device
* @dev: device to get the NUMA node for.
*/
static inline int ibdev_to_node(struct ib_device *ibdev)
{
struct device *parent = ibdev->dev.parent;
if (!parent)
return NUMA_NO_NODE;
return dev_to_node(parent);
}
/**
* rdma_device_to_drv_device - Helper macro to reach back to driver's
* ib_device holder structure from device pointer.
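
A hedged usage sketch of the lifted ibdev_to_node() helper beyond the nvme-rdma hunk above (structure and function names are illustrative): NUMA-local allocation of per-device state.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static void *demo_alloc_dev_state(struct ib_device *ibdev, size_t size)
{
	/* ibdev_to_node() falls back to NUMA_NO_NODE when there is no parent. */
	return kzalloc_node(size, GFP_KERNEL, ibdev_to_node(ibdev));
}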


@ -10869,7 +10869,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
bool isdiv = BPF_OP(insn->code) == BPF_DIV;
struct bpf_insn *patchlet;
struct bpf_insn chk_and_div[] = {
/* Rx div 0 -> 0 */
/* [R,W]x div 0 -> 0 */
BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
BPF_JNE | BPF_K, insn->src_reg,
0, 2, 0),
@ -10878,16 +10878,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
*insn,
};
struct bpf_insn chk_and_mod[] = {
/* Rx mod 0 -> Rx */
/* [R,W]x mod 0 -> [R,W]x */
BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
BPF_JEQ | BPF_K, insn->src_reg,
0, 1, 0),
0, 1 + (is64 ? 0 : 1), 0),
*insn,
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
};
patchlet = isdiv ? chk_and_div : chk_and_mod;
cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
ARRAY_SIZE(chk_and_mod);
ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
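
A worked example of the behaviour this patchlet change restores for 32-bit BPF_MOD (register values are illustrative):

/*
 * With r0 = 0x100000001 and w1 = 0, "w0 %= w1" must leave the low 32 bits
 * of r0 unchanged but zero the upper half, i.e. r0 ends up as 0x1.  The
 * old patchlet simply jumped over the instruction, leaving r0 at
 * 0x100000001; the appended BPF_MOV32_REG(dst, dst) now performs the
 * required truncation, while the 64-bit variant still omits the two
 * extra instructions.
 */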


@ -196,6 +196,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
}
fwnode_handle_get(fwnode);
fwnode_dev_initialized(fwnode, true);
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
@ -244,6 +245,7 @@ void irq_domain_remove(struct irq_domain *domain)
pr_debug("Removed domain %s\n", domain->name);
fwnode_dev_initialized(domain->fwnode, false);
fwnode_handle_put(domain->fwnode);
if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
kfree(domain->name);


@ -786,9 +786,9 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
logbuf_lock_irq();
}
if (user->seq < prb_first_valid_seq(prb)) {
if (r->info->seq != user->seq) {
/* our last seen message is gone, return error and reset */
user->seq = prb_first_valid_seq(prb);
user->seq = r->info->seq;
ret = -EPIPE;
logbuf_unlock_irq();
goto out;
@ -863,6 +863,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
struct devkmsg_user *user = file->private_data;
struct printk_info info;
__poll_t ret = 0;
if (!user)
@ -871,9 +872,9 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
poll_wait(file, &log_wait, wait);
logbuf_lock_irq();
if (prb_read_valid(prb, user->seq, NULL)) {
if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
/* return error when data has vanished underneath us */
if (user->seq < prb_first_valid_seq(prb))
if (info.seq != user->seq)
ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
else
ret = EPOLLIN|EPOLLRDNORM;
@ -1610,6 +1611,7 @@ static void syslog_clear(void)
int do_syslog(int type, char __user *buf, int len, int source)
{
struct printk_info info;
bool clear = false;
static int saved_console_loglevel = LOGLEVEL_DEFAULT;
int error;
@ -1680,9 +1682,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
logbuf_lock_irq();
if (syslog_seq < prb_first_valid_seq(prb)) {
if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
/* No unread messages. */
logbuf_unlock_irq();
return 0;
}
if (info.seq != syslog_seq) {
/* messages are gone, move to first one */
syslog_seq = prb_first_valid_seq(prb);
syslog_seq = info.seq;
syslog_partial = 0;
}
if (source == SYSLOG_FROM_PROC) {
@ -1694,7 +1701,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
error = prb_next_seq(prb) - syslog_seq;
} else {
bool time = syslog_partial ? syslog_time : printk_time;
struct printk_info info;
unsigned int line_count;
u64 seq;
@ -3389,9 +3395,11 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
goto out;
logbuf_lock_irqsave(flags);
if (dumper->cur_seq < prb_first_valid_seq(prb)) {
/* messages are gone, move to first available one */
dumper->cur_seq = prb_first_valid_seq(prb);
if (prb_read_valid_info(prb, dumper->cur_seq, &info, NULL)) {
if (info.seq != dumper->cur_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = info.seq;
}
}
/* last entry */
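
The user-visible contract these hunks keep intact can be sketched from a /dev/kmsg reader (a hedged userspace example, not part of the patch): when the record a reader last saw has been overwritten, read() fails with EPIPE and the position advances to the first available record.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n < 0 && errno == EPIPE)
			continue;	/* records vanished; reader was repositioned */
		if (n <= 0)
			break;		/* no more records (EAGAIN) or error */
		fwrite(buf, 1, n, stdout);
	}
	close(fd);
	return 0;
}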


@ -2627,6 +2627,9 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
{
int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
if (wake_flags & WF_SYNC)
en_flags |= ENQUEUE_WAKEUP_SYNC;
lockdep_assert_held(&rq->lock);
if (p->sched_contributes_to_load)


@ -1372,6 +1372,27 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
enqueue_top_rt_rq(&rq->rt);
}
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
/*
* If the waker is CFS, then an RT sync wakeup would preempt the waker
* and force it to run for a likely small time after the RT wakee is
* done. So, only honor RT sync wakeups from RT wakers.
*/
return sync && task_has_rt_policy(rq->curr) &&
p->prio <= rq->rt.highest_prio.next &&
rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
return 0;
}
#endif
/*
* Adding/removing a task to/from a priority array:
*/
@ -1379,13 +1400,15 @@ static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
!should_honor_rt_sync(rq, p, sync))
enqueue_pushable_task(rq, p);
}
@ -1462,9 +1485,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
struct rq *this_cpu_rq;
bool test;
int target_cpu = -1;
bool may_not_preempt;
bool sync = !!(flags & WF_SYNC);
int this_cpu;
trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
flags, &target_cpu);
@ -1479,6 +1505,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
this_cpu = smp_processor_id();
this_cpu_rq = cpu_rq(this_cpu);
/*
* If the current task on @p's runqueue is a softirq task,
@ -1516,6 +1544,15 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
(unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
/*
* Respect the sync flag as long as the task can run on this CPU.
*/
if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
cpu = this_cpu;
goto out_unlock;
}
if (test || !rt_task_fits_capacity(p, cpu)) {
int target = find_lowest_rq(p);


@ -1785,6 +1785,8 @@ extern const u32 sched_prio_to_wmult[40];
#define ENQUEUE_MIGRATED 0x00
#endif
#define ENQUEUE_WAKEUP_SYNC 0x80
#define RETRY_TASK ((void *)-1UL)
struct sched_class {


@ -16,8 +16,7 @@
#include "../tools/testing/selftests/kselftest_module.h"
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;


@ -30,11 +30,13 @@
#define PAD_SIZE 16
#define FILL_CHAR '$'
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
KSTM_MODULE_GLOBALS();
static char *test_buffer __initdata;
static char *alloced_buffer __initdata;
extern bool no_hash_pointers;
static int __printf(4, 0) __init
do_test(int bufsize, const char *expect, int elen,
const char *fmt, va_list ap)
@ -301,6 +303,12 @@ plain(void)
{
int err;
if (no_hash_pointers) {
pr_warn("skipping plain 'p' tests");
skipped_tests += 2;
return;
}
err = plain_hash();
if (err) {
pr_warn("plain 'p' does not appear to be hashed\n");


@ -2090,6 +2090,32 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
static int __init no_hash_pointers_enable(char *str)
{
no_hash_pointers = true;
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** This system shows unhashed kernel memory addresses **\n");
pr_warn("** via the console, logs, and other interfaces. This **\n");
pr_warn("** might reduce the security of your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging **\n");
pr_warn("** the kernel, report this immediately to your system **\n");
pr_warn("** administrator! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
return 0;
}
early_param("no_hash_pointers", no_hash_pointers_enable);
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@ -2297,8 +2323,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
}
/* default is to _not_ leak addresses, hash before printing */
return ptr_to_id(buf, end, ptr, spec);
/*
* default is to _not_ leak addresses, so hash before printing,
* unless no_hash_pointers is specified on the command line.
*/
if (unlikely(no_hash_pointers))
return pointer_string(buf, end, ptr, spec);
else
return ptr_to_id(buf, end, ptr, spec);
}
/*


@ -5146,9 +5146,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range, pte_t **ptepp,
pmd_t **pmdpp, spinlock_t **ptlp)
{
pgd_t *pgd;
p4d_t *p4d;
@ -5213,31 +5213,33 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
return -EINVAL;
}
static inline int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
/**
* follow_pte - look up PTE at a user virtual address
* @mm: the mm_struct of the target address space
* @address: user virtual address
* @ptepp: location to store found PTE
* @ptlp: location to store the lock for the PTE
*
* On a successful return, the pointer to the PTE is stored in @ptepp;
* the corresponding lock is taken and its location is stored in @ptlp.
* The contents of the PTE are only stable until @ptlp is released;
* any further use, if any, must be protected against invalidation
* with MMU notifiers.
*
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
* should be taken for read.
*
* KVM uses this function. While it is arguably less bad than ``follow_pfn``,
* it is not a good general-purpose API.
*
* Return: zero on success, -ve otherwise.
*/
int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
!(res = __follow_pte_pmd(mm, address, NULL,
ptepp, NULL, ptlp)));
return res;
return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
}
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
!(res = __follow_pte_pmd(mm, address, range,
ptepp, pmdpp, ptlp)));
return res;
}
EXPORT_SYMBOL(follow_pte_pmd);
EXPORT_SYMBOL_GPL(follow_pte);
/**
* follow_pfn - look up PFN at a user virtual address
@ -5247,6 +5249,9 @@ EXPORT_SYMBOL(follow_pte_pmd);
*
* Only IO mappings and raw PFN mappings are allowed.
*
* This function does not allow the caller to read the permissions
* of the PTE. Do not use it.
*
* Return: zero and the pfn at @pfn on success, -ve otherwise.
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
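
A hedged sketch of the calling convention spelled out in the new follow_pte() kernel-doc (the wrapper name is illustrative; the caller is assumed to hold the mmap lock for read and to target an IO/PFN mapping):

#include <linux/mm.h>

static int demo_va_to_pfn(struct mm_struct *mm, unsigned long addr,
			  unsigned long *pfn, bool *writable)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(mm, addr, &ptep, &ptl);
	if (ret)
		return ret;
	/* The PTE contents are only stable until the lock is dropped. */
	*pfn = pte_pfn(*ptep);
	*writable = pte_write(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}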


@ -72,6 +72,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
return z;
}
EXPORT_SYMBOL_GPL(__next_zones_zonelist);
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
bool memmap_valid_within(unsigned long pfn,
@ -130,3 +131,4 @@ enum zone_type gfp_zone(gfp_t flags)
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
EXPORT_SYMBOL_GPL(gfp_zone);


@ -3729,6 +3729,7 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
zone_page_state(z, NR_FREE_PAGES));
}
EXPORT_SYMBOL_GPL(zone_watermark_ok);
static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
@ -3781,6 +3782,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
free_pages);
}
EXPORT_SYMBOL_GPL(zone_watermark_ok_safe);
#ifdef CONFIG_NUMA
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
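
A hedged sketch of what these exports enable for a GKI vendor module (the zero order, low watermark, and GFP_KERNEL zone index below are illustrative choices, not mandated by the patch):

#include <linux/gfp.h>
#include <linux/mmzone.h>

static bool demo_zone_has_headroom(struct zone *z)
{
	unsigned long mark = low_wmark_pages(z);

	return zone_watermark_ok(z, 0, mark, gfp_zone(GFP_KERNEL), 0);
}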


@ -264,13 +264,6 @@ struct rds_ib_device {
int *vector_load;
};
static inline int ibdev_to_node(struct ib_device *ibdev)
{
struct device *parent;
parent = ibdev->dev.parent;
return parent ? dev_to_node(parent) : NUMA_NO_NODE;
}
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */


@ -43,6 +43,9 @@ EOT
sed 's/ko$/mod/' $modlist |
xargs -n1 sed -n -e '2{s/ /\n/g;/^$/!p;}' -- |
cat - "$ksym_wl" |
# Remove the dot prefix for ppc64; symbol names with a dot (.) hold entry
# point addresses.
sed -e 's/^\.//' |
sort -u |
sed -e 's/\(.*\)/#define __KSYM_\1 1/' >> "$output_file"


@ -268,7 +268,11 @@ if ($arch eq "x86_64") {
# force flags for this arch
$ld .= " -m shlelf_linux";
$objcopy .= " -O elf32-sh-linux";
if ($endian eq "big") {
$objcopy .= " -O elf32-shbig-linux";
} else {
$objcopy .= " -O elf32-sh-linux";
}
} elsif ($arch eq "powerpc") {
my $ldemulation;


@ -11,7 +11,8 @@
#define KSTM_MODULE_GLOBALS() \
static unsigned int total_tests __initdata; \
static unsigned int failed_tests __initdata
static unsigned int failed_tests __initdata; \
static unsigned int skipped_tests __initdata
#define KSTM_CHECK_ZERO(x) do { \
total_tests++; \
@ -21,11 +22,16 @@ static unsigned int failed_tests __initdata
} \
} while (0)
static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)
static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests,
unsigned int skipped_tests)
{
if (failed_tests == 0)
pr_info("all %u tests passed\n", total_tests);
else
if (failed_tests == 0) {
if (skipped_tests) {
pr_info("skipped %u tests\n", skipped_tests);
pr_info("remaining %u tests passed\n", total_tests);
} else
pr_info("all %u tests passed\n", total_tests);
} else
pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
return failed_tests ? -EINVAL : 0;
@ -36,7 +42,7 @@ static int __init __module##_init(void) \
{ \
pr_info("loaded.\n"); \
selftest(); \
return kstm_report(total_tests, failed_tests); \
return kstm_report(total_tests, failed_tests, skipped_tests); \
} \
static void __exit __module##_exit(void) \
{ \
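
A hedged module sketch, modelled on lib/test_printf.c, showing how the extended macros report skipped tests (the module name and checks are illustrative):

#define pr_fmt(fmt) "demo_kstm: " fmt

#include <linux/module.h>
#include <linux/printk.h>

#include "../tools/testing/selftests/kselftest_module.h"

KSTM_MODULE_GLOBALS();

static void __init selftest(void)
{
	KSTM_CHECK_ZERO(1 + 1 != 2);		/* counted as a pass */

	if (!IS_ENABLED(CONFIG_64BIT)) {
		pr_warn("skipping 64-bit only check\n");
		skipped_tests++;
		return;
	}
	KSTM_CHECK_ZERO(sizeof(long) != 8);
}

KSTM_MODULE_LOADERS(demo_kstm);
MODULE_LICENSE("GPL");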


@ -1888,10 +1888,12 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
bool write_fault, bool *writable,
kvm_pfn_t *p_pfn)
{
unsigned long pfn;
kvm_pfn_t pfn;
pte_t *ptep;
spinlock_t *ptl;
int r;
r = follow_pfn(vma, addr, &pfn);
r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
@ -1906,14 +1908,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
if (r)
return r;
r = follow_pfn(vma, addr, &pfn);
r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
if (r)
return r;
}
if (write_fault && !pte_write(*ptep)) {
pfn = KVM_PFN_ERR_RO_FAULT;
goto out;
}
if (writable)
*writable = true;
*writable = pte_write(*ptep);
pfn = pte_pfn(*ptep);
/*
* Get a reference here because callers of *hva_to_pfn* and
@ -1928,6 +1935,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
*/
kvm_get_pfn(pfn);
out:
pte_unmap_unlock(ptep, ptl);
*p_pfn = pfn;
return 0;
}