Merge "Merge keystone/android12-5.10-keystone-qcom-release.81+ (613c3eb
) into msm-5.10"
This commit is contained in:
commit
30721d2cd2
@@ -92,12 +92,18 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
@@ -7,7 +7,7 @@ incfs: A stacked incremental filesystem for Linux
 /sys/fs interface
 =================

-Please update Documentation/ABI/testing/sys-fs-incfs if you update this
+Please update Documentation/ABI/testing/sysfs-fs-incfs if you update this
 section.

 incfs creates the following files in /sys/fs.
(File diff suppressed because it is too large.)
@@ -549,6 +549,7 @@
   divider_ro_round_rate_parent
   divider_round_rate_parent
   dma_alloc_attrs
+  dma_alloc_noncoherent
   dma_async_device_register
   dma_async_device_unregister
   dma_async_tx_descriptor_init
@@ -584,6 +585,7 @@
   dma_fence_signal_timestamp_locked
   dma_fence_wait_timeout
   dma_free_attrs
+  dma_free_noncoherent
   dma_get_sgtable_attrs
   dma_get_slave_channel
   dma_heap_add
@@ -2512,6 +2514,7 @@
   __traceiter_android_rvh_force_compatible_post
   __traceiter_android_rvh_force_compatible_pre
   __traceiter_android_rvh_gic_v3_set_affinity
+  __traceiter_android_rvh_iommu_setup_dma_ops
   __traceiter_android_rvh_irqs_disable
   __traceiter_android_rvh_irqs_enable
   __traceiter_android_rvh_migrate_queued_task
@@ -2625,6 +2628,7 @@
   __tracepoint_android_rvh_force_compatible_post
   __tracepoint_android_rvh_force_compatible_pre
   __tracepoint_android_rvh_gic_v3_set_affinity
+  __tracepoint_android_rvh_iommu_setup_dma_ops
   __tracepoint_android_rvh_irqs_disable
   __tracepoint_android_rvh_irqs_enable
   __tracepoint_android_rvh_migrate_queued_task
@@ -669,19 +669,46 @@ config ARM64_ERRATUM_1508412

          If unsure, say Y.

-config ARM64_ERRATUM_1974925
-       bool "Kryo 7XX: 1974925: Incorrect read value for Performance Monitors Common Event Identification Register"
+config ARM64_ERRATUM_2051678
+       bool "Cortex-A510: 2051678: disable Hardware Update of the page table's dirty bit"
        default y
-       depends on HW_PERF_EVENTS
        help
-         This option adds a workaround for QCOM Kryo erratum 1974925.
+         This option adds the workaround for ARM Cortex-A510 erratum 2051678.
+         Affected Cortex-A510 might not respect the ordering rules for
+         hardware update of the page table's dirty bit. The workaround
+         is to not enable the feature on affected CPUs.

-         Affected cores return an incorrect value of the AArch64 System
-         Register Performance Monitors Common Event Identification Register 0
-         (PMCEID0_EL0) in which bits corresponding to those events that are
-         fully implemented are not set, which is incorrect.
+         If unsure, say Y.

-         Work around this issue by manually setting those bits to true.
+config ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+       bool
+
+config ARM64_ERRATUM_2054223
+       bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
+       default y
+       select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+       help
+         Enable workaround for ARM Cortex-A710 erratum 2054223.
+
+         Affected cores may fail to flush the trace data on a TSB instruction, when
+         the PE is in the trace prohibited state. This can cause a few bytes of the
+         cached trace to be lost.
+
+         Workaround is to issue two TSB consecutively on affected cores.
+
+         If unsure, say Y.
+
+config ARM64_ERRATUM_2067961
+       bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
+       default y
+       select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+       help
+         Enable workaround for ARM Neoverse-N2 erratum 2067961.
+
+         Affected cores may fail to flush the trace data on a TSB instruction, when
+         the PE is in the trace prohibited state. This can cause a few bytes of the
+         cached trace to be lost.
+
+         Workaround is to issue two TSB consecutively on affected cores.
+
+         If unsure, say Y.
@@ -23,7 +23,7 @@
 #define dsb(opt)       asm volatile("dsb " #opt : : : "memory")

 #define psb_csync()    asm volatile("hint #17" : : : "memory")
-#define tsb_csync()    asm volatile("hint #18" : : : "memory")
+#define __tsb_csync()  asm volatile("hint #18" : : : "memory")
 #define csdb()         asm volatile("hint #20" : : : "memory")

 #define spec_bar()     asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
@@ -50,6 +50,20 @@
 #define dma_rmb()      dmb(oshld)
 #define dma_wmb()      dmb(oshst)

+#define tsb_csync()                                                            \
+       do {                                                                    \
+               /*                                                              \
+                * CPUs affected by Arm Erratum 2054223 or 2067961 need         \
+                * another TSB to ensure the trace is flushed. The barriers     \
+                * don't have to be strictly back to back, as long as the       \
+                * CPU is in trace prohibited state.                            \
+                */                                                             \
+               if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))    \
+                       __tsb_csync();                                          \
+               __tsb_csync();                                                  \
+       } while (0)
+
 /*
  * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
  * and 0 otherwise.
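The new tsb_csync() wrapper hides the erratum from callers: the capability bit is set once at boot on affected parts, and every trace-flush site then transparently issues the barrier twice. A minimal user-space C model of this pattern follows (tsb_flush_failure_cap, __tsb_barrier() and tsb_barrier() are hypothetical stand-ins for the kernel's capability check and the real "hint #18" instruction, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Set once during boot-time CPU feature detection, read-only afterwards. */
    static atomic_bool tsb_flush_failure_cap;

    static void __tsb_barrier(void)
    {
            /* Stand-in for the real TSB CSYNC ("hint #18") instruction. */
            asm volatile("" ::: "memory");
    }

    static void tsb_barrier(void)
    {
            /*
             * Affected CPUs (errata 2054223/2067961) may drop trace bytes on
             * a single TSB while in the trace prohibited state, so issue an
             * extra one first. Unaffected CPUs pay only one extra branch.
             */
            if (atomic_load(&tsb_flush_failure_cap))
                    __tsb_barrier();
            __tsb_barrier();
    }

Callers never test the erratum themselves; they keep calling the one macro, which is why only barrier.h and the capability table need to change.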
@@ -69,6 +69,7 @@
 #define ARM64_WORKAROUND_1508412               58
 #define ARM64_HAS_LDAPR                        59
 #define ARM64_KVM_PROTECTED_MODE               60
+#define ARM64_WORKAROUND_TSB_FLUSH_FAILURE     61

 /* kabi: reserve 62 - 76 for future cpu capabilities */
 #define ARM64_NCAPS                            76
@@ -72,6 +72,9 @@
 #define ARM_CPU_PART_CORTEX_A76        0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1       0xD0C
 #define ARM_CPU_PART_CORTEX_A77        0xD0D
+#define ARM_CPU_PART_CORTEX_A510       0xD46
+#define ARM_CPU_PART_CORTEX_A710       0xD47
+#define ARM_CPU_PART_NEOVERSE_N2       0xD49

 #define APM_CPU_PART_POTENZA           0x000

@@ -91,9 +94,6 @@
 #define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
 #define QCOM_CPU_PART_KRYO_4XX_GOLD    0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
-#define QCOM_CPU_PART_KRYO_7XX_SILVER  0xD46
-#define QCOM_CPU_PART_KRYO_7XX_GOLD    0xD47
-#define QCOM_CPU_PART_KRYO_7XX_GOLD_PLUS 0xD48

 #define NVIDIA_CPU_PART_DENVER         0x003
 #define NVIDIA_CPU_PART_CARMEL         0x004
@@ -112,6 +112,9 @@
 #define MIDR_CORTEX_A76        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A510       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_A710       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_NEOVERSE_N2       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_THUNDERX          MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX     MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX     MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -126,11 +129,6 @@
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
 #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
-#define MIDR_QCOM_KRYO_7XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_7XX_SILVER)
-#define MIDR_QCOM_KRYO_7XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_7XX_GOLD)
-#define MIDR_QCOM_KRYO_7XX_GOLD_PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, \
-                                       QCOM_CPU_PART_KRYO_7XX_GOLD_PLUS)
-
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
@@ -342,6 +342,18 @@ static const struct midr_range erratum_1463225[] = {
 };
 #endif

+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+static const struct midr_range tsb_flush_fail_cpus[] = {
+#ifdef CONFIG_ARM64_ERRATUM_2067961
+       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2054223
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+#endif
+       {},
+};
+#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -527,6 +539,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                                  0, 0,
                                  1, 0),
        },
 #endif
+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+       {
+               .desc = "ARM erratum 2067961 or 2054223",
+               .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
+               ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
+       },
+#endif
        {
        }
@@ -1599,9 +1599,9 @@ static bool cpu_has_broken_dbm(void)
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        /* Kryo4xx Silver (rdpe => r1p0) */
        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_7XX_SILVER),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_7XX_GOLD),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_7XX_GOLD_PLUS),
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2051678
+       MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+#endif
        {},
 };
@@ -24,9 +24,6 @@
 #include <linux/sched_clock.h>
 #include <linux/smp.h>

-#define ERRATUM_1974925_MASK (BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(24) | BIT(25) \
-                             | BIT(26) | BIT(27))
-
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL        0xC2

@@ -1017,13 +1014,6 @@ struct armv8pmu_probe_info {
        bool present;
 };

-#ifdef CONFIG_ARM64_ERRATUM_1974925
-static void armv8pmu_overwrite_pmceid0_el0(u64 *val)
-{
-       (*val) |= (ERRATUM_1974925_MASK | (ERRATUM_1974925_MASK << 32));
-}
-#endif
-
 static void __armv8pmu_probe_pmu(void *info)
 {
        struct armv8pmu_probe_info *probe = info;
@@ -1049,11 +1039,7 @@ static void __armv8pmu_probe_pmu(void *info)
        /* Add the CPU cycles counter */
        cpu_pmu->num_events += 1;

-       pmceid_raw[0] = read_sysreg(pmceid0_el0);
-#ifdef CONFIG_ARM64_ERRATUM_1974925
-       armv8pmu_overwrite_pmceid0_el0(&pmceid_raw[0]);
-#endif
-       pmceid[0] = pmceid_raw[0];
+       pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
        pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

        bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
@@ -53,6 +53,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (iommu) {
                iommu_setup_dma_ops(dev, dma_base, size);
                trace_android_vh_iommu_setup_dma_ops(dev, dma_base, size);
+               trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size);
        }

 #ifdef CONFIG_XEN
@@ -673,7 +673,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
-               alloc->free_async_space += size + sizeof(struct binder_buffer);
+               alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
@@ -192,6 +192,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_alloc_iova);
@@ -245,8 +245,10 @@ struct etm4_enable_arg {
  */
 static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
 {
-       if (drvdata->trfcr)
-               cpu_prohibit_trace();
+       /* If the CPU doesn't support FEAT_TRF, nothing to do */
+       if (!drvdata->trfcr)
+               return;
+       cpu_prohibit_trace();
 }

 /*
@@ -919,8 +919,10 @@ struct etmv4_save_state {
  * @nooverflow:        Indicate if overflow prevention is supported.
  * @atbtrig:   If the implementation can support ATB triggers
  * @lpoverride:        If the implementation can support low-power state over.
- * @trfcr:     If the CPU supportfs FEAT_TRF, value of the TRFCR_ELx with
- *             trace allowed at user and kernel ELs. Otherwise, 0.
+ * @trfcr:     If the CPU supports FEAT_TRF, value of the TRFCR_ELx that
+ *             allows tracing at all ELs. We don't want to compute this
+ *             at runtime, due to the additional setting of TRFCR_CX when
+ *             in EL2. Otherwise, 0.
  * @config:    structure holding configuration parameters.
  * @save_trfcr:        Saved TRFCR_EL1 register during a CPU PM event.
  * @save_state:        State to be preserved across power loss
@@ -1,5 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-
 /*
  * Arm v8 Self-Hosted trace support.
  *
@@ -29,4 +28,4 @@ static inline void cpu_prohibit_trace(void)
        /* Prohibit tracing at EL0 & the kernel EL */
        write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
 }
-#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
+#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
@@ -664,8 +664,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
        if (!flat_buf)
                return -ENOMEM;

-       flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
-                                            &flat_buf->daddr, GFP_KERNEL);
+       flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+                                               &flat_buf->daddr,
+                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (!flat_buf->vaddr) {
                kfree(flat_buf);
                return -ENOMEM;
@@ -686,14 +687,18 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
        if (flat_buf && flat_buf->daddr) {
                struct device *real_dev = flat_buf->dev->parent;

-               dma_free_coherent(real_dev, flat_buf->size,
-                                 flat_buf->vaddr, flat_buf->daddr);
+               dma_free_noncoherent(real_dev, etr_buf->size,
+                                    flat_buf->vaddr, flat_buf->daddr,
+                                    DMA_FROM_DEVICE);
        }
        kfree(flat_buf);
 }

 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 {
        struct etr_flat_buf *flat_buf = etr_buf->private;
+       struct device *real_dev = flat_buf->dev->parent;

        /*
         * Adjust the buffer to point to the beginning of the trace data
         * and update the available trace data.
@@ -703,6 +708,19 @@ static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
                etr_buf->len = etr_buf->size;
        else
                etr_buf->len = rwp - rrp;

+       /*
+        * The driver always starts tracing at the beginning of the buffer,
+        * the only reason why we would get a wrap around is when the buffer
+        * is full. Sync the entire buffer in one go for this case.
+        */
+       if (etr_buf->offset + etr_buf->len > etr_buf->size)
+               dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+                                       etr_buf->size, DMA_FROM_DEVICE);
+       else
+               dma_sync_single_for_cpu(real_dev,
+                                       flat_buf->daddr + etr_buf->offset,
+                                       etr_buf->len, DMA_FROM_DEVICE);
 }

 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
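Switching the ETR flat buffer from a coherent to a noncoherent allocation is what makes the explicit dma_sync_single_for_cpu() above necessary: the CPU must reclaim ownership of exactly the region the device wrote before reading trace data. A small user-space C sketch of the range-selection logic (sync_range() and sync_trace_data() are hypothetical stand-ins; only the wrap/partial decision mirrors the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for dma_sync_single_for_cpu(): record the range. */
    static void sync_range(uint64_t start, uint64_t len)
    {
            printf("sync [%llu, %llu)\n",
                   (unsigned long long)start, (unsigned long long)(start + len));
    }

    /*
     * Mirrors the choice in tmc_etr_sync_flat_buf(): if the valid data wraps
     * past the end of the buffer, the buffer must be full, so sync all of it;
     * otherwise sync only the valid span.
     */
    static void sync_trace_data(uint64_t daddr, uint64_t offset,
                                uint64_t len, uint64_t size)
    {
            if (offset + len > size)
                    sync_range(daddr, size);
            else
                    sync_range(daddr + offset, len);
    }

    int main(void)
    {
            sync_trace_data(0x1000, 256, 512, 4096);  /* partial: 512 bytes */
            sync_trace_data(0x1000, 3840, 512, 4096); /* wrapped: all 4096 */
            return 0;
    }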
@@ -1678,6 +1696,14 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
         */
        if (etr_perf->snapshot)
                handle->head += size;

+       /*
+        * Ensure that the AUX trace data is visible before the aux_head
+        * is updated via perf_aux_output_end(), as expected by the
+        * perf ring buffer.
+        */
+       smp_wmb();
+
 out:
        /*
         * Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -869,6 +869,10 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
        if (WARN_ON(trbe_csdev))
                return;

+       /* If the TRBE was not probed on the CPU, we shouldn't be here */
+       if (WARN_ON(!cpudata->drvdata))
+               return;
+
        dev = &cpudata->drvdata->pdev->dev;
        desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
        if (!desc.name)
@@ -950,7 +954,9 @@ static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
                return -ENOMEM;

        for_each_cpu(cpu, &drvdata->supported_cpus) {
-               smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+               /* If we fail to probe the CPU, let us defer it to hotplug callbacks */
+               if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
+                       continue;
                if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
                        arm_trbe_register_coresight_cpu(drvdata, cpu);
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
  */
+#include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
+#include <linux/idr.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
@@ -28,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
        }
 }

-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-       struct tee_device *teedev = shm->ctx->teedev;
-
-       if (shm->flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
-
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

@@ -64,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
        tee_device_put(teedev);
 }

-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-                       *attach, enum dma_data_direction dir)
-{
-       return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-                                    struct sg_table *table,
-                                    enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
-       struct tee_shm *shm = dmabuf->priv;
-
-       tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-       struct tee_shm *shm = dmabuf->priv;
-       size_t size = vma->vm_end - vma->vm_start;
-
-       /* Refuse sharing shared memory provided by application */
-       if (shm->flags & TEE_SHM_USER_MAPPED)
-               return -EINVAL;
-
-       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-                              size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-       .map_dma_buf = tee_shm_op_map_dma_buf,
-       .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-       .release = tee_shm_op_release,
-       .mmap = tee_shm_op_mmap,
-};
-
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 {
        struct tee_device *teedev = ctx->teedev;
@@ -137,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_dev_put;
        }

+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
@@ -150,10 +104,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_kfree;
        }

-
        if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
@@ -161,28 +112,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       goto err_rem;
-               }
        }

        teedev_ctx_get(ctx);

        return shm;
-err_rem:
-       if (flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
 err_pool_free:
        poolm->ops->free(poolm, shm);
 err_kfree:
@@ -243,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }

+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
@@ -303,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }

-       if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       teedev->desc->ops->shm_unregister(ctx, shm);
-                       goto err;
-               }
-       }
-
        return shm;
 err:
        if (shm) {
@@ -336,6 +255,35 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(tee_shm_register);

+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+       tee_shm_put(filp->private_data);
+       return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct tee_shm *shm = filp->private_data;
+       size_t size = vma->vm_end - vma->vm_start;
+
+       /* Refuse sharing shared memory provided by application */
+       if (shm->flags & TEE_SHM_USER_MAPPED)
+               return -EINVAL;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+                              size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+       .owner = THIS_MODULE,
+       .release = tee_shm_fop_release,
+       .mmap = tee_shm_fop_mmap,
+};
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:       Shared memory handle
@@ -348,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

-       get_dma_buf(shm->dmabuf);
-       fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+       /* matched by tee_shm_put() in tee_shm_fop_release() */
+       refcount_inc(&shm->refcount);
+       fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
-               dma_buf_put(shm->dmabuf);
+               tee_shm_put(shm);
        return fd;
 }

@@ -361,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
  */
 void tee_shm_free(struct tee_shm *shm)
 {
-       /*
-        * dma_buf_put() decreases the dmabuf reference counter and will
-        * call tee_shm_release() when the last reference is gone.
-        *
-        * In the case of driver private memory we call tee_shm_release
-        * directly instead as it doesn't have a reference counter.
-        */
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
-       else
-               tee_shm_release(shm);
+       tee_shm_put(shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_free);

@@ -478,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
+       /*
+        * If the tee_shm was found in the IDR it must have a refcount
+        * larger than 0 due to the guarantee in tee_shm_put() below. So
+        * it's safe to use refcount_inc().
+        */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
-       else if (shm->flags & TEE_SHM_DMA_BUF)
-               get_dma_buf(shm->dmabuf);
+       else
+               refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
 }
@@ -493,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
  */
 void tee_shm_put(struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
+       struct tee_device *teedev = shm->ctx->teedev;
+       bool do_release = false;
+
+       mutex_lock(&teedev->mutex);
+       if (refcount_dec_and_test(&shm->refcount)) {
+               /*
+                * refcount has reached 0, we must now remove it from the
+                * IDR before releasing the mutex. This will guarantee that
+                * the refcount_inc() in tee_shm_get_from_id() never starts
+                * from 0.
+                */
+               if (shm->flags & TEE_SHM_DMA_BUF)
+                       idr_remove(&teedev->idr, shm->id);
+               do_release = true;
+       }
+       mutex_unlock(&teedev->mutex);
+
+       if (do_release)
+               tee_shm_release(teedev, shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_put);
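The invariant this rework establishes is that an object found in the ID table always has refcount >= 1, because the final put unpublishes it from the table under the same mutex in which the count drops to zero; a concurrent lookup therefore can never "resurrect" a dying object with a plain increment. A minimal user-space C model of that scheme (shm_obj, shm_get(), shm_put() and the fixed-size table are toy stand-ins for tee_shm, tee_shm_get_from_id(), tee_shm_put() and the kernel IDR):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct shm_obj {
            int refcount;   /* protected by table_lock in this model */
            int id;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct shm_obj *table[16];  /* toy stand-in for the kernel IDR */

    struct shm_obj *shm_get(int id)
    {
            struct shm_obj *o;

            pthread_mutex_lock(&table_lock);
            o = (id >= 0 && id < 16) ? table[id] : NULL;
            if (o)
                    o->refcount++;  /* found in table => refcount >= 1 */
            pthread_mutex_unlock(&table_lock);
            return o;
    }

    void shm_put(struct shm_obj *o)
    {
            bool do_release = false;

            pthread_mutex_lock(&table_lock);
            if (--o->refcount == 0) {
                    table[o->id] = NULL;  /* unpublish before dropping lock */
                    do_release = true;
            }
            pthread_mutex_unlock(&table_lock);

            if (do_release)
                    free(o);  /* release work happens outside the lock */
    }

This is also why tee_shm_release() now takes the teedev explicitly: by the time it runs, the object is already unreachable, and the release work can safely happen outside the mutex.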
@@ -175,6 +175,7 @@ void incfs_free_mount_info(struct mount_info *mi)
                kfree(mi->pseudo_file_xattr[i].data);
        kfree(mi->mi_per_uid_read_timeouts);
        incfs_free_sysfs_node(mi->mi_sysfs_node);
+       kfree(mi->mi_options.sysfs_name);
        kfree(mi);
 }
@@ -147,8 +147,12 @@ static long ioctl_permit_fill(struct file *f, void __user *arg)
                return -EFAULT;

        file = fget(permit_fill.file_descriptor);
-       if (IS_ERR(file))
+       if (IS_ERR_OR_NULL(file)) {
+               if (!file)
+                       return -ENOENT;
+
                return PTR_ERR(file);
+       }

        if (file->f_op != &incfs_file_ops) {
                error = -EPERM;
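The bug being fixed here is a convention mismatch: fget() reports a bad descriptor by returning NULL, never an ERR_PTR(), so the old bare IS_ERR() check let NULL through and PTR_ERR(NULL) would have read as 0, i.e. success. A user-space C model of the convention (PTR_ERR/IS_ERR/IS_ERR_OR_NULL re-implemented here just for illustration; check() is a hypothetical caller):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
            return !ptr || IS_ERR(ptr);
    }

    static long check(const void *file)
    {
            if (IS_ERR_OR_NULL(file)) {
                    if (!file)
                            return -ENOENT;  /* the case the old code missed */
                    return PTR_ERR(file);
            }
            return 0;
    }

    int main(void)
    {
            printf("NULL file -> %ld\n", check(NULL));  /* -ENOENT, not 0 */
            return 0;
    }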
@@ -393,7 +393,7 @@ static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
        struct mount_info *mi = get_mount_info(file_superblock(file));
        bool root;

-       if (!dir) {
+       if (!dir || !mi) {
                error = -EBADF;
                goto out;
        }
@@ -1336,6 +1336,9 @@ static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *trap;
        int error = 0;

+       if (!mi)
+               return -EBADF;
+
        error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
        if (error)
                return error;
@@ -1664,6 +1667,9 @@ static ssize_t incfs_getxattr(struct dentry *d, const char *name,
        size_t stored_size;
        int i;

+       if (!mi)
+               return -EBADF;
+
        if (di && di->backing_path.dentry)
                return vfs_getxattr(di->backing_path.dentry, name, value, size);

@@ -1698,6 +1704,9 @@ static ssize_t incfs_setxattr(struct dentry *d, const char *name,
        size_t *stored_size;
        int i;

+       if (!mi)
+               return -EBADF;
+
        if (di && di->backing_path.dentry)
                return vfs_setxattr(di->backing_path.dentry, name, value, size,
                                    flags);
@@ -1736,6 +1745,11 @@ static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
                return vfs_listxattr(di->backing_path.dentry, list, size);
 }

+static int incfs_test_super(struct super_block *s, void *p)
+{
+       return s->s_fs_info != NULL;
+}
+
 struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
                              const char *dev_name, void *data)
 {
@@ -1746,7 +1760,8 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
        struct dentry *incomplete_dir = NULL;
        struct super_block *src_fs_sb = NULL;
        struct inode *root_inode = NULL;
-       struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL);
+       struct super_block *sb = sget(type, incfs_test_super, set_anon_super,
+                                     flags, NULL);
        int error = 0;

        if (IS_ERR(sb))
@@ -1787,13 +1802,18 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
        src_fs_sb = backing_dir_path.dentry->d_sb;
        sb->s_maxbytes = src_fs_sb->s_maxbytes;

-       mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
+       if (!sb->s_fs_info) {
+               mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);

-       if (IS_ERR_OR_NULL(mi)) {
-               error = PTR_ERR(mi);
-               pr_err("incfs: Error allocating mount info. %d\n", error);
-               mi = NULL;
-               goto err;
+               if (IS_ERR_OR_NULL(mi)) {
+                       error = PTR_ERR(mi);
+                       pr_err("incfs: Error allocating mount info. %d\n", error);
+                       mi = NULL;
+                       goto err;
+               }
+               sb->s_fs_info = mi;
+       } else {
+               mi = sb->s_fs_info;
        }

        index_dir = open_or_create_special_dir(backing_dir_path.dentry,
@@ -1818,21 +1838,22 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
        }
        mi->mi_incomplete_dir = incomplete_dir;

-       sb->s_fs_info = mi;
        root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
        if (IS_ERR(root_inode)) {
                error = PTR_ERR(root_inode);
                goto err;
        }

-       sb->s_root = d_make_root(root_inode);
-       if (!sb->s_root) {
-               error = -ENOMEM;
-               goto err;
+       if (!sb->s_root) {
+               sb->s_root = d_make_root(root_inode);
+               if (!sb->s_root) {
+                       error = -ENOMEM;
+                       goto err;
+               }
+               error = incfs_init_dentry(sb->s_root, &backing_dir_path);
+               if (error)
+                       goto err;
        }
-       error = incfs_init_dentry(sb->s_root, &backing_dir_path);
-       if (error)
-               goto err;

        path_put(&backing_dir_path);
        sb->s_flags |= SB_ACTIVE;
@@ -1854,6 +1875,9 @@ static int incfs_remount_fs(struct super_block *sb, int *flags, char *data)
        struct mount_info *mi = get_mount_info(sb);
        int err = 0;

+       if (!mi)
+               return err;
+
        sync_filesystem(sb);
        err = parse_options(&options, (char *)data);
        if (err)
@@ -1883,12 +1907,16 @@ void incfs_kill_sb(struct super_block *sb)
        pr_debug("incfs: unmount\n");
        generic_shutdown_super(sb);
        incfs_free_mount_info(mi);
+       sb->s_fs_info = NULL;
 }

 static int show_options(struct seq_file *m, struct dentry *root)
 {
        struct mount_info *mi = get_mount_info(root->d_sb);

+       if (!mi)
+               return -EBADF;
+
        seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
        seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
        if (mi->mi_options.read_log_pages != 0) {
@@ -19,7 +19,6 @@ static inline struct mount_info *get_mount_info(struct super_block *sb)
 {
        struct mount_info *result = sb->s_fs_info;

-       WARN_ON(!result);
        return result;
 }
@@ -195,7 +195,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
  * @offset:    offset of buffer in user space
  * @pages:     locked pages from userspace
  * @num_pages: number of locked pages
- * @dmabuf:    dmabuf used to for exporting to user space
+ * @refcount:  reference counter
  * @flags:     defined by TEE_SHM_* in tee_drv.h
  * @id:        unique id of a shared memory object on this device
  *
@@ -210,7 +210,7 @@ struct tee_shm {
        unsigned int offset;
        struct page **pages;
        size_t num_pages;
-       struct dma_buf *dmabuf;
+       refcount_t refcount;
        u32 flags;
        int id;
 };
@@ -12,6 +12,10 @@
 #include <linux/tracepoint.h>
 #include <trace/hooks/vendor_hooks.h>

+DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops,
+       TP_PROTO(struct device *dev, u64 dma_base, u64 size),
+       TP_ARGS(dev, dma_base, size), 1);
+
 DECLARE_HOOK(android_vh_iommu_setup_dma_ops,
        TP_PROTO(struct device *dev, u64 dma_base, u64 size),
        TP_ARGS(dev, dma_base, size));
@@ -92,7 +92,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
        int i;
        struct hlist_head *hash;

-       hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+       hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);
@@ -153,7 +153,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)

                spin_lock_init(&dtab->index_lock);
        } else {
-               dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+               dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
                                                      sizeof(struct bpf_dtab_netdev *),
                                                      dtab->map.numa_node);
                if (!dtab->netdev_map)
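The (u64) casts matter on 32-bit kernels, where size_t is 32 bits wide: max_entries * sizeof(...) is then a 32-bit multiply that can wrap to a tiny value before bpf_map_area_alloc() (which takes a u64 size) ever sees it, yielding an undersized allocation. A user-space C model of the wrap, forcing 32-bit arithmetic to imitate a 32-bit size_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 2^29 entries of 8 bytes each = 2^32 bytes, which wraps to 0
             * in a 32-bit multiply but is computed correctly in 64 bits. */
            uint32_t max_entries = 1U << 29;
            uint32_t wrapped = max_entries * (uint32_t)sizeof(uint64_t);
            uint64_t correct = (uint64_t)max_entries * sizeof(uint64_t);

            printf("32-bit multiply: %u bytes\n", wrapped);    /* prints 0 */
            printf("64-bit multiply: %llu bytes\n",
                   (unsigned long long)correct);               /* 4294967296 */
            return 0;
    }

The same pattern is applied to sock_map_alloc() further down in this merge.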
@@ -474,8 +474,10 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
         * state has been re-checked. A memcpy() for all of @desc
         * cannot be used because of the atomic_t @state_var field.
         */
-       memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
-              sizeof(desc_out->text_blk_lpos));       /* LMM(desc_read:C) */
+       if (desc_out) {
+               memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
+                      sizeof(desc_out->text_blk_lpos));        /* LMM(desc_read:C) */
+       }
        if (seq_out)
                *seq_out = info->seq;   /* also part of desc_read:C */
        if (caller_id_out)
@@ -528,7 +530,8 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
        state_val = atomic_long_read(state_var);        /* LMM(desc_read:E) */
        d_state = get_desc_state(id, state_val);
 out:
-       atomic_long_set(&desc_out->state_var, state_val);
+       if (desc_out)
+               atomic_long_set(&desc_out->state_var, state_val);
        return d_state;
 }

@@ -1450,6 +1453,9 @@ static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)

        atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
                        DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
+
+       /* Best effort to remember the last finalized @id. */
+       atomic_long_set(&desc_ring->last_finalized_id, id);
 }

 /**
@@ -1659,7 +1665,12 @@ void prb_commit(struct prb_reserved_entry *e)
  */
 void prb_final_commit(struct prb_reserved_entry *e)
 {
+       struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
+
        _prb_commit(e, desc_finalized);
+
+       /* Best effort to remember the last finalized @id. */
+       atomic_long_set(&desc_ring->last_finalized_id, e->id);
 }

 /*
@@ -2007,9 +2018,39 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
  */
 u64 prb_next_seq(struct printk_ringbuffer *rb)
 {
-       u64 seq = 0;
+       struct prb_desc_ring *desc_ring = &rb->desc_ring;
+       enum desc_state d_state;
+       unsigned long id;
+       u64 seq;

-       /* Search forward from the oldest descriptor. */
+       /* Check if the cached @id still points to a valid @seq. */
+       id = atomic_long_read(&desc_ring->last_finalized_id);
+       d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
+
+       if (d_state == desc_finalized || d_state == desc_reusable) {
+               /*
+                * Begin searching after the last finalized record.
+                *
+                * On 0, the search must begin at 0 because, due to hack#2
+                * of the bootstrapping phase, it is not known whether a
+                * record at index 0 exists.
+                */
+               if (seq != 0)
+                       seq++;
+       } else {
+               /*
+                * The information about the last finalized sequence number
+                * has gone. It should happen only when there is a flood of
+                * new messages and the ringbuffer is rapidly recycled.
+                * Give up and start from the beginning.
+                */
+               seq = 0;
+       }
+
+       /*
+        * The information about the last finalized @seq might be inaccurate.
+        * Search forward to find the current one.
+        */
        while (_prb_read_valid(rb, &seq, NULL, NULL))
                seq++;
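The shape of this optimization is a classic one: keep a best-effort cached hint, validate it before use, fall back to the old behavior when it has gone stale, and let the existing linear scan guarantee correctness either way. A toy user-space C model of that strategy (ring, read_valid() and next_seq() are hypothetical stand-ins, not the printk_ringbuffer API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ring { uint64_t hint_seq; bool hint_valid; };

    static bool read_valid(const struct ring *r, uint64_t seq)
    {
            (void)r;
            return seq < 100;  /* pretend records 0..99 exist */
    }

    static uint64_t next_seq(const struct ring *r)
    {
            uint64_t seq = 0;

            /* Resume after the hinted record when it is still usable;
             * otherwise start from 0 as the old code always did. */
            if (r->hint_valid && r->hint_seq != 0)
                    seq = r->hint_seq + 1;

            while (read_valid(r, seq))  /* the scan does the real work */
                    seq++;
            return seq;
    }

    int main(void)
    {
            struct ring r = { .hint_seq = 57, .hint_valid = true };
            printf("next seq: %llu\n", (unsigned long long)next_seq(&r));
            return 0;  /* prints 100 whether or not the hint was valid */
    }

The hint only shortens the scan; correctness never depends on it, which is why the kernel can update last_finalized_id with relaxed, best-effort stores.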
@@ -2046,6 +2087,7 @@ void prb_init(struct printk_ringbuffer *rb,
        rb->desc_ring.infos = infos;
        atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
        atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
+       atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));

        rb->text_data_ring.size_bits = textbits;
        rb->text_data_ring.data = text_buf;
@@ -75,6 +75,7 @@ struct prb_desc_ring {
        struct printk_info      *infos;
        atomic_long_t           head_id;
        atomic_long_t           tail_id;
+       atomic_long_t           last_finalized_id;
 };

 /*
@@ -258,6 +259,7 @@ static struct printk_ringbuffer name = {                       \
                .infos          = &_##name##_infos[0],                  \
                .head_id        = ATOMIC_INIT(DESC0_ID(descbits)),      \
                .tail_id        = ATOMIC_INIT(DESC0_ID(descbits)),      \
+               .last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)),   \
        },                                                              \
        .text_data_ring = {                                             \
                .size_bits = (avgtextbits) + (descbits),                \
@@ -812,7 +812,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-       bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+       bool no_wq;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;
@@ -837,9 +837,15 @@ void synchronize_rcu_expedited(void)
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

+       /* Don't use workqueue during boot or from an incoming CPU. */
+       preempt_disable();
+       no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
+               !cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
+       preempt_enable();
+
        /* Ensure that load happens before action based on it. */
-       if (unlikely(boottime)) {
-               /* Direct call during scheduler init and early_initcalls(). */
+       if (unlikely(no_wq)) {
+               /* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
@@ -857,7 +863,7 @@ void synchronize_rcu_expedited(void)
        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);

-       if (likely(!boottime))
+       if (likely(!no_wq))
                destroy_work_on_stack(&rew.rew_work);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -52,7 +52,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
        if (err)
                goto free_stab;

-       stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+       stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
                                       sizeof(struct sock *),
                                       stab->map.numa_node);
        if (stab->sks)
@@ -4409,6 +4409,7 @@ static int sysfs_test(const char *mount_dir)
        int fd = -1;
        int pid = -1;
        char buffer[32];
+       char *null_buf = NULL;
        int status;
        struct incfs_per_uid_read_timeouts purt_set[] = {
                {
@@ -4437,13 +4438,13 @@ static int sysfs_test(const char *mount_dir)
        TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
        TESTEQUAL(ioctl_test_last_error(cmd_fd, NULL, 0, 0), 0);
        TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 0), 0);
-       TEST(read(fd, NULL, 1), -1);
+       TESTEQUAL(read(fd, null_buf, 1), -1);
        TESTEQUAL(ioctl_test_last_error(cmd_fd, &file.id, 0, -ETIME), 0);
        TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 2), 0);

        TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
        TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 0), 0);
-       TESTEQUAL(read(fd, NULL, 1), -1);
+       TESTEQUAL(read(fd, null_buf, 1), -1);
        TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 1), 0);
        TESTSYSCALL(close(fd));
        fd = -1;