Merge 6.1.24 into android14-6.1

Changes in 6.1.24
	dm cache: Add some documentation to dm-cache-background-tracker.h
	dm integrity: Remove bi_sector that's only used by commented debug code
	dm: change "unsigned" to "unsigned int"
	dm: fix improper splitting for abnormal bios
	KVM: arm64: PMU: Align chained counter implementation with architecture pseudocode
	KVM: arm64: PMU: Distinguish between 64bit counter and 64bit overflow
	KVM: arm64: PMU: Sanitise PMCR_EL0.LP on first vcpu run
	KVM: arm64: PMU: Don't save PMCR_EL0.{C,P} for the vCPU
	gpio: GPIO_REGMAP: select REGMAP instead of depending on it
	Drivers: vmbus: Check for channel allocation before looking up relids
	ASoC: SOF: ipc4: Ensure DSP is in D0I0 during sof_ipc4_set_get_data()
	pwm: Make .get_state() callback return an error code
	pwm: hibvt: Explicitly set .polarity in .get_state()
	pwm: cros-ec: Explicitly set .polarity in .get_state()
	pwm: iqs620a: Explicitly set .polarity in .get_state()
	pwm: sprd: Explicitly set .polarity in .get_state()
	pwm: meson: Explicitly set .polarity in .get_state()
	ASoC: codecs: lpass: fix the order of clks turn off during suspend
	KVM: s390: pv: fix external interruption loop not always detected
	wifi: mac80211: fix the size calculation of ieee80211_ie_len_eht_cap()
	wifi: mac80211: fix invalid drv_sta_pre_rcu_remove calls for non-uploaded sta
	net: qrtr: Fix a refcount bug in qrtr_recvmsg()
	net: phylink: add phylink_expects_phy() method
	net: stmmac: check if MAC needs to attach to a PHY
	net: stmmac: remove redundant fixup to support fixed-link mode
	l2tp: generate correct module alias strings
	wifi: brcmfmac: Fix SDIO suspend/resume regression
	NFSD: Avoid calling OPDESC() with ops->opnum == OP_ILLEGAL
	nfsd: call op_release, even when op_func returns an error
	icmp: guard against too small mtu
	ALSA: hda/hdmi: Preserve the previous PCM device upon re-enablement
	net: don't let netpoll invoke NAPI if in xmit context
	net: dsa: mv88e6xxx: Reset mv88e6393x force WD event bit
	sctp: check send stream number after wait_for_sndbuf
	net: qrtr: Do not do DEL_SERVER broadcast after DEL_CLIENT
	ipv6: Fix an uninit variable access bug in __ip6_make_skb()
	platform/x86: think-lmi: Fix memory leak when showing current settings
	platform/x86: think-lmi: Fix memory leaks when parsing ThinkStation WMI strings
	platform/x86: think-lmi: Clean up display of current_value on Thinkstation
	gpio: davinci: Do not clear the bank intr enable bit in save_context
	gpio: davinci: Add irq chip flag to skip set wake
	net: ethernet: ti: am65-cpsw: Fix mdio cleanup in probe
	net: stmmac: fix up RX flow hash indirection table when setting channels
	sunrpc: only free unix grouplist after RCU settles
	NFSD: callback request does not use correct credential for AUTH_SYS
	ice: fix wrong fallback logic for FDIR
	ice: Reset FDIR counter in FDIR init stage
	raw: use net_hash_mix() in hash function
	raw: Fix NULL deref in raw_get_next().
	ping: Fix potential NULL deref for /proc/net/icmp.
	ethtool: reset #lanes when lanes is omitted
	netlink: annotate lockless accesses to nlk->max_recvmsg_len
	gve: Secure enough bytes in the first TX desc for all TCP pkts
	arm64: compat: Work around uninitialized variable warning
	net: stmmac: check fwnode for phy device before scanning for phy
	cxl/pci: Fix CDAT retrieval on big endian
	cxl/pci: Handle truncated CDAT header
	cxl/pci: Handle truncated CDAT entries
	cxl/pci: Handle excessive CDAT length
	PCI/DOE: Silence WARN splat with CONFIG_DEBUG_OBJECTS=y
	PCI/DOE: Fix memory leak with CONFIG_DEBUG_OBJECTS=y
	usb: xhci: tegra: fix sleep in atomic call
	xhci: Free the command allocated for setting LPM if we return early
	xhci: also avoid the XHCI_ZERO_64B_REGS quirk with a passthrough iommu
	usb: cdnsp: Fixes error: uninitialized symbol 'len'
	usb: dwc3: pci: add support for the Intel Meteor Lake-S
	USB: serial: cp210x: add Silicon Labs IFS-USB-DATACABLE IDs
	usb: typec: altmodes/displayport: Fix configure initial pin assignment
	USB: serial: option: add Telit FE990 compositions
	USB: serial: option: add Quectel RM500U-CN modem
	drivers: iio: adc: ltc2497: fix LSB shift
	iio: adis16480: select CONFIG_CRC32
	iio: adc: qcom-spmi-adc5: Fix the channel name
	iio: adc: ti-ads7950: Set `can_sleep` flag for GPIO chip
	iio: dac: cio-dac: Fix max DAC write value check for 12-bit
	iio: buffer: correctly return bytes written in output buffers
	iio: buffer: make sure O_NONBLOCK is respected
	iio: light: cm32181: Unregister second I2C client if present
	tty: serial: sh-sci: Fix transmit end interrupt handler
	tty: serial: sh-sci: Fix Rx on RZ/G2L SCI
	tty: serial: fsl_lpuart: avoid checking for transfer complete when UARTCTRL_SBK is asserted in lpuart32_tx_empty
	nilfs2: fix potential UAF of struct nilfs_sc_info in nilfs_segctor_thread()
	nilfs2: fix sysfs interface lifetime
	dt-bindings: serial: renesas,scif: Fix 4th IRQ for 4-IRQ SCIFs
	serial: 8250: Prevent starting up DMA Rx on THRI interrupt
	ksmbd: do not call kvmalloc() with __GFP_NORETRY | __GFP_NO_WARN
	ksmbd: fix slab-out-of-bounds in init_smb2_rsp_hdr
	ALSA: hda/realtek: Add quirk for Clevo X370SNW
	ALSA: hda/realtek: fix mute/micmute LEDs for a HP ProBook
	x86/acpi/boot: Correct acpi_is_processor_usable() check
	x86/ACPI/boot: Use FADT version to check support for online capable
	KVM: x86: Clear "has_error_code", not "error_code", for RM exception injection
	KVM: nVMX: Do not report error code when synthesizing VM-Exit from Real Mode
	mm: kfence: fix PG_slab and memcg_data clearing
	mm: kfence: fix handling discontiguous page
	coresight: etm4x: Do not access TRCIDR1 for identification
	coresight-etm4: Fix for() loop drvdata->nr_addr_cmp range bug
	counter: 104-quad-8: Fix race condition between FLAG and CNTR reads
	counter: 104-quad-8: Fix Synapse action reported for Index signals
	blk-mq: directly poll requests
	iio: adc: ad7791: fix IRQ flags
	io_uring: fix return value when removing provided buffers
	io_uring: fix memory leak when removing provided buffers
	scsi: qla2xxx: Fix memory leak in qla2x00_probe_one()
	scsi: iscsi_tcp: Check that sock is valid before iscsi_set_param()
	nvme: fix discard support without oncs
	cifs: sanitize paths in cifs_update_super_prepath.
	block: ublk: make sure that block size is set correctly
	block: don't set GD_NEED_PART_SCAN if scan partition failed
	perf/core: Fix the same task check in perf_event_set_output
	ftrace: Mark get_lock_parent_ip() __always_inline
	ftrace: Fix issue that 'direct->addr' not restored in modify_ftrace_direct()
	fs: drop peer group ids under namespace lock
	can: j1939: j1939_tp_tx_dat_new(): fix out-of-bounds memory access
	can: isotp: fix race between isotp_sendmsg() and isotp_release()
	can: isotp: isotp_ops: fix poll() to not report false EPOLLOUT events
	can: isotp: isotp_recvmsg(): use sock_recv_cmsgs() to get SOCK_RXQ_OVFL infos
	ACPI: video: Add auto_detect arg to __acpi_video_get_backlight_type()
	ACPI: video: Make acpi_backlight=video work independent from GPU driver
	ACPI: video: Add acpi_backlight=video quirk for Apple iMac14,1 and iMac14,2
	ACPI: video: Add acpi_backlight=video quirk for Lenovo ThinkPad W530
	net: stmmac: Add queue reset into stmmac_xdp_open() function
	tracing/synthetic: Fix races on freeing last_cmd
	tracing/timerlat: Notify new max thread latency
	tracing/osnoise: Fix notify new tracing_max_latency
	tracing: Free error logs of tracing instances
	ASoC: hdac_hdmi: use set_stream() instead of set_tdm_slots()
	tracing/synthetic: Make lastcmd_mutex static
	zsmalloc: document freeable stats
	mm: vmalloc: avoid warn_alloc noise caused by fatal signal
	wifi: mt76: ignore key disable commands
	ublk: read any SQE values upfront
	drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path
	drm/nouveau/disp: Support more modes by checking with lower bpc
	drm/i915: Fix context runtime accounting
	drm/i915: fix race condition UAF in i915_perf_add_config_ioctl
	ring-buffer: Fix race while reader and writer are on the same page
	mm/swap: fix swap_info_struct race between swapoff and get_swap_pages()
	mm/hugetlb: fix uffd wr-protection for CoW optimization path
	maple_tree: fix get wrong data_end in mtree_lookup_walk()
	maple_tree: fix a potential concurrency bug in RCU mode
	blk-throttle: Fix that bps of child could exceed bps limited in parent
	drm/amd/display: Clear MST topology if it fails to resume
	drm/amdgpu: for S0ix, skip SDMA 5.x+ suspend/resume
	drm/amdgpu: skip psp suspend for IMU enabled ASICs mode2 reset
	drm/display/dp_mst: Handle old/new payload states in drm_dp_remove_payload()
	drm/i915/dp_mst: Fix payload removal during output disabling
	drm/bridge: lt9611: Fix PLL being unable to lock
	drm/i915: Use _MMIO_PIPE() for SKL_BOTTOM_COLOR
	drm/i915: Split icl_color_commit_noarm() from skl_color_commit_noarm()
	mm: take a page reference when removing device exclusive entries
	maple_tree: remove GFP_ZERO from kmem_cache_alloc() and kmem_cache_alloc_bulk()
	maple_tree: fix potential rcu issue
	maple_tree: reduce user error potential
	maple_tree: fix handle of invalidated state in mas_wr_store_setup()
	maple_tree: fix mas_prev() and mas_find() state handling
	maple_tree: be more cautious about dead nodes
	maple_tree: refine ma_state init from mas_start()
	maple_tree: detect dead nodes in mas_start()
	maple_tree: fix freeing of nodes in rcu mode
	maple_tree: remove extra smp_wmb() from mas_dead_leaves()
	maple_tree: add smp_rmb() to dead node detection
	maple_tree: add RCU lock checking to rcu callback functions
	mm: enable maple tree RCU mode by default.
	bpftool: Print newline before '}' for struct with padding only fields
	Linux 6.1.24

Change-Id: I475408e1166927565c7788e7095bdf2cb236c4b2
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-04-17 11:09:32 +00:00 in commit 0fff48d6fe.
254 changed files with 2734 additions and 2050 deletions.

Documentation/devicetree/bindings/serial/renesas,scif.yaml

@ -91,7 +91,7 @@ properties:
- description: Error interrupt
- description: Receive buffer full interrupt
- description: Transmit buffer empty interrupt
- description: Transmit End interrupt
- description: Break interrupt
- items:
- description: Error interrupt
- description: Receive buffer full interrupt
@ -106,7 +106,7 @@ properties:
- const: eri
- const: rxi
- const: txi
- const: tei
- const: bri
- items:
- const: eri
- const: rxi

Documentation/mm/zsmalloc.rst

@ -68,6 +68,8 @@ pages_used
the number of pages allocated for the class
pages_per_zspage
the number of 0-order pages to make a zspage
freeable
the approximate number of pages class compaction can free
We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 23
SUBLEVEL = 24
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

arch/arm64/kernel/compat_alignment.c

@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
u32 instr = 0;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
int fault;
instrptr = instruction_pointer(regs);
if (compat_thumb_mode(regs)) {
__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
u16 tinstr, tinst2;
fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2;
fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = ((u32)tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
if (alignment_get_thumb(regs, ptr, &tinstr))
return 1;
if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
if (alignment_get_thumb(regs, ptr + 1, &tinst2))
return 1;
instr = ((u32)tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
} else {
fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
return 1;
}
if (fault)
return 1;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
if (LDSTHD_I_BIT(instr))

arch/arm64/kvm/pmu-emul.c

@ -15,16 +15,14 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
@ -52,11 +50,22 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
* @select_idx: The counter index
*/
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
return (select_idx == ARMV8_PMU_CYCLE_IDX);
}
static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
{
return (select_idx == ARMV8_PMU_CYCLE_IDX &&
__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
!kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu;
@ -69,91 +78,22 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
}
/**
* kvm_pmu_pmc_is_chained - determine if the pmc is chained
* @pmc: The PMU counter pointer
*/
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
/**
* kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
* @select_idx: The counter index
*/
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
return select_idx & 0x1;
}
/**
* kvm_pmu_get_canonical_pmc - obtain the canonical pmc
* @pmc: The PMU counter pointer
*
* When a pair of PMCs are chained together we use the low counter (canonical)
* to hold the underlying perf event.
*/
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(pmc->idx))
return pmc - 1;
return pmc;
}
static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
if (kvm_pmu_idx_is_high_counter(pmc->idx))
return pmc - 1;
else
return pmc + 1;
}
/**
* kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
* kvm_pmu_get_counter_value - get PMU counter value
* @vcpu: The vcpu pointer
* @select_idx: The counter index
*/
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
u64 eventsel, reg;
u64 counter, reg, enabled, running;
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
select_idx |= 0x1;
if (!kvm_vcpu_has_pmu(vcpu))
return 0;
if (select_idx == ARMV8_PMU_CYCLE_IDX)
return false;
reg = PMEVTYPER0_EL0 + select_idx;
eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}
/**
* kvm_pmu_get_pair_counter_value - get PMU counter value
* @vcpu: The vcpu pointer
* @pmc: The PMU counter pointer
*/
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
struct kvm_pmc *pmc)
{
u64 counter, counter_high, reg, enabled, running;
if (kvm_pmu_pmc_is_chained(pmc)) {
pmc = kvm_pmu_get_canonical_pmc(pmc);
reg = PMEVCNTR0_EL0 + pmc->idx;
counter = __vcpu_sys_reg(vcpu, reg);
counter_high = __vcpu_sys_reg(vcpu, reg + 1);
counter = lower_32_bits(counter) | (counter_high << 32);
} else {
reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
counter = __vcpu_sys_reg(vcpu, reg);
}
reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
counter = __vcpu_sys_reg(vcpu, reg);
/*
* The real counter value is equal to the value of counter register plus
@ -163,29 +103,7 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
counter += perf_event_read_value(pmc->perf_event, &enabled,
&running);
return counter;
}
/**
* kvm_pmu_get_counter_value - get PMU counter value
* @vcpu: The vcpu pointer
* @select_idx: The counter index
*/
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
u64 counter;
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
if (!kvm_vcpu_has_pmu(vcpu))
return 0;
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
if (kvm_pmu_pmc_is_chained(pmc) &&
kvm_pmu_idx_is_high_counter(select_idx))
counter = upper_32_bits(counter);
else if (select_idx != ARMV8_PMU_CYCLE_IDX)
if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
counter = lower_32_bits(counter);
return counter;
@ -218,7 +136,6 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
*/
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
pmc = kvm_pmu_get_canonical_pmc(pmc);
if (pmc->perf_event) {
perf_event_disable(pmc->perf_event);
perf_event_release_kernel(pmc->perf_event);
@ -236,11 +153,10 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
u64 counter, reg, val;
pmc = kvm_pmu_get_canonical_pmc(pmc);
if (!pmc->perf_event)
return;
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
reg = PMCCNTR_EL0;
@ -252,9 +168,6 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
__vcpu_sys_reg(vcpu, reg) = val;
if (kvm_pmu_pmc_is_chained(pmc))
__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
kvm_pmu_release_perf_event(pmc);
}
@ -285,8 +198,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
for_each_set_bit(i, &mask, 32)
kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}
/**
@ -340,12 +251,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
pmc = &pmu->pmc[i];
/* A change in the enable state may affect the chain state */
kvm_pmu_update_pmc_chained(vcpu, i);
kvm_pmu_create_perf_event(vcpu, i);
/* At this point, pmc must be the canonical */
if (pmc->perf_event) {
if (!pmc->perf_event) {
kvm_pmu_create_perf_event(vcpu, i);
} else {
perf_event_enable(pmc->perf_event);
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
kvm_debug("fail to enable perf event\n");
@ -375,11 +283,6 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
pmc = &pmu->pmc[i];
/* A change in the enable state may affect the chain state */
kvm_pmu_update_pmc_chained(vcpu, i);
kvm_pmu_create_perf_event(vcpu, i);
/* At this point, pmc must be the canonical */
if (pmc->perf_event)
perf_event_disable(pmc->perf_event);
}
@ -484,6 +387,65 @@ static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
kvm_vcpu_kick(vcpu);
}
/*
* Perform an increment on any of the counters described in @mask,
* generating the overflow if required, and propagate it as a chained
* event if possible.
*/
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
unsigned long mask, u32 event)
{
int i;
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
/* Weed out disabled counters */
mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
u64 type, reg;
/* Filter on event type */
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
type &= kvm_pmu_event_mask(vcpu->kvm);
if (type != event)
continue;
/* Increment this counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
if (reg) /* No overflow? move on */
continue;
/* Mark overflow */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
if (kvm_pmu_counter_can_chain(vcpu, i))
kvm_pmu_counter_increment(vcpu, BIT(i + 1),
ARMV8_PMUV3_PERFCTR_CHAIN);
}
}
/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
{
u64 val;
if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
val = -(counter & GENMASK(31, 0));
else
val = (-counter) & GENMASK(63, 0);
} else {
val = (-counter) & GENMASK(31, 0);
}
return val;
}
/**
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
@ -503,10 +465,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
* Reset the sample period to the architectural limit,
* i.e. the point where the counter overflows.
*/
period = -(local64_read(&perf_event->count));
if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
period &= GENMASK(31, 0);
period = compute_period(vcpu, idx, local64_read(&perf_event->count));
local64_set(&perf_event->hw.period_left, 0);
perf_event->attr.sample_period = period;
@ -514,6 +473,10 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
if (kvm_pmu_counter_can_chain(vcpu, idx))
kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
ARMV8_PMUV3_PERFCTR_CHAIN);
if (kvm_pmu_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
@ -533,50 +496,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
*/
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
if (!kvm_vcpu_has_pmu(vcpu))
return;
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
/* Weed out disabled counters */
val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
u64 type, reg;
if (!(val & BIT(i)))
continue;
/* PMSWINC only applies to ... SW_INC! */
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
type &= kvm_pmu_event_mask(vcpu->kvm);
if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
continue;
/* increment this even SW_INC counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
if (reg) /* no overflow on the low part */
continue;
if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
/* increment the high counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
if (!reg) /* mark overflow on the high counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
} else {
/* mark overflow on low counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
}
}
kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}
/**
@ -591,6 +511,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_vcpu_has_pmu(vcpu))
return;
/* The reset bits don't indicate any state, and shouldn't be saved. */
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@ -625,18 +548,11 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
struct perf_event *event;
struct perf_event_attr attr;
u64 eventsel, counter, reg, data;
/*
* For chained counters the event type and filtering attributes are
* obtained from the low/even counter. We also use this counter to
* determine if the event is enabled/disabled.
*/
pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
data = __vcpu_sys_reg(vcpu, reg);
@ -647,8 +563,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
else
eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
/* Software increment event doesn't need to be backed by a perf event */
if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
/*
* Neither SW increment nor chained events need to be backed
* by a perf event.
*/
if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
return;
/*
@ -670,30 +590,20 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
attr.exclude_host = 1; /* Don't count host events */
attr.config = eventsel;
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
counter = kvm_pmu_get_counter_value(vcpu, select_idx);
if (kvm_pmu_pmc_is_chained(pmc)) {
/**
* The initial sample period (overflow count) of an event. For
* chained counters we only support overflow interrupts on the
* high counter.
*/
attr.sample_period = (-counter) & GENMASK(63, 0);
attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
/*
* If counting with a 64bit counter, advertise it to the perf
* code, carefully dealing with the initial sample period
* which also depends on the overflow.
*/
if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow,
pmc + 1);
} else {
/* The initial sample period (overflow count) of an event. */
if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
attr.sample_period = (-counter) & GENMASK(63, 0);
else
attr.sample_period = (-counter) & GENMASK(31, 0);
attr.sample_period = compute_period(vcpu, select_idx, counter);
event = perf_event_create_kernel_counter(&attr, -1, current,
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow, pmc);
}
if (IS_ERR(event)) {
pr_err_once("kvm: pmu event creation failed %ld\n",
@ -704,41 +614,6 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
pmc->perf_event = event;
}
/**
* kvm_pmu_update_pmc_chained - update chained bitmap
* @vcpu: The vcpu pointer
* @select_idx: The number of selected counter
*
* Update the chained bitmap based on the event type written in the
* typer register and the enable state of the odd register.
*/
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
bool new_state, old_state;
old_state = kvm_pmu_pmc_is_chained(pmc);
new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
if (old_state == new_state)
return;
canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
kvm_pmu_stop_counter(vcpu, canonical_pmc);
if (new_state) {
/*
* During promotion from !chained to chained we must ensure
* the adjacent counter is stopped and its event destroyed
*/
kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
return;
}
clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
/**
* kvm_pmu_set_counter_event_type - set selected counter to monitor some event
* @vcpu: The vcpu pointer
@ -766,7 +641,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
__vcpu_sys_reg(vcpu, reg) = data & mask;
kvm_pmu_update_pmc_chained(vcpu, select_idx);
kvm_pmu_create_perf_event(vcpu, select_idx);
}

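A note on the period arithmetic introduced above: compute_period() collapses the old chained/unchained special cases into one rule, namely that the distance to the next overflow is the two's-complement negation of the counter, truncated to the width at which that counter overflows. A rough standalone sketch of that rule (invented names and plain C types, not the kernel's helpers):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the per-counter decisions that
 * kvm_pmu_idx_is_64bit() and kvm_pmu_idx_has_64bit_overflow()
 * feed into compute_period(). */
static uint64_t sketch_compute_period(uint64_t counter, bool is_64bit,
				      bool overflow_64bit)
{
	if (is_64bit && !overflow_64bit)
		return -(counter & 0xffffffffULL); /* 64bit counter, 32bit overflow */
	if (is_64bit)
		return -counter;                   /* full 64bit wrap */
	return (-counter) & 0xffffffffULL;         /* 32bit counter */
}

int main(void)
{
	/* A 32bit counter at 0xfffffff0 overflows after 16 more events. */
	printf("period = %" PRIu64 "\n",
	       sketch_compute_period(0xfffffff0ULL, false, false));
	return 0;
}
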
arch/arm64/kvm/sys_regs.c

@ -665,13 +665,15 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return false;
if (p->is_write) {
/* Only update writeable bits of PMCR */
/*
* Only update writeable bits of PMCR (continuing into
* kvm_pmu_handle_pmcr() as well)
*/
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {

arch/s390/kvm/intercept.c

@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
* handle_external_interrupt - used for external interruption interceptions
* @vcpu: virtual cpu
*
* This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
* the new PSW does not have external interrupts disabled. In the first case,
* we've got to deliver the interrupt manually, and in the second case, we
* drop to userspace to handle the situation there.
* This interception occurs if:
* - the CPUSTAT_EXT_INT bit was already set when the external interrupt
* occurred. In this case, the interrupt needs to be injected manually to
* preserve interrupt priority.
* - the external new PSW has external interrupts enabled, which will cause an
* interruption loop. We drop to userspace in this case.
*
* The latter case can be detected by inspecting the external mask bit in the
* external new psw.
*
* Under PV, only the latter case can occur, since interrupt priorities are
* handled in the ultravisor.
*/
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
vcpu->stat.exit_external_interrupt++;
rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
/* We can not handle clock comparator or timer interrupt with bad PSW */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
newpsw = vcpu->arch.sie_block->gpsw;
} else {
rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
if (rc)
return rc;
}
/*
* Clock comparator or timer interrupt with external interrupt enabled
* will cause interrupt loop. Drop to userspace.
*/
if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
(newpsw.mask & PSW_MASK_EXT))
return -EOPNOTSUPP;

arch/x86/kernel/acpi/boot.c

@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
if (madt->header.revision >= 5)
/* ACPI 6.3 and newer support the online capable bit. */
if (acpi_gbl_FADT.header.revision > 6 ||
(acpi_gbl_FADT.header.revision == 6 &&
acpi_gbl_FADT.minor_revision >= 3))
acpi_support_online_capable = true;
default_acpi_madt_oem_check(madt->header.oem_id,
@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
if (lapic_flags & ACPI_MADT_ENABLED)
return true;
if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
if (!acpi_support_online_capable ||
(lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
return true;
return false;

arch/x86/kvm/vmx/nested.c

@ -3845,7 +3845,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
exit_qual = 0;
}
if (ex->has_error_code) {
/*
* Unlike AMD's Paged Real Mode, which reports an error code on #PF
* VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
* "has error code" flags on VM-Exit if the CPU is in Real Mode.
*/
if (ex->has_error_code && is_protmode(vcpu)) {
/*
* Intel CPUs do not generate error codes with bits 31:16 set,
* and more importantly VMX disallows setting bits 31:16 in the

arch/x86/kvm/x86.c

@ -9853,13 +9853,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
/*
* Suppress the error code if the vCPU is in Real Mode, as Real Mode
* exceptions don't report error codes. The presence of an error code
* is carried with the exception and only stripped when the exception
	 * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do
* report an error code despite the CPU being in Real Mode.
*/
vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
trace_kvm_inj_exception(vcpu->arch.exception.vector,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
vcpu->arch.exception.injected);
if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
vcpu->arch.exception.error_code = false;
static_call(kvm_x86_inject_exception)(vcpu);
}

block/blk-mq.c

@ -1340,8 +1340,6 @@ bool blk_rq_is_poll(struct request *rq)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
if (WARN_ON_ONCE(!rq->bio))
return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@ -1349,7 +1347,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
bio_poll(rq->bio, NULL, 0);
blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
cond_resched();
} while (!completion_done(wait));
}

block/blk-throttle.c

@ -1066,7 +1066,6 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
sq->nr_queued[rw]--;
throtl_charge_bio(tg, bio);
bio_set_flag(bio, BIO_BPS_THROTTLED);
/*
* If our parent is another tg, we just need to transfer @bio to
@ -1079,6 +1078,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
start_parent_slice_with_credit(tg, parent_tg, rw);
} else {
bio_set_flag(bio, BIO_BPS_THROTTLED);
throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
&parent_sq->queued[rw]);
BUG_ON(tg->td->nr_queued[rw] <= 0);

block/genhd.c

@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
if (disk->open_partitions)
return -EBUSY;
set_bit(GD_NEED_PART_SCAN, &disk->state);
/*
* If the device is opened exclusively by current thread already, it's
	 * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
return ret;
}
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev))
ret = PTR_ERR(bdev);
else
blkdev_put(bdev, mode & ~FMODE_EXCL);
/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
	 * and this would cause re-assembling a partitioned raid device to
	 * create partitions for the underlying disk.
*/
clear_bit(GD_NEED_PART_SCAN, &disk->state);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(disk->part0, disk_scan_partitions);
return ret;

drivers/acpi/acpi_video.c

@ -1984,6 +1984,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
bool auto_detect;
int error;
acpi_status status;
@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock);
/*
* The userspace visible backlight_device gets registered separately
* from acpi_video_register_backlight().
* If backlight-type auto-detection is used then a native backlight may
* show up later and this may change the result from video to native.
	 * Therefore, normally the userspace-visible /sys/class/backlight device
* gets registered separately by the GPU driver calling
* acpi_video_register_backlight() when an internal panel is detected.
* Register the backlight now when not using auto-detection, so that
* when the kernel cmdline or DMI-quirks are used the backlight will
* get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
!auto_detect)
acpi_video_bus_register_backlight(video);
acpi_video_bus_add_notify_handler(video);
return 0;

drivers/acpi/video_detect.c

@ -274,6 +274,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
/*
* Models which need acpi_video backlight control where the GPU drivers
* do not call acpi_video_register_backlight() because no internal panel
* is detected. Typically these are all-in-ones (monitors with builtin
* PC) where the panel connection shows up as regular DP instead of eDP.
*/
{
.callback = video_detect_force_video,
/* Apple iMac14,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
},
},
{
.callback = video_detect_force_video,
/* Apple iMac14,2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
},
},
/*
* Older models with nvidia GPU which need acpi_video backlight
* control and where the old nvidia binary driver series does not
* call acpi_video_register_backlight().
*/
{
.callback = video_detect_force_video,
/* ThinkPad W530 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
},
},
/*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
@ -772,7 +809,7 @@ static bool prefer_native_over_acpi_video(void)
 * Determine which type of backlight interface to use on this system.
 * First check cmdline, then DMI quirks, then do autodetect.
*/
static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
@ -797,6 +834,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
native_available = true;
mutex_unlock(&init_mutex);
if (auto_detect)
*auto_detect = false;
/*
* The below heuristics / detection steps are in order of descending
 * precedence. The commandline takes precedence over anything else.
@ -808,6 +848,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
if (auto_detect)
*auto_detect = true;
/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
@ -827,15 +870,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return __acpi_video_get_backlight_type(false);
}
EXPORT_SYMBOL(acpi_video_get_backlight_type);
bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
}
EXPORT_SYMBOL(acpi_video_backlight_use_native);
EXPORT_SYMBOL(__acpi_video_get_backlight_type);

drivers/block/ublk_drv.c

@ -233,7 +233,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;
if (p->logical_bs_shift > PAGE_SHIFT)
if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
@ -1202,9 +1202,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
struct ublksrv_io_cmd *ub_cmd)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
@ -1306,6 +1307,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
struct ublksrv_io_cmd ub_cmd;
/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
ub_cmd.q_id = READ_ONCE(ub_src->q_id);
ub_cmd.tag = READ_ONCE(ub_src->tag);
ub_cmd.result = READ_ONCE(ub_src->result);
ub_cmd.addr = READ_ONCE(ub_src->addr);
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
@ -1886,6 +1904,8 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
if (ret)
ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
ublk_put_device(ub);

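The split into __ublk_ch_uring_cmd() above exists so the command fields are snapshotted from the shared SQE exactly once with READ_ONCE(). A rough standalone sketch of the pattern (invented names; READ_ONCE_SKETCH stands in for the kernel's READ_ONCE()): validate and dispatch against the snapshot only, so a concurrent writer cannot change a field between its check and its use.

#include <stdint.h>

/* Pretend this struct lives in memory shared with an untrusted submitter. */
struct shared_cmd {
	uint16_t q_id;
	uint16_t tag;
	uint64_t addr;
};

/* Force a single, non-reloaded read of a shared location. */
#define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

static int handle_cmd(const struct shared_cmd *shared, unsigned int nr_queues)
{
	struct shared_cmd snap;

	/* Snapshot every field up front; async retries reuse the snapshot. */
	snap.q_id = READ_ONCE_SKETCH(shared->q_id);
	snap.tag  = READ_ONCE_SKETCH(shared->tag);
	snap.addr = READ_ONCE_SKETCH(shared->addr);

	if (snap.q_id >= nr_queues)	/* bound is checked on the snapshot ... */
		return -1;
	/* ... and dispatch uses snap only, never *shared, afterwards. */
	return (int)snap.tag;
}
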
drivers/counter/104-quad-8.c

@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg;
};
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
/* Each Counter is 24 bits wide */
#define LS7267_CNTR_MAX GENMASK(23, 0)
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
unsigned long irqflags;
int i;
flags = ioread8(&chan->control);
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
*val = 0;
spin_lock_irqsave(&priv->lock, irqflags);
@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags;
int i;
/* Only 24-bit values are supported */
if (val > 0xFFFFFF)
if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
/* Handle Index signals */
if (synapse->signal->id >= 16) {
if (priv->preset_enable[count->id])
if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
if (preset > 0xFFFFFF)
if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id];
break;
default:
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
*ceiling = 0x1FFFFFF;
*ceiling = LS7267_CNTR_MAX;
break;
}
@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
if (ceiling > 0xFFFFFF)
if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);

drivers/cxl/core/pci.c

@ -483,7 +483,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
return NULL;
}
#define CDAT_DOE_REQ(entry_handle) \
#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
@ -496,8 +496,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
}
struct cdat_doe_task {
u32 request_pl;
u32 response_pl[32];
__le32 request_pl;
__le32 response_pl[32];
struct completion c;
struct pci_doe_task task;
};
@ -531,10 +531,10 @@ static int cxl_cdat_get_length(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
if (t.task.rv < sizeof(u32))
if (t.task.rv < 2 * sizeof(__le32))
return -EIO;
*length = t.response_pl[1];
*length = le32_to_cpu(t.response_pl[1]);
dev_dbg(dev, "CDAT length %zu\n", *length);
return 0;
@ -545,13 +545,13 @@ static int cxl_cdat_read_table(struct device *dev,
struct cxl_cdat *cdat)
{
size_t length = cdat->length;
u32 *data = cdat->table;
__le32 *data = cdat->table;
int entry_handle = 0;
do {
DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
struct cdat_entry_header *entry;
size_t entry_dw;
u32 *entry;
int rc;
rc = pci_doe_submit_task(cdat_doe, &t.task);
@ -560,26 +560,34 @@ static int cxl_cdat_read_table(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
/* 1 DW header + 1 DW data min */
if (t.task.rv < (2 * sizeof(u32)))
/* 1 DW Table Access Response Header + CDAT entry */
entry = (struct cdat_entry_header *)(t.response_pl + 1);
if ((entry_handle == 0 &&
t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
(entry_handle > 0 &&
(t.task.rv < sizeof(__le32) + sizeof(*entry) ||
t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
return -EIO;
/* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
t.response_pl[0]);
entry = t.response_pl + 1;
entry_dw = t.task.rv / sizeof(u32);
le32_to_cpu(t.response_pl[0]));
entry_dw = t.task.rv / sizeof(__le32);
/* Skip Header */
entry_dw -= 1;
entry_dw = min(length / sizeof(u32), entry_dw);
entry_dw = min(length / sizeof(__le32), entry_dw);
/* Prevent length < 1 DW from causing a buffer overflow */
if (entry_dw) {
memcpy(data, entry, entry_dw * sizeof(u32));
length -= entry_dw * sizeof(u32);
memcpy(data, entry, entry_dw * sizeof(__le32));
length -= entry_dw * sizeof(__le32);
data += entry_dw;
}
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
/* Length in CDAT header may exceed concatenation of CDAT entries */
cdat->length -= length;
return 0;
}

drivers/cxl/cxlpci.h

@ -71,6 +71,20 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
return pci_resource_start(pdev, map->barno) + map->block_offset;
}
struct cdat_header {
__le32 length;
u8 revision;
u8 checksum;
u8 reserved[6];
__le32 sequence;
} __packed;
struct cdat_entry_header {
u8 type;
u8 reserved;
__le16 length;
} __packed;
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);

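The two structs above give cxl_cdat_read_table() something concrete to validate against. A rough userspace-style sketch of the length rule applied to non-first entries (invented helper names; offsets follow the structs above, with the 4-byte DOE Table Access response header in front): the response must be exactly that header plus the length the little-endian entry header advertises.

#include <stddef.h>
#include <stdint.h>

/* Read a little-endian u16 from raw bytes; endian-safe on any host. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

static int cdat_entry_response_ok(const uint8_t *resp, size_t resp_bytes)
{
	const size_t hdr = 4;	/* 1 DW Table Access response header */

	/* Need at least the response header plus a cdat_entry_header. */
	if (resp_bytes < hdr + 4)
		return 0;
	/* cdat_entry_header.length sits at offset 2 within the entry. */
	return resp_bytes == hdr + get_le16(resp + hdr + 2);
}
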
drivers/gpio/Kconfig

@ -100,7 +100,7 @@ config GPIO_GENERIC
tristate
config GPIO_REGMAP
depends on REGMAP
select REGMAP
tristate
# put drivers in the right section, in alphabetical order

drivers/gpio/gpio-davinci.c

@ -328,7 +328,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
.flags = IRQCHIP_SET_TYPE_MASKED,
.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)
@ -645,9 +645,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
context->set_falling = readl_relaxed(&g->set_falling);
}
/* Clear Bank interrupt enable bit */
writel_relaxed(0, base + BINTEN);
/* Clear all interrupt status registers */
writel_relaxed(GENMASK(31, 0), &g->intstat);
}

drivers/gpio/gpio-mvebu.c

@ -657,9 +657,10 @@ static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock_irqrestore(&mvpwm->lock, flags);
}
static void mvebu_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state) {
static int mvebu_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
@ -693,6 +694,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
state->enabled = false;
spin_unlock_irqrestore(&mvpwm->lock, flags);
return 0;
}
static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@ -3034,6 +3034,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
continue;
/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
if (adev->in_s0ix &&
(adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
	/* swPSP provides the IMU and RLC FW binaries to TOS once during cold boot.
	 * These live in the TMR and are hence expected to be reused by PSP-TOS to
	 * reload from that location; RLC Autoload is also loaded from there
	 * automatically, based on the PMFW -> PSP message during the re-init
	 * sequence. Therefore, psp suspend & resume should be skipped to avoid
	 * destroying the TMR and reloading FWs again for IMU-enabled APU ASICs.
*/
if (amdgpu_in_reset(adev) &&
(adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@ -2175,6 +2175,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
aconnector->dc_link);
break;
}
}

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c

@ -206,7 +206,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
if (enable)
drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
else
drm_dp_remove_payload(mst_mgr, mst_state, payload);
drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each

drivers/gpu/drm/bridge/lontium-lt9611.c

@ -258,6 +258,7 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
{ 0x8126, 0x55 },
{ 0x8127, 0x66 },
{ 0x8128, 0x88 },
{ 0x812a, 0x20 },
};
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));

drivers/gpu/drm/bridge/ti-sn65dsi86.c

@ -1500,8 +1500,8 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
static int ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
unsigned int pwm_en_inv;
@ -1512,19 +1512,19 @@ static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv);
if (ret)
return;
return 0;
ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale);
if (ret)
return;
return 0;
ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight);
if (ret)
return;
return 0;
ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div);
if (ret)
return;
return 0;
state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv);
if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv))
@ -1539,6 +1539,8 @@ static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->duty_cycle > state->period)
state->duty_cycle = state->period;
return 0;
}
static const struct pwm_ops ti_sn_pwm_ops = {

drivers/gpu/drm/display/drm_dp_mst_topology.c

@ -3342,7 +3342,8 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
* drm_dp_remove_payload() - Remove an MST payload
* @mgr: Manager to use.
* @mst_state: The MST atomic state
* @payload: The payload to write
* @old_payload: The payload with its old state
* @new_payload: The payload to write
*
* Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
* the starting time slots of all other payloads which would have been shifted towards the start of
@ -3350,36 +3351,37 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
*/
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_atomic_payload *payload)
const struct drm_dp_mst_atomic_payload *old_payload,
struct drm_dp_mst_atomic_payload *new_payload)
{
struct drm_dp_mst_atomic_payload *pos;
bool send_remove = false;
/* We failed to make the payload, so nothing to do */
if (payload->vc_start_slot == -1)
if (new_payload->vc_start_slot == -1)
return;
mutex_lock(&mgr->lock);
send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
mutex_unlock(&mgr->lock);
if (send_remove)
drm_dp_destroy_payload_step1(mgr, mst_state, payload);
drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
else
drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
payload->vcpi);
new_payload->vcpi);
list_for_each_entry(pos, &mst_state->payloads, next) {
if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
pos->vc_start_slot -= payload->time_slots;
if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
pos->vc_start_slot -= old_payload->time_slots;
}
payload->vc_start_slot = -1;
new_payload->vc_start_slot = -1;
mgr->payload_count--;
mgr->next_start_slot -= payload->time_slots;
mgr->next_start_slot -= old_payload->time_slots;
if (payload->delete)
drm_dp_mst_put_port_malloc(payload->port);
if (new_payload->delete)
drm_dp_mst_put_port_malloc(new_payload->port);
}
EXPORT_SYMBOL(drm_dp_remove_payload);

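The old/new payload split above matters because later payloads have to shift by the slot count the removed payload actually occupied in hardware (its old state), not by whatever the new atomic state records. A toy sketch of that bookkeeping (invented types, not the DRM MST API):

#include <stdio.h>

struct toy_payload {
	int start_slot;
	int time_slots;
};

/* Shift every payload that sat after the removed one by its OLD size. */
static void toy_remove_payload(struct toy_payload *others, int n,
			       int removed_start, int removed_old_slots)
{
	for (int i = 0; i < n; i++)
		if (others[i].start_slot > removed_start)
			others[i].start_slot -= removed_old_slots;
}

int main(void)
{
	/* A occupied slots 1..8; B follows at slot 9. */
	struct toy_payload b = { .start_slot = 9, .time_slots = 4 };

	/* Remove A using its old size (8), even if its new state says 0. */
	toy_remove_payload(&b, 1, 1, 8);
	printf("B now starts at slot %d\n", b.start_slot); /* prints 1 */
	return 0;
}
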
drivers/gpu/drm/i915/display/intel_color.c

@ -2098,6 +2098,25 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
}
}
static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black.
*/
intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
intel_de_write(i915, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
static struct drm_property_blob *
icl_read_lut_multi_segment(struct intel_crtc *crtc)
{
@ -2183,7 +2202,7 @@ static const struct intel_color_funcs i9xx_color_funcs = {
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.color_commit_arm = icl_color_commit_arm,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
};

drivers/gpu/drm/i915/display/intel_dp_mst.c

@ -364,8 +364,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
struct drm_dp_mst_topology_state *old_mst_state =
drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
struct drm_dp_mst_topology_state *new_mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
const struct drm_dp_mst_atomic_payload *old_payload =
drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@ -373,8 +379,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
old_payload, new_payload);
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}

drivers/gpu/drm/i915/gt/intel_execlists_submission.c

@ -2017,6 +2017,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
	 * inspecting the queue to see if we need to resubmit.
*/
if (*prev != *execlists->active) { /* elide lite-restores */
struct intel_context *prev_ce = NULL, *active_ce = NULL;
/*
* Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU
@ -2028,9 +2030,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
	 * and correct ourselves later when updating from HW.
*/
if (*prev)
lrc_runtime_stop((*prev)->context);
prev_ce = (*prev)->context;
if (*execlists->active)
lrc_runtime_start((*execlists->active)->context);
active_ce = (*execlists->active)->context;
if (prev_ce != active_ce) {
if (prev_ce)
lrc_runtime_stop(prev_ce);
if (active_ce)
lrc_runtime_start(active_ce);
}
new_timeslice(execlists);
}

drivers/gpu/drm/i915/i915_perf.c

@ -4270,13 +4270,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
err = oa_config->id;
goto sysfs_err;
}
mutex_unlock(&perf->metrics_lock);
id = oa_config->id;
drm_dbg(&perf->i915->drm,
"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
mutex_unlock(&perf->metrics_lock);
return oa_config->id;
return id;
sysfs_err:
mutex_unlock(&perf->metrics_lock);

drivers/gpu/drm/i915/i915_reg.h

@ -3747,9 +3747,10 @@
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
#define _SKL_BOTTOM_COLOR_B 0x71034
#define SKL_BOTTOM_COLOR_GAMMA_ENABLE REG_BIT(31)
#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
#define _ICL_PIPE_A_STATUS 0x70058
#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)

drivers/gpu/drm/nouveau/dispnv50/disp.c

@ -410,6 +410,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
return 0;
}
static void
nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
{
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
unsigned int max_rate, mode_rate;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
/* we don't support more than 10 anyway */
asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
/* reduce the bpc until it works out */
while (asyh->or.bpc > 6) {
mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
if (mode_rate <= max_rate)
break;
asyh->or.bpc -= 2;
}
break;
default:
break;
}
}
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@ -428,6 +457,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
if (crtc_state->mode_changed || crtc_state->connectors_changed)
asyh->or.bpc = connector->display_info.bpc;
/* We might have to reduce the bpc */
nv50_outp_atomic_fix_depth(encoder, crtc_state);
return 0;
}
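
Rough arithmetic behind the bpc-reduction loop above, with illustrative
numbers only (mode->clock is in kHz; assuming link_bw follows the usual
DRM convention of the per-lane rate in kHz, which after 8b/10b coding
equals kB/s of payload):

    /* mode_rate = DIV_ROUND_UP(mode->clock * bpc * 3, 8):
     *   clock = 533250 kHz (a 3840x2160@60 CVT-RB mode)
     *   bpc 10: 533250 * 30 / 8 = 1,999,688 (rounded up)
     *   bpc  8: 533250 * 24 / 8 = 1,599,750
     * With max_rate = link_nr * link_bw = 2 * 810000 = 1,620,000
     * (two HBR3 lanes), 10 bpc is rejected and the loop settles on
     * 8 bpc; it never drops below the 6 bpc floor.
     */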
@ -996,7 +1028,7 @@ nv50_msto_prepare(struct drm_atomic_state *state,
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
if (msto->disabled) {
drm_dp_remove_payload(mgr, mst_state, payload);
drm_dp_remove_payload(mgr, mst_state, payload, payload);
} else {
if (msto->enabled)
drm_dp_add_payload_part1(mgr, mst_state, payload);

View File

@ -245,8 +245,6 @@ void nouveau_dp_irq(struct nouveau_drm *drm,
}
/* TODO:
* - Use the minimum possible BPC here, once we add support for the max bpc
* property.
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
@ -258,7 +256,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
{
const unsigned int min_clock = 25000;
unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
const u8 bpp = connector->display_info.bpc * 3;
/* Check with the minimum bpc always, so we can advertise better modes.
* In particular, not doing this causes modes to be dropped on HDR
* displays, as we might otherwise check with a bpc as high as 16.
*/
const u8 bpp = 6 * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;

View File

@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (IS_ERR(pages[i])) {
mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
}
}
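
The one-line fix matters because pages[] outlives the loop: the error
path (and the object's eventual teardown) walks the whole array and
treats any non-NULL slot as a real page, so a stale ERR_PTR would be
handed back to the page allocator. A sketch of the idiom, with
hypothetical grab_page()/release_page() helpers:

    /* pages[] is assumed zero-initialized (e.g. kcalloc), so NULL
     * reliably means "nothing to free here".
     */
    for (i = 0; i < n; i++) {
        pages[i] = grab_page(i);
        if (IS_ERR(pages[i])) {
            ret = PTR_ERR(pages[i]);
            pages[i] = NULL;        /* cleanup below skips this slot */
            goto err_pages;
        }
    }
    return 0;

    err_pages:
    for (i = 0; i < n; i++)
        if (pages[i])
            release_page(pages[i]);
    return ret;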

View File

@ -410,6 +410,10 @@ void vmbus_disconnect(void)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
if (vmbus_connection.channels == NULL) {
pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
return NULL;
}
if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
return NULL;
return READ_ONCE(vmbus_connection.channels[relid]);

View File

@ -451,7 +451,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (etm4x_sspcicrn_present(drvdata, i))
etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
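
The widened bound reflects how ETMv4 lays out address comparators:
assuming nr_addr_cmp counts comparator pairs (TRCIDR4.NUMACPAIRS) while
TRCACVRn/TRCACATRn are indexed per register, a device with N pairs owns
2 * N registers, and programming only N of them silently dropped the
upper half of every pair. Schematically (restore_reg() is hypothetical):

    /* pair p occupies registers 2*p (range start) and 2*p + 1 (range
     * end), hence the 2 * drvdata->nr_addr_cmp bound above
     */
    for (p = 0; p < drvdata->nr_addr_cmp; p++) {
        restore_reg(TRCACVRn(2 * p));
        restore_reg(TRCACVRn(2 * p + 1));
    }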
@ -1010,25 +1010,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
/*
* All ETMs must implement TRCDEVARCH to indicate that
* the component is an ETMv4. To support any broken
* implementations we fall back to TRCIDR1 check, which
* is not really reliable.
* the component is an ETMv4. Even though TRCIDR1 also
* contains the information, it is part of the "Trace"
* register and must be accessed with the OSLK cleared,
* with MMIO. But we cannot touch the OSLK until we are
* sure this is an ETM. So rely only on the TRCDEVARCH.
*/
if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
drvdata->arch = etm_devarch_to_arch(devarch);
} else {
pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
smp_processor_id(), devarch);
if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
return false;
drvdata->arch = etm_trcidr_to_arch(idr1);
if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
return false;
}
drvdata->arch = etm_devarch_to_arch(devarch);
*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
return true;
}

View File

@ -753,14 +753,12 @@
* TRCDEVARCH - CoreSight architected register
* - Bits[15:12] - Major version
* - Bits[19:16] - Minor version
* TRCIDR1 - ETM architected register
* - Bits[11:8] - Major version
* - Bits[7:4] - Minor version
* We must rely on TRCDEVARCH for the version information,
* however we don't want to break the support for potential
* old implementations which might not implement it. Thus
* we fall back to TRCIDR1 if TRCDEVARCH is not implemented
* for memory mapped components.
*
* We must rely only on TRCDEVARCH for the version information. Even though
* TRCIDR1 also provides the architecture version, it is a "Trace" register
* and as such must be accessed only with Trace power domain ON. This may
* not be available at probe time.
*
* Now to make certain decisions easier based on the version
* we use an internal representation of the version in the
* driver, as follows:
@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
ETM_DEVARCH_REVISION(devarch));
}
static inline u8 etm_trcidr_to_arch(u32 trcidr1)
{
return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
ETM_TRCIDR1_ARCH_MINOR(trcidr1));
}
enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
ETM4_IMPDEF_FEATURE_MAX,

View File

@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
.irq_flags = IRQF_TRIGGER_LOW,
.irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,

View File

@ -28,7 +28,6 @@ struct ltc2497_driverdata {
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
u32 recv_size;
u32 sub_lsb;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
* equivalent to a sign extension.
*/
if (st->recv_size == 3) {
*val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
*val = (get_unaligned_be24(st->data.d8) >> 6)
- BIT(ddata->chip_info->resolution + 1);
} else {
*val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
*val = (be32_to_cpu(st->data.d32) >> 6)
- BIT(ddata->chip_info->resolution + 1);
}
@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client,
st->common_ddata.chip_info = chip_info;
resolution = chip_info->resolution;
st->sub_lsb = 31 - (resolution + 1);
st->recv_size = BITS_TO_BYTES(resolution) + 1;
return ltc2497core_probe(dev, indio_dev);

View File

@ -626,12 +626,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
struct fwnode_handle *fwnode,
const struct adc5_data *data)
{
const char *name = fwnode_get_name(fwnode), *channel_name;
const char *channel_name;
char *name;
u32 chan, value, varr[2];
u32 sid = 0;
int ret;
struct device *dev = adc->dev;
name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
if (!name)
return -ENOMEM;
/* Cut the address part */
name[strchrnul(name, '@') - name] = '\0';
ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
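
Two things change above: the name now lives in device-managed storage
(the pointer returned by fwnode_get_name() carried no lifetime
guarantee), and the unit address is trimmed in place. strchrnul()
returns a pointer to the first '@' or, if there is none, to the
terminating NUL, so the truncation is a no-op for names without a unit
address. Illustration (hypothetical helper):

    #include <linux/string.h>

    static void trim_unit_address(char *name)
    {
        /* "channel@4a" -> "channel"; "ref-gnd" stays "ref-gnd"
         * (the write merely re-stores the existing NUL)
         */
        name[strchrnul(name, '@') - name] = '\0';
    }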

View File

@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.label = dev_name(&st->spi->dev);
st->chip.parent = &st->spi->dev;
st->chip.owner = THIS_MODULE;
st->chip.can_sleep = true;
st->chip.base = -1;
st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
st->chip.get_direction = ti_ads7950_get_direction;
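
Setting can_sleep tells gpiolib that these lines sit behind a sleeping
bus (SPI here), so they must not be accessed from atomic context.
Consumers are then expected to use the _cansleep accessors; a sketch
with the generic gpiolib consumer API (hypothetical "sense" line):

    #include <linux/gpio/consumer.h>

    static int read_sense_line(struct device *dev)
    {
        struct gpio_desc *gpio = gpiod_get(dev, "sense", GPIOD_IN);
        int val;

        if (IS_ERR(gpio))
            return PTR_ERR(gpio);

        val = gpiod_get_value_cansleep(gpio);   /* process context only */
        gpiod_put(gpio);
        return val;
    }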

View File

@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
/* DAC can only accept up to a 16-bit value */
if ((unsigned int)val > 65535)
/* DAC can only accept up to a 12-bit value */
if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;

View File

@ -47,6 +47,7 @@ config ADIS16480
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
select CRC32
help
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.

View File

@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
break;
}
if (filp->f_flags & O_NONBLOCK) {
if (!written)
ret = -EAGAIN;
break;
}
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
continue;
}
ret = rb->access->write(rb, n - written, buf + written);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
if (ret < 0)
break;
if (ret > 0) {
written += ret;
if (written != n && !(filp->f_flags & O_NONBLOCK))
continue;
}
} while (ret == 0);
written += ret;
} while (written != n);
remove_wait_queue(&rb->pollq, &wait);
return ret < 0 ? ret : n;
return ret < 0 ? ret : written;
}
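
After this rework a blocking writer loops until all n bytes are queued,
while an O_NONBLOCK writer gets back whatever was accepted so far and
sees -EAGAIN only when nothing could be written at all. Userspace
therefore has to expect short writes; a sketch of the consuming side
(plain POSIX C, hypothetical fd):

    #include <errno.h>
    #include <unistd.h>

    static ssize_t write_all(int fd, const char *buf, size_t len)
    {
        size_t done = 0;

        while (done < len) {
            ssize_t n = write(fd, buf + done, len - done);
            if (n < 0) {
                if (errno == EAGAIN)
                    continue;       /* better: poll() for POLLOUT */
                return -1;          /* real error */
            }
            done += n;
        }
        return done;
    }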
/**

View File

@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
static void cm32181_unregister_dummy_client(void *data)
{
struct i2c_client *client = data;
/* Unregister the dummy client */
i2c_unregister_device(client);
}
static int cm32181_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
client = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(client))
return PTR_ERR(client);
ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
if (ret)
return ret;
}
cm32181 = iio_priv(indio_dev);
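
devm_add_action_or_reset() is the generic way to tie a manually created
resource to the device's lifetime: the callback runs automatically on
driver detach, and if registration itself fails the callback is invoked
immediately and the error returned, so the dummy client cannot leak on
any path. Pattern sketch (my_make()/my_destroy() are hypothetical):

    #include <linux/device.h>

    static void my_release(void *res)
    {
        my_destroy(res);
    }

    static int my_probe_step(struct device *dev)
    {
        void *res = my_make(dev);

        if (IS_ERR(res))
            return PTR_ERR(res);

        /* on failure this calls my_release(res) before returning */
        return devm_add_action_or_reset(dev, my_release, res);
    }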

View File

@ -972,8 +972,8 @@ static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct lpg *lpg = container_of(chip, struct lpg, pwm);
struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
@ -986,20 +986,20 @@ static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val);
if (ret)
return;
return 0;
refclk = lpg_clk_rates[val & PWM_CLK_SELECT_MASK];
if (refclk) {
ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val);
if (ret)
return;
return 0;
pre_div = lpg_pre_divs[FIELD_GET(PWM_FREQ_PRE_DIV_MASK, val)];
m = FIELD_GET(PWM_FREQ_EXP_MASK, val);
ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value));
if (ret)
return;
return 0;
state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1 << m), refclk);
state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
@ -1010,13 +1010,15 @@ static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val);
if (ret)
return;
return 0;
state->enabled = FIELD_GET(LPG_ENABLE_CONTROL_OUTPUT, val);
state->polarity = PWM_POLARITY_NORMAL;
if (state->duty_cycle > state->period)
state->duty_cycle = state->period;
return 0;
}
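
This is part of the tree-wide "pwm: Make .get_state() callback return an
error code" conversion. Like most mechanically converted drivers, this
one returns 0 even when a regmap read fails, leaving the state partially
filled; that preserves the old void-return behavior, and propagating the
errno could be a follow-up. A minimal stub under the new prototype
(sketch only):

    static int my_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                                struct pwm_state *state)
    {
        state->enabled = false;
        state->polarity = PWM_POLARITY_NORMAL;
        state->period = 0;
        state->duty_cycle = 0;
        return 0;       /* or a negative errno once callers can use it */
    }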
static const struct pwm_ops lpg_pwm_ops = {

View File

@ -285,14 +285,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
struct dm_deferred_entry {
struct dm_deferred_set *ds;
unsigned count;
unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
unsigned current_entry;
unsigned sweeper;
unsigned int current_entry;
unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
@ -338,7 +338,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
static unsigned ds_next(unsigned index)
static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
@ -373,7 +373,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
unsigned next_entry;
unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&

View File

@ -148,7 +148,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
static bool __get(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell)
@ -171,7 +171,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
@ -224,7 +224,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
static int __lock(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@ -255,7 +255,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level)
unsigned int new_lock_level)
{
if (!cell->exclusive_lock)
return -EINVAL;
@ -302,7 +302,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level)
unsigned int new_lock_level)
{
int r;

View File

@ -44,8 +44,8 @@ struct dm_cell_key_v2 {
struct dm_bio_prison_cell_v2 {
// FIXME: pack these
bool exclusive_lock;
unsigned exclusive_level;
unsigned shared_count;
unsigned int exclusive_level;
unsigned int shared_count;
struct work_struct *quiesce_continuation;
struct rb_node node;
@ -86,7 +86,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
*/
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@ -114,7 +114,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@ -132,7 +132,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level);
unsigned int new_lock_level);
/*
* Adds any held bios to the bio list.

View File

@ -89,7 +89,7 @@ struct dm_bufio_client {
unsigned long n_buffers[LIST_SIZE];
struct block_device *bdev;
unsigned block_size;
unsigned int block_size;
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
@ -98,9 +98,9 @@ struct dm_bufio_client {
struct dm_io_client *dm_io;
struct list_head reserved_buffers;
unsigned need_reserved_buffers;
unsigned int need_reserved_buffers;
unsigned minimum_buffers;
unsigned int minimum_buffers;
struct rb_root buffer_tree;
wait_queue_head_t free_buffer_wait;
@ -145,14 +145,14 @@ struct dm_buffer {
unsigned char list_mode; /* LIST_* */
blk_status_t read_error;
blk_status_t write_error;
unsigned accessed;
unsigned hold_count;
unsigned int accessed;
unsigned int hold_count;
unsigned long state;
unsigned long last_accessed;
unsigned dirty_start;
unsigned dirty_end;
unsigned write_start;
unsigned write_end;
unsigned int dirty_start;
unsigned int dirty_end;
unsigned int write_start;
unsigned int write_end;
struct dm_bufio_client *c;
struct list_head write_list;
void (*end_io)(struct dm_buffer *, blk_status_t);
@ -220,7 +220,7 @@ static unsigned long global_num = 0;
/*
* Buffers are freed after this timeout
*/
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@ -438,7 +438,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
* as if GFP_NOIO was specified.
*/
if (gfp_mask & __GFP_NORETRY) {
unsigned noio_flag = memalloc_noio_save();
unsigned int noio_flag = memalloc_noio_save();
void *ptr = __vmalloc(c->block_size, gfp_mask);
memalloc_noio_restore(noio_flag);
@ -591,7 +591,7 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
unsigned int n_sectors, unsigned int offset)
{
int r;
struct dm_io_request io_req = {
@ -629,11 +629,11 @@ static void bio_complete(struct bio *bio)
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
unsigned int n_sectors, unsigned int offset)
{
struct bio *bio;
char *ptr;
unsigned vec_size, len;
unsigned int vec_size, len;
vec_size = b->c->block_size >> PAGE_SHIFT;
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
@ -654,7 +654,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
len = n_sectors << SECTOR_SHIFT;
do {
unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
if (!bio_add_page(bio, virt_to_page(ptr), this_step,
offset_in_page(ptr))) {
bio_put(bio);
@ -684,9 +684,9 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
static void submit_io(struct dm_buffer *b, enum req_op op,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
unsigned int n_sectors;
sector_t sector;
unsigned offset, end;
unsigned int offset, end;
b->end_io = end_io;
@ -1156,7 +1156,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
EXPORT_SYMBOL_GPL(dm_bufio_new);
void dm_bufio_prefetch(struct dm_bufio_client *c,
sector_t block, unsigned n_blocks)
sector_t block, unsigned int n_blocks)
{
struct blk_plug plug;
@ -1232,7 +1232,7 @@ void dm_bufio_release(struct dm_buffer *b)
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
unsigned start, unsigned end)
unsigned int start, unsigned int end)
{
struct dm_bufio_client *c = b->c;
@ -1529,13 +1529,13 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{
c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{
return c->block_size;
}
@ -1734,15 +1734,15 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
/*
* Create the buffering interface
*/
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
void (*write_callback)(struct dm_buffer *),
unsigned int flags)
{
int r;
struct dm_bufio_client *c;
unsigned i;
unsigned int i;
char slab_name[27];
if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
@ -1796,7 +1796,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (block_size <= KMALLOC_MAX_SIZE &&
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
@ -1872,7 +1872,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_create);
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
unsigned i;
unsigned int i;
drop_buffers(c);
@ -1920,9 +1920,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
static unsigned int get_max_age_hz(void)
{
unsigned max_age = READ_ONCE(dm_bufio_max_age);
unsigned int max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
@ -1973,7 +1973,7 @@ static void do_global_cleanup(struct work_struct *w)
struct dm_bufio_client *locked_client = NULL;
struct dm_bufio_client *current_client;
struct dm_buffer *b;
unsigned spinlock_hold_count;
unsigned int spinlock_hold_count;
unsigned long threshold = dm_bufio_cache_size -
dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
unsigned long loops = global_num * 2;

View File

@ -17,7 +17,7 @@ struct bt_work {
};
struct background_tracker {
unsigned max_work;
unsigned int max_work;
atomic_t pending_promotes;
atomic_t pending_writebacks;
atomic_t pending_demotes;
@ -29,7 +29,7 @@ struct background_tracker {
struct kmem_cache *work_cache;
};
struct background_tracker *btracker_create(unsigned max_work)
struct background_tracker *btracker_create(unsigned int max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
@ -155,13 +155,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
}
}
unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
unsigned btracker_nr_demotions_queued(struct background_tracker *b)
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_demotes);
}

View File

@ -12,19 +12,44 @@
/*----------------------------------------------------------------*/
/*
* The cache policy decides what background work should be performed,
* such as promotions, demotions and writebacks. The core cache target
* is in charge of performing the work, and does so when it sees fit.
*
* The background_tracker acts as a go-between, keeping track of future
* work that the policy has decided upon and handing (issuing) it to
* the core target when requested.
*
* There is no locking in this, so calls will probably need to be
* protected with a spinlock.
*/
struct background_work;
struct background_tracker;
/*
* FIXME: discuss lack of locking in all methods.
* Create a new tracker, it will not be able to queue more than
* 'max_work' entries.
*/
struct background_tracker *btracker_create(unsigned max_work);
void btracker_destroy(struct background_tracker *b);
unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
unsigned btracker_nr_demotions_queued(struct background_tracker *b);
struct background_tracker *btracker_create(unsigned int max_work);
/*
* Destroy the tracker. No work that has been issued but not yet
* completed may remain when this is called; it is fine to have queued
* but unissued work.
*/
void btracker_destroy(struct background_tracker *b);
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
/*
* Queue some work within the tracker. 'work' should point to the work
* to queue; it will be copied (ownership doesn't pass). If pwork
* is not NULL then it will be set to point to the tracker's internal
* copy of the work.
*
* Returns -EINVAL if the work is already queued, or -ENOMEM if the work
* couldn't be queued for another reason.
*/
@ -33,11 +58,20 @@ int btracker_queue(struct background_tracker *b,
struct policy_work **pwork);
/*
* Hands out the next piece of work to be performed.
* Returns -ENODATA if there's no work.
*/
int btracker_issue(struct background_tracker *b, struct policy_work **work);
void btracker_complete(struct background_tracker *b,
struct policy_work *op);
/*
* Informs the tracker that the work has been completed and it may forget
* about it.
*/
void btracker_complete(struct background_tracker *b, struct policy_work *op);
/*
* Predicate to see if an origin block is already scheduled for promotion.
*/
bool btracker_promotion_already_present(struct background_tracker *b,
dm_oblock_t oblock);
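
Taken together, the comments above imply a queue -> issue -> complete
lifecycle with all calls made under a caller-provided lock. A compressed
sketch (hypothetical caller; in the real target the issued work is
performed outside the lock):

    static void tracker_tick(struct background_tracker *b, spinlock_t *lock,
                             struct policy_work *work)
    {
        struct policy_work *todo;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (!btracker_queue(b, work, NULL) &&
            !btracker_issue(b, &todo)) {
            /* ... hand *todo to the worker here ... */
            btracker_complete(b, todo);
        }
        spin_unlock_irqrestore(lock, flags);
    }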

View File

@ -104,7 +104,7 @@ struct dm_cache_metadata {
refcount_t ref_count;
struct list_head list;
unsigned version;
unsigned int version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@ -129,7 +129,7 @@ struct dm_cache_metadata {
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
@ -260,10 +260,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@ -727,7 +727,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
*/
#define FLAGS_MASK ((1 << 16) - 1)
static __le64 pack_value(dm_oblock_t block, unsigned flags)
static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
value <<= 16;
@ -735,7 +735,7 @@ static __le64 pack_value(dm_oblock_t block, unsigned flags)
return cpu_to_le64(value);
}
static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
@ -749,7 +749,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
@ -810,7 +810,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
@ -855,7 +855,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
@ -890,7 +890,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
int r;
__le64 value;
dm_oblock_t ob;
unsigned flags;
unsigned int flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
if (r)
@ -1288,7 +1288,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
/*
@ -1339,7 +1339,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@ -1381,7 +1381,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@ -1513,7 +1513,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
__le64 value;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
memcpy(&value, leaf, sizeof(value));
unpack_value(value, &oblock, &flags);
@ -1547,7 +1547,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
int r;
unsigned flags;
unsigned int flags;
dm_oblock_t oblock;
__le64 value;
@ -1574,10 +1574,10 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
}
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r;
unsigned i;
unsigned int i;
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
@ -1594,7 +1594,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
return 0;
}
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r = 0;
@ -1613,7 +1613,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
}
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
unsigned nr_bits,
unsigned int nr_bits,
unsigned long *bits)
{
int r;
@ -1712,7 +1712,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
int r;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))

View File

@ -60,7 +60,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version);
unsigned int metadata_version);
void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
@ -96,7 +96,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
void *context);
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
unsigned nr_bits, unsigned long *bits);
unsigned int nr_bits, unsigned long *bits);
struct dm_cache_statistics {
uint32_t read_hits;

View File

@ -85,7 +85,7 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
}
static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
unsigned maxlen, ssize_t *sz_ptr)
unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
if (p->emit_config_values)
@ -112,18 +112,18 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
/*
* Some utility functions commonly used by policies and the core target.
*/
static inline size_t bitset_size_in_bytes(unsigned nr_entries)
static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
{
return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}
static inline unsigned long *alloc_bitset(unsigned nr_entries)
static inline unsigned long *alloc_bitset(unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
return vzalloc(s);
}
static inline void clear_bitset(void *bitset, unsigned nr_entries)
static inline void clear_bitset(void *bitset, unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
memset(bitset, 0, s);
@ -154,7 +154,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
*/
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);

View File

@ -23,12 +23,12 @@
/*
* Safe division functions that return zero on divide by zero.
*/
static unsigned safe_div(unsigned n, unsigned d)
static unsigned int safe_div(unsigned int n, unsigned int d)
{
return d ? n / d : 0u;
}
static unsigned safe_mod(unsigned n, unsigned d)
static unsigned int safe_mod(unsigned int n, unsigned int d)
{
return d ? n % d : 0u;
}
@ -36,10 +36,10 @@ static unsigned safe_mod(unsigned n, unsigned d)
/*----------------------------------------------------------------*/
struct entry {
unsigned hash_next:28;
unsigned prev:28;
unsigned next:28;
unsigned level:6;
unsigned int hash_next:28;
unsigned int prev:28;
unsigned int next:28;
unsigned int level:6;
bool dirty:1;
bool allocated:1;
bool sentinel:1;
@ -62,7 +62,7 @@ struct entry_space {
struct entry *end;
};
static int space_init(struct entry_space *es, unsigned nr_entries)
static int space_init(struct entry_space *es, unsigned int nr_entries)
{
if (!nr_entries) {
es->begin = es->end = NULL;
@ -82,7 +82,7 @@ static void space_exit(struct entry_space *es)
vfree(es->begin);
}
static struct entry *__get_entry(struct entry_space *es, unsigned block)
static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
struct entry *e;
@ -92,13 +92,13 @@ static struct entry *__get_entry(struct entry_space *es, unsigned block)
return e;
}
static unsigned to_index(struct entry_space *es, struct entry *e)
static unsigned int to_index(struct entry_space *es, struct entry *e)
{
BUG_ON(e < es->begin || e >= es->end);
return e - es->begin;
}
static struct entry *to_entry(struct entry_space *es, unsigned block)
static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
if (block == INDEXER_NULL)
return NULL;
@ -109,8 +109,8 @@ static struct entry *to_entry(struct entry_space *es, unsigned block)
/*----------------------------------------------------------------*/
struct ilist {
unsigned nr_elts; /* excluding sentinel entries */
unsigned head, tail;
unsigned int nr_elts; /* excluding sentinel entries */
unsigned int head, tail;
};
static void l_init(struct ilist *l)
@ -252,23 +252,23 @@ static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
struct queue {
struct entry_space *es;
unsigned nr_elts;
unsigned nr_levels;
unsigned int nr_elts;
unsigned int nr_levels;
struct ilist qs[MAX_LEVELS];
/*
* We maintain a count of the number of entries we would like in each
* level.
*/
unsigned last_target_nr_elts;
unsigned nr_top_levels;
unsigned nr_in_top_levels;
unsigned target_count[MAX_LEVELS];
unsigned int last_target_nr_elts;
unsigned int nr_top_levels;
unsigned int nr_in_top_levels;
unsigned int target_count[MAX_LEVELS];
};
static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
unsigned i;
unsigned int i;
q->es = es;
q->nr_elts = 0;
@ -284,7 +284,7 @@ static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
q->nr_in_top_levels = 0u;
}
static unsigned q_size(struct queue *q)
static unsigned int q_size(struct queue *q)
{
return q->nr_elts;
}
@ -332,9 +332,9 @@ static void q_del(struct queue *q, struct entry *e)
/*
* Return the oldest entry of the lowest populated level.
*/
static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
unsigned level;
unsigned int level;
struct entry *e;
max_level = min(max_level, q->nr_levels);
@ -369,7 +369,7 @@ static struct entry *q_pop(struct queue *q)
* used by redistribute, so we know this is true. It also doesn't adjust
* the q->nr_elts count.
*/
static struct entry *__redist_pop_from(struct queue *q, unsigned level)
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
struct entry *e;
@ -383,9 +383,10 @@ static struct entry *__redist_pop_from(struct queue *q, unsigned level)
return NULL;
}
static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
unsigned int lbegin, unsigned int lend)
{
unsigned level, nr_levels, entries_per_level, remainder;
unsigned int level, nr_levels, entries_per_level, remainder;
BUG_ON(lbegin > lend);
BUG_ON(lend > q->nr_levels);
@ -426,7 +427,7 @@ static void q_set_targets(struct queue *q)
static void q_redistribute(struct queue *q)
{
unsigned target, level;
unsigned int target, level;
struct ilist *l, *l_above;
struct entry *e;
@ -467,12 +468,12 @@ static void q_redistribute(struct queue *q)
}
}
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
struct entry *s1, struct entry *s2)
{
struct entry *de;
unsigned sentinels_passed = 0;
unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
unsigned int sentinels_passed = 0;
unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
/* try and find an entry to swap with */
if (extra_levels && (e->level < q->nr_levels - 1u)) {
@ -512,9 +513,9 @@ static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
#define EIGHTH (1u << (FP_SHIFT - 3u))
struct stats {
unsigned hit_threshold;
unsigned hits;
unsigned misses;
unsigned int hit_threshold;
unsigned int hits;
unsigned int misses;
};
enum performance {
@ -523,7 +524,7 @@ enum performance {
Q_WELL
};
static void stats_init(struct stats *s, unsigned nr_levels)
static void stats_init(struct stats *s, unsigned int nr_levels)
{
s->hit_threshold = (nr_levels * 3u) / 4u;
s->hits = 0u;
@ -535,7 +536,7 @@ static void stats_reset(struct stats *s)
s->hits = s->misses = 0u;
}
static void stats_level_accessed(struct stats *s, unsigned level)
static void stats_level_accessed(struct stats *s, unsigned int level)
{
if (level >= s->hit_threshold)
s->hits++;
@ -556,7 +557,7 @@ static void stats_miss(struct stats *s)
*/
static enum performance stats_assess(struct stats *s)
{
unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
if (confidence < SIXTEENTH)
return Q_POOR;
@ -573,16 +574,16 @@ static enum performance stats_assess(struct stats *s)
struct smq_hash_table {
struct entry_space *es;
unsigned long long hash_bits;
unsigned *buckets;
unsigned int *buckets;
};
/*
* All cache entries are stored in a chained hash table. To save space we
* use indexing again, and only store indexes to the next entry.
*/
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
unsigned i, nr_buckets;
unsigned int i, nr_buckets;
ht->es = es;
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
@ -603,7 +604,7 @@ static void h_exit(struct smq_hash_table *ht)
vfree(ht->buckets);
}
static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
return to_entry(ht->es, ht->buckets[bucket]);
}
@ -613,7 +614,7 @@ static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
return to_entry(ht->es, e->hash_next);
}
static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
e->hash_next = ht->buckets[bucket];
ht->buckets[bucket] = to_index(ht->es, e);
@ -621,11 +622,11 @@ static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry
static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
__h_insert(ht, h, e);
}
static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
struct entry **prev)
{
struct entry *e;
@ -641,7 +642,7 @@ static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock
return NULL;
}
static void __h_unlink(struct smq_hash_table *ht, unsigned h,
static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
struct entry *e, struct entry *prev)
{
if (prev)
@ -656,7 +657,7 @@ static void __h_unlink(struct smq_hash_table *ht, unsigned h,
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
struct entry *e, *prev;
unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
e = __h_lookup(ht, h, oblock, &prev);
if (e && prev) {
@ -673,7 +674,7 @@ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
struct entry *prev;
/*
@ -689,16 +690,16 @@ static void h_remove(struct smq_hash_table *ht, struct entry *e)
struct entry_alloc {
struct entry_space *es;
unsigned begin;
unsigned int begin;
unsigned nr_allocated;
unsigned int nr_allocated;
struct ilist free;
};
static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
unsigned begin, unsigned end)
unsigned int begin, unsigned int end)
{
unsigned i;
unsigned int i;
ea->es = es;
ea->nr_allocated = 0u;
@ -742,7 +743,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
/*
* This assumes the cblock hasn't already been allocated.
*/
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
struct entry *e = __get_entry(ea->es, ea->begin + i);
@ -770,12 +771,12 @@ static bool allocator_empty(struct entry_alloc *ea)
return l_empty(&ea->free);
}
static unsigned get_index(struct entry_alloc *ea, struct entry *e)
static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
return to_index(ea->es, e) - ea->begin;
}
static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
return __get_entry(ea->es, ea->begin + index);
}
@ -800,9 +801,9 @@ struct smq_policy {
sector_t cache_block_size;
sector_t hotspot_block_size;
unsigned nr_hotspot_blocks;
unsigned cache_blocks_per_hotspot_block;
unsigned hotspot_level_jump;
unsigned int nr_hotspot_blocks;
unsigned int cache_blocks_per_hotspot_block;
unsigned int hotspot_level_jump;
struct entry_space es;
struct entry_alloc writeback_sentinel_alloc;
@ -831,7 +832,7 @@ struct smq_policy {
* Keeps track of time, incremented by the core. We use this to
* avoid attributing multiple hits within the same tick.
*/
unsigned tick;
unsigned int tick;
/*
* The hash tables allow us to quickly find an entry by origin
@ -846,8 +847,8 @@ struct smq_policy {
bool current_demote_sentinels;
unsigned long next_demote_period;
unsigned write_promote_level;
unsigned read_promote_level;
unsigned int write_promote_level;
unsigned int read_promote_level;
unsigned long next_hotspot_period;
unsigned long next_cache_period;
@ -859,24 +860,24 @@ struct smq_policy {
/*----------------------------------------------------------------*/
static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}
static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}
static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}
static void __update_writeback_sentinels(struct smq_policy *mq)
{
unsigned level;
unsigned int level;
struct queue *q = &mq->dirty;
struct entry *sentinel;
@ -889,7 +890,7 @@ static void __update_writeback_sentinels(struct smq_policy *mq)
static void __update_demote_sentinels(struct smq_policy *mq)
{
unsigned level;
unsigned int level;
struct queue *q = &mq->clean;
struct entry *sentinel;
@ -917,7 +918,7 @@ static void update_sentinels(struct smq_policy *mq)
static void __sentinels_init(struct smq_policy *mq)
{
unsigned level;
unsigned int level;
struct entry *sentinel;
for (level = 0; level < NR_CACHE_LEVELS; level++) {
@ -1008,7 +1009,7 @@ static void requeue(struct smq_policy *mq, struct entry *e)
}
}
static unsigned default_promote_level(struct smq_policy *mq)
static unsigned int default_promote_level(struct smq_policy *mq)
{
/*
* The promote level depends on the current performance of the
@ -1030,9 +1031,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
};
unsigned hits = mq->cache_stats.hits;
unsigned misses = mq->cache_stats.misses;
unsigned index = safe_div(hits << 4u, hits + misses);
unsigned int hits = mq->cache_stats.hits;
unsigned int misses = mq->cache_stats.misses;
unsigned int index = safe_div(hits << 4u, hits + misses);
return table[index];
}
@ -1042,7 +1043,7 @@ static void update_promote_levels(struct smq_policy *mq)
* If there are unused cache entries then we want to be really
* eager to promote.
*/
unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
@ -1124,7 +1125,7 @@ static void end_cache_period(struct smq_policy *mq)
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u
static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{
return from_cblock(mq->cache_size) * p / 100u;
}
@ -1150,7 +1151,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
static bool free_target_met(struct smq_policy *mq)
{
unsigned nr_free;
unsigned int nr_free;
nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
@ -1300,7 +1301,7 @@ static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
unsigned hi;
unsigned int hi;
dm_oblock_t hb = to_hblock(mq, b);
struct entry *e = h_lookup(&mq->hotspot_table, hb);
@ -1549,7 +1550,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
spin_unlock_irqrestore(&mq->lock, flags);
}
static unsigned random_level(dm_cblock_t cblock)
static unsigned int random_level(dm_cblock_t cblock)
{
return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
@ -1660,7 +1661,7 @@ static int mq_set_config_value(struct dm_cache_policy *p,
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
unsigned maxlen, ssize_t *sz_ptr)
unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
@ -1699,16 +1700,16 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
static bool too_many_hotspot_blocks(sector_t origin_size,
sector_t hotspot_block_size,
unsigned nr_hotspot_blocks)
unsigned int nr_hotspot_blocks)
{
return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}
static void calc_hotspot_params(sector_t origin_size,
sector_t cache_block_size,
unsigned nr_cache_blocks,
unsigned int nr_cache_blocks,
sector_t *hotspot_block_size,
unsigned *nr_hotspot_blocks)
unsigned int *nr_hotspot_blocks)
{
*hotspot_block_size = cache_block_size * 16u;
*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
@ -1724,9 +1725,9 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
bool mimic_mq,
bool migrations_allowed)
{
unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
unsigned total_sentinels = 2u * nr_sentinels_per_queue;
unsigned int i;
unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)

View File

@ -154,7 +154,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;

View File

@ -128,7 +128,7 @@ struct dm_cache_policy {
* Configuration.
*/
int (*emit_config_values)(struct dm_cache_policy *p, char *result,
unsigned maxlen, ssize_t *sz_ptr);
unsigned int maxlen, ssize_t *sz_ptr);
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
@ -157,7 +157,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
unsigned version[CACHE_POLICY_VERSION_SIZE];
unsigned int version[CACHE_POLICY_VERSION_SIZE];
/*
* For use by an alias dm_cache_policy_type to point to the

View File

@ -275,7 +275,7 @@ enum cache_io_mode {
struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
unsigned metadata_version;
unsigned int metadata_version;
bool discard_passdown:1;
};
@ -362,7 +362,7 @@ struct cache {
* Rather than reconstructing the table line for the status we just
* save it and regurgitate.
*/
unsigned nr_ctr_args;
unsigned int nr_ctr_args;
const char **ctr_args;
struct dm_kcopyd_client *copier;
@ -378,7 +378,7 @@ struct cache {
unsigned long *dirty_bitset;
atomic_t nr_dirty;
unsigned policy_nr_args;
unsigned int policy_nr_args;
struct dm_cache_policy *policy;
/*
@ -409,7 +409,7 @@ struct cache {
struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
unsigned int req_nr:2;
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
@ -517,7 +517,7 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1
static unsigned lock_level(struct bio *bio)
static unsigned int lock_level(struct bio *bio)
{
return bio_data_dir(bio) == WRITE ?
WRITE_LOCK_LEVEL :
@ -1884,7 +1884,7 @@ static void check_migrations(struct work_struct *ws)
*/
static void destroy(struct cache *cache)
{
unsigned i;
unsigned int i;
mempool_exit(&cache->migration_pool);
@ -2124,7 +2124,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
};
int r, mode_ctr = 0;
unsigned argc;
unsigned int argc;
const char *arg;
struct cache_features *cf = &ca->features;
@ -2544,7 +2544,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
unsigned i;
unsigned int i;
const char **copy;
copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
@ -2566,7 +2566,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
return 0;
}
static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r = -EINVAL;
struct cache_args *ca;
@ -2669,7 +2669,7 @@ static int write_dirty_bitset(struct cache *cache)
static int write_discard_bitset(struct cache *cache)
{
unsigned i, r;
unsigned int i, r;
if (get_cache_mode(cache) >= CM_READ_ONLY)
return -EINVAL;
@ -2983,11 +2983,11 @@ static void cache_resume(struct dm_target *ti)
}
static void emit_flags(struct cache *cache, char *result,
unsigned maxlen, ssize_t *sz_ptr)
unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
struct cache_features *cf = &cache->features;
unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
DMEMIT("%u ", count);
@ -3027,10 +3027,10 @@ static void emit_flags(struct cache *cache, char *result,
* <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
int r = 0;
unsigned i;
unsigned int i;
ssize_t sz = 0;
dm_block_t nr_free_blocks_metadata = 0;
dm_block_t nr_blocks_metadata = 0;
@ -3067,18 +3067,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
(unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
(unsigned long long)cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
(unsigned) atomic_read(&cache->stats.read_hit),
(unsigned) atomic_read(&cache->stats.read_miss),
(unsigned) atomic_read(&cache->stats.write_hit),
(unsigned) atomic_read(&cache->stats.write_miss),
(unsigned) atomic_read(&cache->stats.demotion),
(unsigned) atomic_read(&cache->stats.promotion),
(unsigned int) atomic_read(&cache->stats.read_hit),
(unsigned int) atomic_read(&cache->stats.read_miss),
(unsigned int) atomic_read(&cache->stats.write_hit),
(unsigned int) atomic_read(&cache->stats.write_miss),
(unsigned int) atomic_read(&cache->stats.demotion),
(unsigned int) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
emit_flags(cache, result, maxlen, &sz);
@ -3257,11 +3257,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
return r;
}
static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
const char **cblock_ranges)
{
int r = 0;
unsigned i;
unsigned int i;
struct cblock_range range;
if (!passthrough_mode(cache)) {
@ -3298,8 +3298,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
*
* The key migration_threshold is supported by the cache target core.
*/
static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct cache *cache = ti->private;

View File

@ -119,7 +119,7 @@ struct mapped_device {
struct dm_stats stats;
/* the number of internal suspends */
unsigned internal_suspend_count;
unsigned int internal_suspend_count;
int swap_bios;
struct semaphore swap_bios_semaphore;
@ -326,9 +326,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
return !maxlen || strlen(result) + 1 >= maxlen;
}

View File

@ -173,14 +173,14 @@ struct crypt_config {
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
unsigned short int sector_size;
unsigned short sector_size;
unsigned char sector_shift;
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
} cipher_tfm;
unsigned tfms_count;
unsigned int tfms_count;
unsigned long cipher_flags;
/*
@ -214,7 +214,7 @@ struct crypt_config {
* pool for per bio private data, crypto requests,
* encryption requests/buffer pages and integrity tags
*/
unsigned tag_pool_max_sectors;
unsigned int tag_pool_max_sectors;
mempool_t tag_pool;
mempool_t req_pool;
mempool_t page_pool;
@ -231,7 +231,7 @@ struct crypt_config {
#define POOL_ENTRY_SIZE 512
static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static unsigned int dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
@ -356,7 +356,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
unsigned bs;
unsigned int bs;
int log;
if (crypt_integrity_aead(cc))
@ -1466,7 +1466,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
@ -1660,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
* non-blocking allocations without a mutex first but on failure we fall back
* to blocking allocations with a mutex.
*/
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned i, len, remaining_size;
unsigned int i, len, remaining_size;
struct page *page;
retry:
@ -1806,7 +1806,7 @@ static void crypt_endio(struct bio *clone)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned rw = bio_data_dir(clone);
unsigned int rw = bio_data_dir(clone);
blk_status_t error;
/*
@ -2261,7 +2261,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc)
static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
unsigned i;
unsigned int i;
if (!cc->cipher_tfm.tfms)
return;
@ -2286,7 +2286,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
unsigned i;
unsigned int i;
int err;
cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
@ -2344,12 +2344,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
static unsigned crypt_subkey_size(struct crypt_config *cc)
static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}
static unsigned crypt_authenckey_size(struct crypt_config *cc)
static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
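crypt_subkey_size() splits the user-supplied key evenly across the cc->tfms_count cipher instances; tfms_count is a power of two, so the shift by ilog2() is an exact division, and the authenc variant adds the rtattr header space on top. A worked example with illustrative numbers (64-byte key, 4 extra IV-seed bytes, 2 tfms; values are not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int key_size = 64, key_extra_size = 4, tfms_count = 2;
	unsigned int log2_tfms = 1; /* ilog2(2) */

	/* mirrors crypt_subkey_size(): shift is an exact power-of-two division */
	unsigned int subkey = (key_size - key_extra_size) >> log2_tfms;

	printf("%u bytes per sub-key (tfms_count=%u)\n", subkey, tfms_count); /* 30 */
	return 0;
}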
@ -2360,7 +2360,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
* This function converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
unsigned enckeylen, unsigned authkeylen)
unsigned int enckeylen, unsigned int authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
@ -2378,7 +2378,7 @@ static void crypt_copy_authenckey(char *p, const void *key,
static int crypt_setkey(struct crypt_config *cc)
{
unsigned subkey_size;
unsigned int subkey_size;
int err = 0, i, r;
/* Ignore extra keys (which are used for IV etc) */
@ -3417,7 +3417,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) {
unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
unlikely(!(io->integrity_metadata = kmalloc(tag_len,
@ -3445,14 +3445,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
static char hex2asc(unsigned char c)
{
return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}
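hex2asc() is a branchless nibble-to-ASCII conversion: for c <= 9 the term (unsigned int)(9 - c) >> 4 is zero and the result is '0' + c; for c >= 10 the subtraction wraps around as unsigned, the shift leaves the low bits set, and masking with 0x27 adds the 39 needed to land in 'a'..'f'. A standalone check (editorial sketch, compiles on its own):

#include <stdio.h>

static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	for (unsigned char c = 0; c < 16; c++)
		putchar(hex2asc(c)); /* prints 0123456789abcdef */
	putchar('\n');
	return 0;
}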
static void crypt_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
unsigned i, sz = 0;
unsigned int i, sz = 0;
int num_feature_args = 0;
switch (type) {
@ -3568,8 +3568,8 @@ static void crypt_resume(struct dm_target *ti)
* key set <key>
* key wipe
*/
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
int key_size, ret = -EINVAL;
@ -3630,10 +3630,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
max_t(unsigned, limits->logical_block_size, cc->sector_size);
max_t(unsigned int, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
max_t(unsigned int, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
}


@ -20,8 +20,8 @@
struct delay_class {
struct dm_dev *dev;
sector_t start;
unsigned delay;
unsigned ops;
unsigned int delay;
unsigned int ops;
};
struct delay_c {
@ -305,7 +305,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
static void delay_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;


@ -390,7 +390,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
}
static void ebs_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct ebs_c *ec = ti->private;


@ -51,7 +51,7 @@ static void writeset_free(struct writeset *ws)
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
unsigned nr_bits, dm_block_t *root)
unsigned int nr_bits, dm_block_t *root)
{
int r;
@ -62,7 +62,7 @@ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}
static size_t bitset_size(unsigned nr_bits)
static size_t bitset_size(unsigned int nr_bits)
{
return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
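bitset_size() rounds nr_bits up to whole machine words; dm_div_up() is the kernel's round-up division. The same arithmetic in plain C (a sketch, not the kernel helper):

#include <stdio.h>

static size_t bitset_size(unsigned int nr_bits)
{
	const unsigned int bits_per_long = 8 * sizeof(unsigned long);

	/* round-up division, as dm_div_up() does */
	return sizeof(unsigned long) * ((nr_bits + bits_per_long - 1) / bits_per_long);
}

int main(void)
{
	printf("%zu\n", bitset_size(1));  /* 8 on LP64: one 64-bit word */
	printf("%zu\n", bitset_size(65)); /* 16: two words */
	return 0;
}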
@ -323,10 +323,10 @@ static int superblock_lock(struct era_metadata *md,
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@ -363,12 +363,12 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
core->root = le64_to_cpu(disk->root);
}
static void ws_inc(void *context, const void *value, unsigned count)
static void ws_inc(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
unsigned i;
unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@ -377,12 +377,12 @@ static void ws_inc(void *context, const void *value, unsigned count)
}
}
static void ws_dec(void *context, const void *value, unsigned count)
static void ws_dec(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
unsigned i;
unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@ -667,7 +667,7 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
*--------------------------------------------------------------*/
struct digest {
uint32_t era;
unsigned nr_bits, current_bit;
unsigned int nr_bits, current_bit;
struct writeset_metadata writeset;
__le32 value;
struct dm_disk_bitset info;
@ -702,7 +702,7 @@ static int metadata_digest_transcribe_writeset(struct era_metadata *md,
{
int r;
bool marked;
unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
for (b = d->current_bit; b < e; b++) {
r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
@ -1439,7 +1439,7 @@ static bool valid_block_size(dm_block_t block_size)
/*
* <metadata dev> <data dev> <data block size (sectors)>
*/
static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
char dummy;
@ -1618,7 +1618,7 @@ static int era_preresume(struct dm_target *ti)
* <current era> <held metadata root | '-'>
*/
static void era_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
struct era *era = ti->private;
@ -1633,10 +1633,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
goto err;
DMEMIT("%u %llu/%llu %u",
(unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long) stats.used,
(unsigned long long) stats.total,
(unsigned) stats.era);
(unsigned int) stats.era);
if (stats.snap != SUPERBLOCK_LOCATION)
DMEMIT(" %llu", stats.snap);
@ -1662,8 +1662,8 @@ static void era_status(struct dm_target *ti, status_type_t type,
DMEMIT("Error");
}
static int era_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct era *era = ti->private;


@ -142,7 +142,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
static int set_chunk_size(struct dm_exception_store *store,
const char *chunk_size_arg, char **error)
{
unsigned chunk_size;
unsigned int chunk_size;
if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
*error = "Invalid chunk size";
@ -158,7 +158,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
unsigned chunk_size,
unsigned int chunk_size,
char **error)
{
/* Check chunk_size is a power of 2 */
@ -190,7 +190,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
unsigned *args_used,
unsigned int *args_used,
struct dm_exception_store **store)
{
int r = 0;


@ -96,9 +96,9 @@ struct dm_exception_store_type {
*/
void (*drop_snapshot) (struct dm_exception_store *store);
unsigned (*status) (struct dm_exception_store *store,
status_type_t status, char *result,
unsigned maxlen);
unsigned int (*status) (struct dm_exception_store *store,
status_type_t status, char *result,
unsigned int maxlen);
/*
* Return how full the snapshot is.
@ -118,9 +118,9 @@ struct dm_exception_store {
struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
unsigned chunk_size;
unsigned chunk_mask;
unsigned chunk_shift;
unsigned int chunk_size;
unsigned int chunk_mask;
unsigned int chunk_shift;
void *context;
@ -144,7 +144,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
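The exception store packs two values into one chunk_t: the low DM_CHUNK_NUMBER_BITS carry the chunk number (dm_chunk_number() masks them out) and the bits above carry the count of consecutive chunks (dm_consecutive_chunk_count() shifts them down). A sketch of the encoding, assuming a 56-bit number field as in dm-exception-store.h:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_NUMBER_BITS 56 /* assumed to mirror DM_CHUNK_NUMBER_BITS */

typedef uint64_t chunk_t;

static chunk_t chunk_number(chunk_t chunk)
{
	return chunk & (((chunk_t)1 << CHUNK_NUMBER_BITS) - 1);
}

static unsigned int consecutive_count(chunk_t chunk)
{
	return chunk >> CHUNK_NUMBER_BITS;
}

int main(void)
{
	/* chunk 1000 with 3 further consecutive chunks packed above it */
	chunk_t e = ((chunk_t)3 << CHUNK_NUMBER_BITS) | 1000;

	printf("number=%llu count=%u\n",
	       (unsigned long long)chunk_number(e), consecutive_count(e));
	return 0;
}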
@ -181,12 +181,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
unsigned chunk_size,
unsigned int chunk_size,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
unsigned *args_used,
unsigned int *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);


@ -26,12 +26,12 @@ struct flakey_c {
struct dm_dev *dev;
unsigned long start_time;
sector_t start;
unsigned up_interval;
unsigned down_interval;
unsigned int up_interval;
unsigned int down_interval;
unsigned long flags;
unsigned corrupt_bio_byte;
unsigned corrupt_bio_rw;
unsigned corrupt_bio_value;
unsigned int corrupt_bio_byte;
unsigned int corrupt_bio_rw;
unsigned int corrupt_bio_value;
blk_opf_t corrupt_bio_flags;
};
@ -48,7 +48,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
int r;
unsigned argc;
unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@ -148,7 +148,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
sizeof(unsigned int));
r = dm_read_arg(_args + 3, as,
(__force unsigned *)&fc->corrupt_bio_flags,
(__force unsigned int *)&fc->corrupt_bio_flags,
&ti->error);
if (r)
return r;
@ -324,7 +324,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
@ -417,11 +417,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
}
static void flakey_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct flakey_c *fc = ti->private;
unsigned drop_writes, error_writes;
unsigned int drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:


@ -157,13 +157,13 @@ struct alg_spec {
char *alg_string;
char *key_string;
__u8 *key;
unsigned key_size;
unsigned int key_size;
};
struct dm_integrity_c {
struct dm_dev *dev;
struct dm_dev *meta_dev;
unsigned tag_size;
unsigned int tag_size;
__s8 log2_tag_size;
sector_t start;
mempool_t journal_io_mempool;
@ -171,8 +171,8 @@ struct dm_integrity_c {
struct dm_bufio_client *bufio;
struct workqueue_struct *metadata_wq;
struct superblock *sb;
unsigned journal_pages;
unsigned n_bitmap_blocks;
unsigned int journal_pages;
unsigned int n_bitmap_blocks;
struct page_list *journal;
struct page_list *journal_io;
@ -180,7 +180,7 @@ struct dm_integrity_c {
struct page_list *recalc_bitmap;
struct page_list *may_write_bitmap;
struct bitmap_block_status *bbs;
unsigned bitmap_flush_interval;
unsigned int bitmap_flush_interval;
int synchronous_mode;
struct bio_list synchronous_bios;
struct delayed_work bitmap_flush_work;
@ -201,12 +201,12 @@ struct dm_integrity_c {
unsigned char journal_entries_per_sector;
unsigned char journal_section_entries;
unsigned short journal_section_sectors;
unsigned journal_sections;
unsigned journal_entries;
unsigned int journal_sections;
unsigned int journal_entries;
sector_t data_device_sectors;
sector_t meta_device_sectors;
unsigned initial_sectors;
unsigned metadata_run;
unsigned int initial_sectors;
unsigned int metadata_run;
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
@ -230,17 +230,17 @@ struct dm_integrity_c {
unsigned char commit_seq;
commit_id_t commit_ids[N_COMMIT_IDS];
unsigned committed_section;
unsigned n_committed_sections;
unsigned int committed_section;
unsigned int n_committed_sections;
unsigned uncommitted_section;
unsigned n_uncommitted_sections;
unsigned int uncommitted_section;
unsigned int n_uncommitted_sections;
unsigned free_section;
unsigned int free_section;
unsigned char free_section_entry;
unsigned free_sectors;
unsigned int free_sectors;
unsigned free_sectors_threshold;
unsigned int free_sectors_threshold;
struct workqueue_struct *commit_wq;
struct work_struct commit_work;
@ -257,7 +257,7 @@ struct dm_integrity_c {
unsigned long autocommit_jiffies;
struct timer_list autocommit_timer;
unsigned autocommit_msec;
unsigned int autocommit_msec;
wait_queue_head_t copy_to_journal_wait;
@ -305,7 +305,7 @@ struct dm_integrity_io {
struct dm_integrity_range range;
sector_t metadata_block;
unsigned metadata_offset;
unsigned int metadata_offset;
atomic_t in_flight;
blk_status_t bi_status;
@ -329,7 +329,7 @@ struct journal_io {
struct bitmap_block_status {
struct work_struct work;
struct dm_integrity_c *ic;
unsigned idx;
unsigned int idx;
unsigned long *bitmap;
struct bio_list bio_queue;
spinlock_t bio_queue_lock;
@ -410,8 +410,8 @@ static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
return false;
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
unsigned int j, unsigned char seq)
{
/*
* Xor the number with section and sector, so that if a piece of
@ -426,7 +426,7 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
if (!ic->meta_dev) {
__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
*area = data_sector >> log2_interleave_sectors;
*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
} else {
*area = 0;
*offset = data_sector;
@ -435,15 +435,15 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
#define sector_to_block(ic, n) \
do { \
BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \
(n) >>= (ic)->sb->log2_sectors_per_block; \
} while (0)
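The do { ... } while (0) wrapper makes the macro's two statements behave as a single statement after an if, and the BUG_ON mask relies on sectors_per_block being a power of two, so n & (spb - 1) equals n % spb. The alignment-then-shift idiom in isolation (illustrative values only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long n = 24;               /* sectors */
	unsigned int log2_sectors_per_block = 3; /* 8 sectors per block */

	/* power-of-two modulo: n & (spb - 1) == n % spb */
	assert((n & ((1u << log2_sectors_per_block) - 1)) == 0);
	n >>= log2_sectors_per_block;
	printf("blocks: %llu\n", n); /* 3 */
	return 0;
}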
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
sector_t offset, unsigned *metadata_offset)
sector_t offset, unsigned int *metadata_offset)
{
__u64 ms;
unsigned mo;
unsigned int mo;
ms = area << ic->sb->log2_interleave_sectors;
if (likely(ic->log2_metadata_run >= 0))
@ -484,7 +484,7 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
return result;
}
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
if (unlikely(*sec_ptr >= ic->journal_sections))
*sec_ptr -= ic->journal_sections;
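wraparound_section() advances through the journal ring with a conditional subtract instead of a modulo, which is only safe when an advance can never overshoot by more than one full ring; the journal code steps section by section, so it holds here. The same idiom in isolation (a sketch under that assumption):

#include <stdio.h>

/*
 * Ring advance by conditional subtract: valid only when the step can
 * never exceed the ring size, so a single subtraction suffices.
 */
static unsigned int ring_next(unsigned int pos, unsigned int step, unsigned int size)
{
	pos += step;
	if (pos >= size)
		pos -= size;
	return pos;
}

int main(void)
{
	printf("%u\n", ring_next(6, 3, 8)); /* 1 */
	return 0;
}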
@ -508,7 +508,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
unsigned size = crypto_shash_digestsize(ic->journal_mac);
unsigned int size = crypto_shash_digestsize(ic->journal_mac);
if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
@ -704,8 +704,8 @@ static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
unsigned i;
unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
unsigned int i;
for (i = 0; i < n_bitmap_pages; i++) {
unsigned long *dst_data = lowmem_page_address(dst[i].page);
@ -716,18 +716,18 @@ static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst,
static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
return &ic->bbs[bitmap_block];
}
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
@ -738,10 +738,10 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
#endif
}
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
unsigned *pl_index, unsigned *pl_offset)
static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
unsigned int *pl_index, unsigned int *pl_offset)
{
unsigned sector;
unsigned int sector;
access_journal_check(ic, section, offset, false, "page_list_location");
@ -752,9 +752,9 @@ static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsi
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
unsigned section, unsigned offset, unsigned *n_sectors)
unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
unsigned pl_index, pl_offset;
unsigned int pl_index, pl_offset;
char *va;
page_list_location(ic, section, offset, &pl_index, &pl_offset);
@ -767,14 +767,14 @@ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct
return (struct journal_sector *)(va + pl_offset);
}
static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
return access_page_list(ic, ic->journal, section, offset, NULL);
}
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
unsigned rel_sector, offset;
unsigned int rel_sector, offset;
struct journal_sector *js;
access_journal_check(ic, section, n, true, "access_journal_entry");
@ -786,7 +786,7 @@ static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, uns
return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
n <<= ic->sb->log2_sectors_per_block;
@ -797,11 +797,11 @@ static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, uns
return access_journal(ic, section, n);
}
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
unsigned j, size;
unsigned int j, size;
desc->tfm = ic->journal_mac;
@ -866,10 +866,10 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
memset(result, 0, JOURNAL_MAC_SIZE);
}
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
__u8 result[JOURNAL_MAC_SIZE];
unsigned j;
unsigned int j;
if (!ic->journal_mac)
return;
@ -898,12 +898,12 @@ static void complete_journal_op(void *context)
complete(&comp->comp);
}
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
unsigned n_sections, struct journal_completion *comp)
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
unsigned int n_sections, struct journal_completion *comp)
{
struct async_submit_ctl submit;
size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
unsigned pl_index, pl_offset, section_index;
unsigned int pl_index, pl_offset, section_index;
struct page_list *source_pl, *target_pl;
if (likely(encrypt)) {
@ -928,7 +928,7 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
struct page *dst_page;
while (unlikely(pl_index == section_index)) {
unsigned dummy;
unsigned int dummy;
if (likely(encrypt))
rw_section_mac(ic, section, true);
section++;
@ -990,8 +990,8 @@ static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_
return false;
}
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
unsigned n_sections, struct journal_completion *comp)
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
unsigned int n_sections, struct journal_completion *comp)
{
struct scatterlist **source_sg;
struct scatterlist **target_sg;
@ -1008,7 +1008,7 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
do {
struct skcipher_request *req;
unsigned ivsize;
unsigned int ivsize;
char *iv;
if (likely(encrypt))
@ -1034,8 +1034,8 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
complete_journal_op(comp);
}
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
unsigned n_sections, struct journal_completion *comp)
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
unsigned int n_sections, struct journal_completion *comp)
{
if (ic->journal_xor)
return xor_journal(ic, encrypt, section, n_sections, comp);
@ -1052,12 +1052,12 @@ static void complete_journal_io(unsigned long error, void *context)
}
static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
unsigned sector, unsigned n_sectors,
unsigned int sector, unsigned int n_sectors,
struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
unsigned pl_index, pl_offset;
unsigned int pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@ -1099,10 +1099,10 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
}
static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
unsigned section, unsigned n_sections,
unsigned int section, unsigned int n_sections,
struct journal_completion *comp)
{
unsigned sector, n_sectors;
unsigned int sector, n_sectors;
sector = section * ic->journal_section_sectors;
n_sectors = n_sections * ic->journal_section_sectors;
@ -1110,12 +1110,12 @@ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
struct journal_completion io_comp;
struct journal_completion crypt_comp_1;
struct journal_completion crypt_comp_2;
unsigned i;
unsigned int i;
io_comp.ic = ic;
init_completion(&io_comp.comp);
@ -1135,7 +1135,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else {
unsigned to_end;
unsigned int to_end;
io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
to_end = ic->journal_sections - commit_start;
if (ic->journal_io) {
@ -1172,15 +1172,15 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
wait_for_completion_io(&io_comp.comp);
}
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
int r;
unsigned sector, pl_index, pl_offset;
unsigned int sector, pl_index, pl_offset;
BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
if (unlikely(dm_integrity_failed(ic))) {
fn(-1UL, data);
@ -1221,7 +1221,7 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
struct rb_node **n = &ic->in_progress.rb_node;
struct rb_node *parent;
BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
if (likely(check_waiting)) {
struct dm_integrity_range *range;
@ -1339,10 +1339,10 @@ static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *
#define NOT_FOUND (-1U)
static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
struct rb_node *n = ic->journal_tree_root.rb_node;
unsigned found = NOT_FOUND;
unsigned int found = NOT_FOUND;
*next_sector = (sector_t)-1;
while (n) {
struct journal_node *j = container_of(n, struct journal_node, node);
@ -1360,7 +1360,7 @@ static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, se
return found;
}
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
struct journal_node *node, *next_node;
struct rb_node *next;
@ -1385,7 +1385,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
{
struct rb_node *next;
struct journal_node *next_node;
unsigned next_section;
unsigned int next_section;
BUG_ON(RB_EMPTY_NODE(&node->node));
@ -1398,7 +1398,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
if (next_node->sector != node->sector)
return false;
next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
if (next_section >= ic->committed_section &&
next_section < ic->committed_section + ic->n_committed_sections)
return true;
@ -1413,17 +1413,17 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
#define TAG_CMP 2
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned *metadata_offset, unsigned total_size, int op)
unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
unsigned hash_offset = 0;
unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
unsigned int hash_offset = 0;
unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
do {
unsigned char *data, *dp;
struct dm_buffer *b;
unsigned to_copy;
unsigned int to_copy;
int r;
r = dm_integrity_failed(ic);
@ -1453,7 +1453,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
goto thorough_test;
}
} else {
unsigned i, ts;
unsigned int i, ts;
thorough_test:
ts = total_size;
@ -1652,7 +1652,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
__le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
unsigned digest_size;
unsigned int digest_size;
req->tfm = ic->internal_hash;
@ -1709,13 +1709,13 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned sectors_to_process;
unsigned int sectors_to_process;
if (unlikely(ic->mode == 'R'))
goto skip_io;
@ -1735,14 +1735,13 @@ static void integrity_metadata(struct work_struct *w)
}
if (unlikely(dio->op == REQ_OP_DISCARD)) {
sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
unsigned bi_size = dio->bio_details.bi_iter.bi_size;
unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
unsigned max_blocks = max_size / ic->tag_size;
unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
unsigned int max_blocks = max_size / ic->tag_size;
memset(checksums, DISCARD_FILLER, max_size);
while (bi_size) {
unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
this_step_blocks = min(this_step_blocks, max_blocks);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
this_step_blocks * ic->tag_size, TAG_WRITE);
@ -1752,13 +1751,7 @@ static void integrity_metadata(struct work_struct *w)
goto error;
}
/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
BUG();
}*/
bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
}
if (likely(checksums != checksums_onstack))
@ -1770,7 +1763,7 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
unsigned int pos;
char *mem, *checksums_ptr;
again:
@ -1823,13 +1816,13 @@ static void integrity_metadata(struct work_struct *w)
if (bip) {
struct bio_vec biv;
struct bvec_iter iter;
unsigned data_to_process = dio->range.n_sectors;
unsigned int data_to_process = dio->range.n_sectors;
sector_to_block(ic, data_to_process);
data_to_process *= ic->tag_size;
bip_for_each_vec(biv, bip, iter) {
unsigned char *tag;
unsigned this_len;
unsigned int this_len;
BUG_ON(PageHighMem(biv.bv_page));
tag = bvec_virt(&biv);
@ -1867,7 +1860,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned log2_max_io_len = __fls(ti->max_io_len);
unsigned int log2_max_io_len = __fls(ti->max_io_len);
sector_t start_boundary = sec >> log2_max_io_len;
sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
if (start_boundary < end_boundary) {
@ -1897,7 +1890,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
ic->provided_data_sectors);
return DM_MAPIO_KILL;
}
if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
ic->sectors_per_block,
dio->range.logical_sector, bio_sectors(bio));
@ -1919,7 +1912,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
bip = bio_integrity(bio);
if (!ic->internal_hash) {
if (bip) {
unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
if (ic->log2_tag_size >= 0)
wanted_tag_size <<= ic->log2_tag_size;
else
@ -1949,11 +1942,11 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
unsigned journal_section, unsigned journal_entry)
unsigned int journal_section, unsigned int journal_entry)
{
struct dm_integrity_c *ic = dio->ic;
sector_t logical_sector;
unsigned n_sectors;
unsigned int n_sectors;
logical_sector = dio->range.logical_sector;
n_sectors = dio->range.n_sectors;
@ -1976,7 +1969,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (unlikely(dio->op == REQ_OP_READ)) {
struct journal_sector *js;
char *mem_ptr;
unsigned s;
unsigned int s;
if (unlikely(journal_entry_is_inprogress(je))) {
flush_dcache_page(bv.bv_page);
@ -2013,12 +2006,12 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (!ic->internal_hash) {
struct bio_integrity_payload *bip = bio_integrity(bio);
unsigned tag_todo = ic->tag_size;
unsigned int tag_todo = ic->tag_size;
char *tag_ptr = journal_entry_tag(ic, je);
if (bip) do {
struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
unsigned tag_now = min(biv.bv_len, tag_todo);
unsigned int tag_now = min(biv.bv_len, tag_todo);
char *tag_addr;
BUG_ON(PageHighMem(biv.bv_page));
tag_addr = bvec_virt(&biv);
@ -2037,7 +2030,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (likely(dio->op == REQ_OP_WRITE)) {
struct journal_sector *js;
unsigned s;
unsigned int s;
js = access_journal_data(ic, journal_section, journal_entry);
memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
@ -2048,7 +2041,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
@ -2105,8 +2098,8 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
{
struct dm_integrity_c *ic = dio->ic;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
unsigned journal_section, journal_entry;
unsigned journal_read_pos;
unsigned int journal_section, journal_entry;
unsigned int journal_read_pos;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
@ -2131,8 +2124,8 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
journal_read_pos = NOT_FOUND;
if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
if (dio->op == REQ_OP_WRITE) {
unsigned next_entry, i, pos;
unsigned ws, we, range_sectors;
unsigned int next_entry, i, pos;
unsigned int ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
(sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
@ -2185,8 +2178,8 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
dio->range.n_sectors = next_sector - dio->range.logical_sector;
} else {
unsigned i;
unsigned jp = journal_read_pos + 1;
unsigned int i;
unsigned int jp = journal_read_pos + 1;
for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
break;
@ -2218,7 +2211,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
*/
if (journal_read_pos != NOT_FOUND) {
sector_t next_sector;
unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != journal_read_pos)) {
remove_range_unlocked(ic, &dio->range);
goto retry;
@ -2227,7 +2220,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
}
if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
sector_t next_sector;
unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != NOT_FOUND) ||
unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
remove_range_unlocked(ic, &dio->range);
@ -2354,8 +2347,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
static void integrity_commit(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
unsigned commit_start, commit_sections;
unsigned i, j, n;
unsigned int commit_start, commit_sections;
unsigned int i, j, n;
struct bio *flushes;
del_timer(&ic->autocommit_timer);
@ -2433,17 +2426,17 @@ static void complete_copy_from_journal(unsigned long error, void *context)
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
struct journal_entry *je)
{
unsigned s = 0;
unsigned int s = 0;
do {
js->commit_id = je->last_bytes[s];
js++;
} while (++s < ic->sectors_per_block);
}
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
unsigned write_sections, bool from_replay)
static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
unsigned int write_sections, bool from_replay)
{
unsigned i, j, n;
unsigned int i, j, n;
struct journal_completion comp;
struct blk_plug plug;
@ -2462,9 +2455,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
sector_t sec, area, offset;
unsigned k, l, next_loop;
unsigned int k, l, next_loop;
sector_t metadata_block;
unsigned metadata_offset;
unsigned int metadata_offset;
struct journal_io *io;
if (journal_entry_is_unused(je))
@ -2472,7 +2465,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
sec = journal_entry_get_sector(je);
if (unlikely(from_replay)) {
if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
@ -2590,9 +2583,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
static void integrity_writer(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
unsigned write_start, write_sections;
unsigned int write_start, write_sections;
unsigned prev_free_sectors;
unsigned int prev_free_sectors;
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
@ -2639,12 +2632,12 @@ static void integrity_recalc(struct work_struct *w)
struct dm_io_region io_loc;
sector_t area, offset;
sector_t metadata_block;
unsigned metadata_offset;
unsigned int metadata_offset;
sector_t logical_sector, n_sectors;
__u8 *t;
unsigned i;
unsigned int i;
int r;
unsigned super_counter = 0;
unsigned int super_counter = 0;
DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
@ -2668,7 +2661,7 @@ static void integrity_recalc(struct work_struct *w)
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
@ -2859,10 +2852,10 @@ static void bitmap_flush_work(struct work_struct *work)
}
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
unsigned n_sections, unsigned char commit_seq)
static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
unsigned int n_sections, unsigned char commit_seq)
{
unsigned i, j, n;
unsigned int i, j, n;
if (!n_sections)
return;
@ -2885,7 +2878,7 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
write_journal(ic, start_section, n_sections);
}
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
{
unsigned char k;
for (k = 0; k < N_COMMIT_IDS; k++) {
@ -2898,11 +2891,11 @@ static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, co
static void replay_journal(struct dm_integrity_c *ic)
{
unsigned i, j;
unsigned int i, j;
bool used_commit_ids[N_COMMIT_IDS];
unsigned max_commit_id_sections[N_COMMIT_IDS];
unsigned write_start, write_sections;
unsigned continue_section;
unsigned int max_commit_id_sections[N_COMMIT_IDS];
unsigned int write_start, write_sections;
unsigned int continue_section;
bool journal_empty;
unsigned char unused, last_used, want_commit_seq;
@ -3020,7 +3013,7 @@ static void replay_journal(struct dm_integrity_c *ic)
ic->commit_seq = want_commit_seq;
DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
} else {
unsigned s;
unsigned int s;
unsigned char erase_seq;
clear_journal:
DEBUG_print("clearing journal\n");
@ -3252,10 +3245,10 @@ static void dm_integrity_resume(struct dm_target *ti)
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
unsigned arg_count;
unsigned int arg_count;
size_t sz = 0;
switch (type) {
@ -3305,7 +3298,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
if (ic->mode == 'B') {
@ -3384,7 +3377,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
unsigned sector_space = JOURNAL_SECTOR_DATA;
unsigned int sector_space = JOURNAL_SECTOR_DATA;
ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
@ -3461,9 +3454,10 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
}
}
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
static int initialize_superblock(struct dm_integrity_c *ic,
unsigned int journal_sectors, unsigned int interleave_sectors)
{
unsigned journal_sections;
unsigned int journal_sections;
int test_bit;
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
@ -3548,7 +3542,7 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned i;
unsigned int i;
if (!pl)
return;
@ -3557,10 +3551,10 @@ static void dm_integrity_free_page_list(struct page_list *pl)
kvfree(pl);
}
static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
{
struct page_list *pl;
unsigned i;
unsigned int i;
pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
@ -3583,7 +3577,7 @@ static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
unsigned i;
unsigned int i;
for (i = 0; i < ic->journal_sections; i++)
kvfree(sl[i]);
kvfree(sl);
@ -3593,7 +3587,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
struct page_list *pl)
{
struct scatterlist **sl;
unsigned i;
unsigned int i;
sl = kvmalloc_array(ic->journal_sections,
sizeof(struct scatterlist *),
@ -3603,10 +3597,10 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist *s;
unsigned start_index, start_offset;
unsigned end_index, end_offset;
unsigned n_pages;
unsigned idx;
unsigned int start_index, start_offset;
unsigned int end_index, end_offset;
unsigned int n_pages;
unsigned int idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
page_list_location(ic, i, ic->journal_section_sectors - 1,
@ -3624,7 +3618,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
sg_init_table(s, n_pages);
for (idx = start_index; idx <= end_index; idx++) {
char *va = lowmem_page_address(pl[idx].page);
unsigned start = 0, end = PAGE_SIZE;
unsigned int start = 0, end = PAGE_SIZE;
if (idx == start_index)
start = start_offset;
if (idx == end_index)
@ -3711,7 +3705,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
static int create_journal(struct dm_integrity_c *ic, char **error)
{
int r = 0;
unsigned i;
unsigned int i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
unsigned char *crypt_data = NULL, *crypt_iv = NULL;
struct skcipher_request *req = NULL;
@ -3738,7 +3732,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
if (ic->journal_crypt_alg.alg_string) {
unsigned ivsize, blocksize;
unsigned int ivsize, blocksize;
struct journal_completion comp;
comp.ic = ic;
@ -3827,7 +3821,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
unsigned crypt_len = roundup(ivsize, blocksize);
unsigned int crypt_len = roundup(ivsize, blocksize);
req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
if (!req) {
@ -3915,7 +3909,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
for (i = 0; i < N_COMMIT_IDS; i++) {
unsigned j;
unsigned int j;
retest_commit_id:
for (j = 0; j < i; j++) {
if (ic->commit_ids[j] == ic->commit_ids[i]) {
@ -3969,17 +3963,17 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
* journal_mac
* recalculate
*/
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_integrity_c *ic;
char dummy;
int r;
unsigned extra_args;
unsigned int extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 18, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
@ -4058,7 +4052,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
unsigned val;
unsigned int val;
unsigned long long llval;
opt_string = dm_shift_arg(&as);
if (!opt_string) {
@ -4391,7 +4385,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
@ -4465,8 +4459,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
if (ic->mode == 'B') {
unsigned i;
unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
unsigned int i;
unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
@ -4486,7 +4480,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
for (i = 0; i < ic->n_bitmap_blocks; i++) {
struct bitmap_block_status *bbs = &ic->bbs[i];
unsigned sector, pl_index, pl_offset;
unsigned int sector, pl_index, pl_offset;
INIT_WORK(&bbs->work, bitmap_block_work);
bbs->ic = ic;
@ -4523,7 +4517,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
if (ic->mode == 'B') {
unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
unsigned int max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
if (!max_io_len)
max_io_len = 1U << 31;
DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
@ -4594,7 +4588,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->journal_io_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
if (ic->sk_requests) {
unsigned i;
unsigned int i;
for (i = 0; i < ic->journal_sections; i++) {
struct skcipher_request *req = ic->sk_requests[i];


@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
@ -131,7 +131,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
* rewinding from end of bio and restoring its original position.
* Caller is also responsible for restoring bio's size.
*/
static void dm_bio_rewind(struct bio *bio, unsigned bytes)
static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
if (bio_integrity(bio))
dm_bio_integrity_rewind(bio, bytes);


@ -48,7 +48,7 @@ static struct kmem_cache *_dm_io_cache;
struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
unsigned int min_ios = dm_get_reserved_bio_based_ios();
int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@ -88,7 +88,7 @@ EXPORT_SYMBOL(dm_io_client_destroy);
* bi_private.
*---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
unsigned region)
unsigned int region)
{
if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
DMCRIT("Unaligned struct io pointer %p", io);
@ -99,7 +99,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
unsigned *region)
unsigned int *region)
{
unsigned long val = (unsigned long)bio->bi_private;
@ -137,7 +137,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
static void endio(struct bio *bio)
{
struct io *io;
unsigned region;
unsigned int region;
blk_status_t error;
if (bio->bi_status && bio_data_dir(bio) == READ)
@ -160,11 +160,11 @@ static void endio(struct bio *bio)
*---------------------------------------------------------------*/
struct dpages {
void (*get_page)(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset);
struct page **p, unsigned long *len, unsigned int *offset);
void (*next_page)(struct dpages *dp);
union {
unsigned context_u;
unsigned int context_u;
struct bvec_iter context_bi;
};
void *context_ptr;
@ -177,9 +177,9 @@ struct dpages {
* Functions for getting the pages from a list.
*/
static void list_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
struct page **p, unsigned long *len, unsigned int *offset)
{
unsigned o = dp->context_u;
unsigned int o = dp->context_u;
struct page_list *pl = (struct page_list *) dp->context_ptr;
*p = pl->page;
@ -194,7 +194,7 @@ static void list_next_page(struct dpages *dp)
dp->context_u = 0;
}
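struct dpages is a tiny iterator vtable: get_page() reports the current page, length, and offset, next_page() advances, and the context union holds per-source cursor state (a list offset, a bvec iterator, and so on). A userspace analog of the same pattern over a linked list (illustrative, not kernel API):

#include <stdio.h>

struct node {
	int value;
	struct node *next;
};

/* two-method iterator with opaque per-source state, like struct dpages */
struct cursor {
	int  (*get)(struct cursor *c);
	void (*advance)(struct cursor *c);
	void *pos;
};

static int list_get(struct cursor *c)
{
	return ((struct node *)c->pos)->value;
}

static void list_advance(struct cursor *c)
{
	c->pos = ((struct node *)c->pos)->next;
}

int main(void)
{
	struct node n3 = { 3, NULL }, n2 = { 2, &n3 }, n1 = { 1, &n2 };
	struct cursor cur = { list_get, list_advance, &n1 };

	while (cur.pos) {
		printf("%d ", cur.get(&cur));
		cur.advance(&cur);
	}
	putchar('\n'); /* 1 2 3 */
	return 0;
}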
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
dp->get_page = list_get_page;
dp->next_page = list_next_page;
@ -206,7 +206,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
* Functions for getting the pages from a bvec.
*/
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
unsigned long *len, unsigned int *offset)
{
struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
dp->context_bi);
@ -244,7 +244,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
* Functions for getting the pages from a VMA.
*/
static void vm_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
struct page **p, unsigned long *len, unsigned int *offset)
{
*p = vmalloc_to_page(dp->context_ptr);
*offset = dp->context_u;
@ -269,7 +269,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
* Functions for getting the pages from kernel memory.
*/
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
unsigned *offset)
unsigned int *offset)
{
*p = virt_to_page(dp->context_ptr);
*offset = dp->context_u;
@ -293,15 +293,15 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
static void do_region(const blk_opf_t opf, unsigned region,
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
struct bio *bio;
struct page *page;
unsigned long len;
unsigned offset;
unsigned num_bvecs;
unsigned int offset;
unsigned int num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
@ -508,7 +508,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
int r;

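store_io_and_region_in_bio() and retrieve_io_and_region_from_bio() above pack a small region number into the low bits of a DM_IO_MAX_REGIONS-aligned struct io pointer, which is why the alignment is checked before packing. A self-contained sketch of the same pointer-tagging trick, with invented names and an assumed alignment of 8:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_REGIONS 8U   /* power of two; struct io is NR_REGIONS-aligned */

struct io { _Alignas(NR_REGIONS) int data; };

static void *pack(struct io *io, unsigned int region)
{
        assert((uintptr_t)io % NR_REGIONS == 0 && region < NR_REGIONS);
        return (void *)((uintptr_t)io | region);
}

static void unpack(void *priv, struct io **io, unsigned int *region)
{
        *io = (struct io *)((uintptr_t)priv & ~(uintptr_t)(NR_REGIONS - 1));
        *region = (unsigned int)((uintptr_t)priv & (NR_REGIONS - 1));
}

int main(void)
{
        static struct io one;
        struct io *io;
        unsigned int region;

        unpack(pack(&one, 5), &io, &region);
        printf("%d %u\n", io == &one, region); /* 1 5 */
        return 0;
}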

@ -31,7 +31,7 @@ struct dm_file {
* poll will wait until the global event number is greater than
* this value.
*/
volatile unsigned global_event_nr;
volatile unsigned int global_event_nr;
};
/*-----------------------------------------------------------------
@ -413,7 +413,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
struct hash_cell *hc;
struct dm_table *table;
struct mapped_device *md;
unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
int srcu_idx;
/*
@ -1021,7 +1021,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
int r;
char *new_data = (char *) param + param->data_start;
struct mapped_device *md;
unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data ||
@ -1096,7 +1096,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
static int do_suspend(struct dm_ioctl *param)
{
int r = 0;
unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct mapped_device *md;
md = find_device(param);
@ -1125,7 +1125,7 @@ static int do_suspend(struct dm_ioctl *param)
static int do_resume(struct dm_ioctl *param)
{
int r = 0;
unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *new_map, *old_map = NULL;
@ -1243,7 +1243,7 @@ static void retrieve_status(struct dm_table *table,
char *outbuf, *outptr;
status_type_t type;
size_t remaining, len, used = 0;
unsigned status_flags = 0;
unsigned int status_flags = 0;
outptr = outbuf = get_result_buffer(param, param_size, &len);
@ -1648,8 +1648,8 @@ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r;
@ -1859,7 +1859,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
struct dm_ioctl *dmi;
int secure_data;
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
unsigned noio_flag;
unsigned int noio_flag;
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;


@ -34,14 +34,14 @@
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024
static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
static unsigned dm_get_kcopyd_subjob_size(void)
static unsigned int dm_get_kcopyd_subjob_size(void)
{
unsigned sub_job_size_kb;
unsigned int sub_job_size_kb;
sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
DEFAULT_SUB_JOB_SIZE_KB,
@ -56,9 +56,9 @@ static unsigned dm_get_kcopyd_subjob_size(void)
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
struct page_list *pages;
unsigned nr_reserved_pages;
unsigned nr_free_pages;
unsigned sub_job_size;
unsigned int nr_reserved_pages;
unsigned int nr_free_pages;
unsigned int sub_job_size;
struct dm_io_client *io_client;
@ -119,7 +119,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
static void io_job_start(struct dm_kcopyd_throttle *t)
{
unsigned throttle, now, difference;
unsigned int throttle, now, difference;
int slept = 0, skew;
if (unlikely(!t))
@ -182,7 +182,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
goto skip_limit;
if (!t->num_io_jobs) {
unsigned now, difference;
unsigned int now, difference;
now = jiffies;
difference = now - t->last_jiffies;
@ -303,9 +303,9 @@ static void drop_pages(struct page_list *pl)
/*
* Allocate and reserve nr_pages for the use of a specific client.
*/
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{
unsigned i;
unsigned int i;
struct page_list *pl = NULL, *next;
for (i = 0; i < nr_pages; i++) {
@ -341,7 +341,7 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
unsigned flags;
unsigned int flags;
/*
* Error state of the job.
@ -582,7 +582,7 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
@ -849,8 +849,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
EXPORT_SYMBOL(dm_kcopyd_copy);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
unsigned num_dests, struct dm_io_region *dests,
unsigned flags, dm_kcopyd_notify_fn fn, void *context)
unsigned int num_dests, struct dm_io_region *dests,
unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
@ -906,7 +906,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
unsigned reserve_pages;
unsigned int reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);

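run_pages_job() above sizes its allocation with dm_div_up(job->dests[0].count, PAGE_SIZE >> 9), a round-up division of a sector count by sectors-per-page. The ceiling-division idiom it relies on, in plain unsigned arithmetic (the kernel helper additionally copes with 64-bit sector_t):

#include <stdio.h>

/* Round-up integer division; 'd' must be non-zero. */
static unsigned int div_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        /* A 4 KiB page holds 8 sectors of 512 bytes; 9 sectors need 2 pages. */
        printf("%u\n", div_up(9, 4096 >> 9)); /* 2 */
        return 0;
}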

@ -95,7 +95,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
}
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
size_t sz = 0;


@ -123,7 +123,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
}
static int build_constructor_string(struct dm_target *ti,
unsigned argc, char **argv,
unsigned int argc, char **argv,
char **ctr_str)
{
int i, str_size;
@ -188,7 +188,7 @@ static void do_flush(struct work_struct *work)
* to the userspace ctr function.
*/
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
int r = 0;
int str_size;
@ -792,7 +792,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
* Returns: amount of space consumed
*/
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen)
char *result, unsigned int maxlen)
{
int r = 0;
char *table_args;


@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
fill_pkg(msg, NULL);
else if (msg->len < sizeof(*tfr))
DMERR("Incomplete message received (expected %u, got %u): [%u]",
(unsigned)sizeof(*tfr), msg->len, msg->seq);
(unsigned int)sizeof(*tfr), msg->len, msg->seq);
else
fill_pkg(NULL, tfr);
spin_unlock(&receiving_list_lock);


@ -792,10 +792,10 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio,
* INFO format: <logged entries> <highest allocated sector>
*/
static void log_writes_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result,
unsigned maxlen)
unsigned int status_flags, char *result,
unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct log_writes_c *lc = ti->private;
switch (type) {
@ -844,8 +844,8 @@ static int log_writes_iterate_devices(struct dm_target *ti,
* Messages supported:
* mark <mark data> - specify the marked data.
*/
static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct log_writes_c *lc = ti->private;


@ -223,7 +223,7 @@ struct log_c {
unsigned int region_count;
region_t sync_count;
unsigned bitset_uint32_count;
unsigned int bitset_uint32_count;
uint32_t *clean_bits;
uint32_t *sync_bits;
uint32_t *recovering_bits; /* FIXME: this seems excessive */
@ -255,20 +255,20 @@ struct log_c {
* The touched member needs to be updated every time we access
* one of the bitsets.
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
static inline int log_test_bit(uint32_t *bs, unsigned int bit)
{
return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
uint32_t *bs, unsigned int bit)
{
__set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
uint32_t *bs, unsigned int bit)
{
__clear_bit_le(bit, bs);
l->touched_dirtied = 1;
@ -582,7 +582,7 @@ static void fail_log_device(struct log_c *lc)
static int disk_resume(struct dm_dirty_log *log)
{
int r;
unsigned i;
unsigned int i;
struct log_c *lc = (struct log_c *) log->context;
size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

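The clean/sync/recovering bitsets above are flat uint32_t arrays driven through the little-endian bit helpers (test_bit_le and friends). A minimal userspace version of test/set/clear on such an array, ignoring the endianness handling the kernel needs because these bitsets also live on disk:

#include <stdint.h>
#include <stdio.h>

static int test_bit32(const uint32_t *bs, unsigned int bit)
{
        return (bs[bit / 32] >> (bit % 32)) & 1U;
}

static void set_bit32(uint32_t *bs, unsigned int bit)
{
        bs[bit / 32] |= 1U << (bit % 32);
}

static void clear_bit32(uint32_t *bs, unsigned int bit)
{
        bs[bit / 32] &= ~(1U << (bit % 32));
}

int main(void)
{
        uint32_t clean_bits[2] = { 0 };

        set_bit32(clean_bits, 37);
        printf("%d", test_bit32(clean_bits, 37));   /* 1 */
        clear_bit32(clean_bits, 37);
        printf("%d\n", test_bit32(clean_bits, 37)); /* 0 */
        return 0;
}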

@ -29,7 +29,7 @@
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
@ -39,7 +39,7 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
unsigned fail_count; /* Cumulative failure count */
unsigned int fail_count; /* Cumulative failure count */
struct dm_path path;
struct delayed_work activate_path;
@ -59,8 +59,8 @@ struct priority_group {
struct multipath *m; /* Owning multipath instance */
struct path_selector ps;
unsigned pg_num; /* Reference number */
unsigned nr_pgpaths; /* Number of paths in PG */
unsigned int pg_num; /* Reference number */
unsigned int nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
bool bypassed:1; /* Temporarily bypass this PG? */
@ -78,14 +78,14 @@ struct multipath {
struct priority_group *next_pg; /* Switch to this PG if set */
atomic_t nr_valid_paths; /* Total number of usable paths */
unsigned nr_priority_groups;
unsigned int nr_priority_groups;
struct list_head priority_groups;
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
unsigned int pg_init_retries; /* Number of times to retry pg_init */
unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
@ -397,7 +397,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned long flags;
struct priority_group *pg;
struct pgpath *pgpath;
unsigned bypassed = 1;
unsigned int bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
spin_lock_irqsave(&m->lock, flags);
@ -840,7 +840,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
{
int r;
struct path_selector_type *pst;
unsigned ps_argc;
unsigned int ps_argc;
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
@ -983,7 +983,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
};
int r;
unsigned i, nr_selector_args, nr_args;
unsigned int i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@ -1049,7 +1049,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
unsigned int hw_argc;
int ret;
struct dm_target *ti = m->ti;
@ -1101,7 +1101,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
unsigned argc;
unsigned int argc;
struct dm_target *ti = m->ti;
const char *arg_name;
@ -1170,7 +1170,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
/* target arguments */
static const struct dm_arg _args[] = {
@ -1181,8 +1181,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
int r;
struct multipath *m;
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
unsigned int pg_count = 0;
unsigned int next_pg_num;
unsigned long flags;
as.argc = argc;
@ -1224,7 +1224,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
if (IS_ERR(pg)) {
@ -1365,7 +1365,7 @@ static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned nr_valid_paths;
unsigned int nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
@ -1454,7 +1454,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
unsigned pgnum;
unsigned int pgnum;
unsigned long flags;
char dummy;
@ -1487,7 +1487,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
struct priority_group *pg;
unsigned pgnum;
unsigned int pgnum;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@ -1789,14 +1789,14 @@ static void multipath_resume(struct dm_target *ti)
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
static void multipath_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
unsigned int pg_num;
char state;
spin_lock_irqsave(&m->lock, flags);
@ -1948,8 +1948,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
spin_unlock_irqrestore(&m->lock, flags);
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct dm_dev *dev;

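DM_PG_INIT_DELAY_DEFAULT above is ((unsigned int) -1): conversion of -1 to an unsigned type is defined as reduction modulo 2^N, so the cast always yields UINT_MAX, a common "value not set" sentinel:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int sentinel = (unsigned int)-1;

        /* Reduction modulo UINT_MAX + 1 makes this true on every platform. */
        printf("%d\n", sentinel == UINT_MAX); /* 1 */
        return 0;
}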

@ -17,6 +17,6 @@ struct dm_path {
};
/* Callback for hwh_pg_init_fn to use when complete */
void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
void dm_pg_init_complete(struct dm_path *path, unsigned int err_flags);
#endif


@ -52,7 +52,7 @@ struct path_selector_type {
/*
* Constructs a path selector object, takes custom arguments
*/
int (*create) (struct path_selector *ps, unsigned argc, char **argv);
int (*create) (struct path_selector *ps, unsigned int argc, char **argv);
void (*destroy) (struct path_selector *ps);
/*


@ -108,7 +108,7 @@ static int ioa_add_path(struct path_selector *ps, struct dm_path *path,
return ret;
}
static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@ -138,7 +138,7 @@ static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
static void ioa_destroy(struct path_selector *ps)
{
struct selector *s = ps->context;
unsigned cpu;
unsigned int cpu;
for_each_cpu(cpu, s->path_mask)
ioa_free_path(s, cpu);


@ -35,7 +35,7 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
unsigned repeat_count;
unsigned int repeat_count;
atomic_t qlen; /* the number of in-flight I/Os */
};
@ -52,7 +52,7 @@ static struct selector *alloc_selector(void)
return s;
}
static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@ -84,9 +84,9 @@ static void ql_destroy(struct path_selector *ps)
}
static int ql_status(struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned maxlen)
status_type_t type, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct path_info *pi;
/* When called with NULL path, return selector status/args. */
@ -116,7 +116,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = QL_MIN_IO;
unsigned int repeat_count = QL_MIN_IO;
char dummy;
unsigned long flags;


@ -26,7 +26,7 @@
struct path_info {
struct list_head list;
struct dm_path *path;
unsigned repeat_count;
unsigned int repeat_count;
};
static void free_paths(struct list_head *paths)
@ -62,7 +62,7 @@ static struct selector *alloc_selector(void)
return s;
}
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@ -119,7 +119,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = RR_MIN_IO;
unsigned int repeat_count = RR_MIN_IO;
char dummy;
unsigned long flags;


@ -30,8 +30,8 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
unsigned repeat_count;
unsigned relative_throughput;
unsigned int repeat_count;
unsigned int relative_throughput;
atomic_t in_flight_size; /* Total size of in-flight I/Os */
};
@ -48,7 +48,7 @@ static struct selector *alloc_selector(void)
return s;
}
static int st_create(struct path_selector *ps, unsigned argc, char **argv)
static int st_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@ -80,9 +80,9 @@ static void st_destroy(struct path_selector *ps)
}
static int st_status(struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned maxlen)
status_type_t type, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct path_info *pi;
if (!path)
@ -113,8 +113,8 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = ST_MIN_IO;
unsigned relative_throughput = 1;
unsigned int repeat_count = ST_MIN_IO;
unsigned int relative_throughput = 1;
char dummy;
unsigned long flags;


@ -3712,7 +3712,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
}
static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned maxlen)
char *result, unsigned int maxlen)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;


@ -82,7 +82,7 @@ struct mirror_set {
struct work_struct trigger_event;
unsigned nr_mirrors;
unsigned int nr_mirrors;
struct mirror mirror[];
};
@ -327,7 +327,7 @@ static void recovery_complete(int read_err, unsigned long write_err,
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
unsigned i;
unsigned int i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
@ -593,7 +593,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
static void write_callback(unsigned long error, void *context)
{
unsigned i;
unsigned int i;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
@ -963,10 +963,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
unsigned argc, char **argv,
unsigned *args_used)
unsigned int argc, char **argv,
unsigned int *args_used)
{
unsigned param_count;
unsigned int param_count;
struct dm_dirty_log *dl;
char dummy;
@ -997,10 +997,10 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
unsigned *args_used)
static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
unsigned int *args_used)
{
unsigned num_features;
unsigned int num_features;
struct dm_target *ti = ms->ti;
char dummy;
int i;
@ -1389,7 +1389,7 @@ static char device_status_char(struct mirror *m)
static void mirror_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned int m, sz = 0;
int num_feature_args = 0;
@ -1458,7 +1458,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
{
struct mirror_set *ms = ti->private;
int ret = 0;
unsigned i;
unsigned int i;
for (i = 0; !ret && i < ms->nr_mirrors; i++)
ret = fn(ti, ms->mirror[i].dev,


@ -56,17 +56,17 @@
*---------------------------------------------------------------*/
struct dm_region_hash {
uint32_t region_size;
unsigned region_shift;
unsigned int region_shift;
/* holds persistent region state */
struct dm_dirty_log *log;
/* hash table */
rwlock_t hash_lock;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
unsigned shift;
unsigned int mask;
unsigned int nr_buckets;
unsigned int prime;
unsigned int shift;
struct list_head *buckets;
/*
@ -74,7 +74,7 @@ struct dm_region_hash {
*/
int flush_failure;
unsigned max_recovery; /* Max # of regions to recover in parallel */
unsigned int max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
@ -163,12 +163,12 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
sector_t target_begin, unsigned max_recovery,
sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions)
{
struct dm_region_hash *rh;
unsigned nr_buckets, max_buckets;
unsigned int nr_buckets, max_buckets;
size_t i;
int ret;
@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
unsigned h;
unsigned int h;
struct dm_region *reg, *nreg;
BUG_ON(!list_empty(&rh->quiesced_regions));
@ -263,9 +263,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
{
return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
}
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)

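rh_hash() above is a multiplicative hash: multiply the region number by a prime, keep the high bits via a shift, then mask down to a power-of-two bucket count. The same computation standalone; the prime and shift values here are placeholders, not the ones dm-region-hash derives when sizing its table:

#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 64U          /* power of two */
#define MASK (NR_BUCKETS - 1)
#define PRIME 4294967291U       /* placeholder prime */
#define SHIFT 12U               /* placeholder high-bit shift */

static unsigned int rh_hash(uint64_t region)
{
        return (unsigned int)((region * PRIME) >> SHIFT) & MASK;
}

int main(void)
{
        uint64_t region;

        for (region = 0; region < 4; region++)
                printf("region %llu -> bucket %u\n",
                       (unsigned long long)region, rh_hash(region));
        return 0;
}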

@ -23,33 +23,33 @@ struct dm_rq_target_io {
union map_info info;
struct dm_stats_aux stats_aux;
unsigned long duration_jiffies;
unsigned n_sectors;
unsigned completed;
unsigned int n_sectors;
unsigned int completed;
};
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
/*
* Request-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
unsigned dm_get_reserved_rq_based_ios(void)
unsigned int dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
static unsigned dm_get_blk_mq_nr_hw_queues(void)
static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
static unsigned dm_get_blk_mq_queue_depth(void)
static unsigned int dm_get_blk_mq_queue_depth(void)
{
return __dm_get_module_param(&dm_mq_queue_depth,
DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);

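The dm_get_reserved_rq_based_ios() and dm_get_blk_mq_*() getters above all funnel through __dm_get_module_param(), which substitutes the default when the sysfs-writable parameter is zero and clamps it to a cap. A simplified sketch under that reading (the in-tree helper also writes the clamped value back to the module parameter, and the cap value used below is assumed):

#include <stdio.h>

/* Simplified approximation of __dm_get_module_param(). */
static unsigned int get_module_param(unsigned int param,
                                     unsigned int def, unsigned int max)
{
        if (!param)
                return def;
        if (param > max)
                return max;
        return param;
}

int main(void)
{
        printf("%u %u %u\n",
               get_module_param(0, 256, 1024),    /* unset -> default 256 */
               get_module_param(9999, 256, 1024), /* clamped to 1024 */
               get_module_param(64, 256, 1024));  /* kept as 64 */
        return 0;
}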

@ -38,7 +38,7 @@ void dm_stop_queue(struct request_queue *q);
void dm_mq_kick_requeue_list(struct mapped_device *md);
unsigned dm_get_reserved_rq_based_ios(void);
unsigned int dm_get_reserved_rq_based_ios(void);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,


@ -303,7 +303,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
{
int r;
struct disk_header *dh;
unsigned chunk_size;
unsigned int chunk_size;
int chunk_size_supplied = 1;
char *chunk_err;
@ -895,11 +895,11 @@ static int persistent_ctr(struct dm_exception_store *store, char *options)
return r;
}
static unsigned persistent_status(struct dm_exception_store *store,
static unsigned int persistent_status(struct dm_exception_store *store,
status_type_t status, char *result,
unsigned maxlen)
unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:


@ -84,11 +84,11 @@ static int transient_ctr(struct dm_exception_store *store, char *options)
return 0;
}
static unsigned transient_status(struct dm_exception_store *store,
static unsigned int transient_status(struct dm_exception_store *store,
status_type_t status, char *result,
unsigned maxlen)
unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:


@ -41,7 +41,7 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
unsigned int hash_shift;
struct hlist_bl_head *table;
};
@ -106,7 +106,7 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
unsigned in_progress;
unsigned int in_progress;
struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@ -161,7 +161,7 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
@ -324,7 +324,7 @@ struct origin {
struct dm_origin {
struct dm_dev *dev;
struct dm_target *ti;
unsigned split_boundary;
unsigned int split_boundary;
struct list_head hash_list;
};
@ -377,7 +377,7 @@ static void exit_origin_hash(void)
kfree(_dm_origins);
}
static unsigned origin_hash(struct block_device *bdev)
static unsigned int origin_hash(struct block_device *bdev)
{
return bdev->bd_dev & ORIGIN_MASK;
}
@ -652,7 +652,7 @@ static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
}
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
uint32_t size, unsigned int hash_shift)
{
unsigned int i;
@ -850,7 +850,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@ -1010,7 +1010,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
sector_t sector, unsigned chunk_size);
sector_t sector, unsigned int chunk_size);
static void merge_callback(int read_err, unsigned long write_err,
void *context);
@ -1183,7 +1183,7 @@ static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
struct dm_target *ti)
{
int r;
unsigned argc;
unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@ -1241,7 +1241,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int r = -EINVAL;
char *origin_path, *cow_path;
dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
unsigned int args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc < 4) {
@ -2315,11 +2315,11 @@ static void snapshot_merge_resume(struct dm_target *ti)
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct dm_snapshot *snap = ti->private;
unsigned num_features;
unsigned int num_features;
switch (type) {
case STATUSTYPE_INFO:
@ -2592,7 +2592,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
* size must be a multiple of merging_snap's chunk_size.
*/
static int origin_write_extent(struct dm_snapshot *merging_snap,
sector_t sector, unsigned size)
sector_t sector, unsigned int size)
{
int must_wait = 0;
sector_t n;
@ -2668,7 +2668,7 @@ static void origin_dtr(struct dm_target *ti)
static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_origin *o = ti->private;
unsigned available_sectors;
unsigned int available_sectors;
bio_set_dev(bio, o->dev->bdev);
@ -2679,7 +2679,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
available_sectors = o->split_boundary -
((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
if (bio_sectors(bio) > available_sectors)
dm_accept_partial_bio(bio, available_sectors);
@ -2713,7 +2713,7 @@ static void origin_postsuspend(struct dm_target *ti)
}
static void origin_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_origin *o = ti->private;

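origin_map() above trims a bio so it never crosses a split_boundary-aligned chunk: with a power-of-two boundary, boundary - (sector & (boundary - 1)) is exactly the number of sectors left before the next boundary. Standalone:

#include <stdio.h>

/* Sectors remaining before the next 'boundary'-aligned position;
 * 'boundary' must be a power of two, as in dm-snap. */
static unsigned int sectors_to_boundary(unsigned long long sector,
                                        unsigned int boundary)
{
        return boundary - ((unsigned int)sector & (boundary - 1));
}

int main(void)
{
        printf("%u\n", sectors_to_boundary(10, 8)); /* 6: next boundary at 16 */
        printf("%u\n", sectors_to_boundary(16, 8)); /* 8: aligned, a full chunk */
        return 0;
}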

@ -42,12 +42,12 @@ struct dm_stat_shared {
struct dm_stat {
struct list_head list_entry;
int id;
unsigned stat_flags;
unsigned int stat_flags;
size_t n_entries;
sector_t start;
sector_t end;
sector_t step;
unsigned n_histogram_entries;
unsigned int n_histogram_entries;
unsigned long long *histogram_boundaries;
const char *program_id;
const char *aux_data;
@ -63,7 +63,7 @@ struct dm_stat {
struct dm_stats_last_position {
sector_t last_sector;
unsigned last_rw;
unsigned int last_rw;
};
/*
@ -255,8 +255,8 @@ static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
sector_t step, unsigned stat_flags,
unsigned n_histogram_entries,
sector_t step, unsigned int stat_flags,
unsigned int n_histogram_entries,
unsigned long long *histogram_boundaries,
const char *program_id, const char *aux_data,
void (*suspend_callback)(struct mapped_device *),
@ -475,11 +475,11 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
}
static int dm_stats_list(struct dm_stats *stats, const char *program,
char *result, unsigned maxlen)
char *result, unsigned int maxlen)
{
struct dm_stat *s;
sector_t len;
unsigned sz = 0;
unsigned int sz = 0;
/*
* Output format:
@ -499,7 +499,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
DMEMIT(" precise_timestamps");
if (s->n_histogram_entries) {
unsigned i;
unsigned int i;
DMEMIT(" histogram:");
for (i = 0; i < s->n_histogram_entries; i++) {
if (i)
@ -523,7 +523,7 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
* This is racy, but so is part_round_stats_single.
*/
unsigned long long now, difference;
unsigned in_flight_read, in_flight_write;
unsigned int in_flight_read, in_flight_write;
if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
now = jiffies;
@ -534,8 +534,8 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
if (!difference)
return;
in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
if (in_flight_read)
p->io_ticks[READ] += difference;
if (in_flight_write)
@ -596,9 +596,9 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
duration = stats_aux->duration_ns;
}
if (s->n_histogram_entries) {
unsigned lo = 0, hi = s->n_histogram_entries + 1;
unsigned int lo = 0, hi = s->n_histogram_entries + 1;
while (lo + 1 < hi) {
unsigned mid = (lo + hi) / 2;
unsigned int mid = (lo + hi) / 2;
if (s->histogram_boundaries[mid - 1] > duration) {
hi = mid;
} else {
@ -656,7 +656,7 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
}
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *stats_aux)
{
@ -745,7 +745,7 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
@ -779,7 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
p->time_in_queue -= shared->tmp.time_in_queue;
local_irq_enable();
if (s->n_histogram_entries) {
unsigned i;
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++) {
local_irq_disable();
p = &s->stat_percpu[smp_processor_id()][x];
@ -816,7 +816,7 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
unsigned long long result;
unsigned mult;
unsigned int mult;
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
return j;
@ -836,9 +836,9 @@ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long
static int dm_stats_print(struct dm_stats *stats, int id,
size_t idx_start, size_t idx_len,
bool clear, char *result, unsigned maxlen)
bool clear, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct dm_stat *s;
size_t x;
sector_t start, end, step;
@ -894,7 +894,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
if (s->n_histogram_entries) {
unsigned i;
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++) {
DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
}
@ -943,11 +943,11 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
return 0;
}
static int parse_histogram(const char *h, unsigned *n_histogram_entries,
static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
unsigned long long **histogram_boundaries)
{
const char *q;
unsigned n;
unsigned int n;
unsigned long long last;
*n_histogram_entries = 1;
@ -982,23 +982,23 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
}
static int message_stats_create(struct mapped_device *md,
unsigned argc, char **argv,
char *result, unsigned maxlen)
unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r;
int id;
char dummy;
unsigned long long start, end, len, step;
unsigned divisor;
unsigned int divisor;
const char *program_id, *aux_data;
unsigned stat_flags = 0;
unsigned int stat_flags = 0;
unsigned n_histogram_entries = 0;
unsigned int n_histogram_entries = 0;
unsigned long long *histogram_boundaries = NULL;
struct dm_arg_set as, as_backup;
const char *a;
unsigned feature_args;
unsigned int feature_args;
/*
* Input format:
@ -1107,7 +1107,7 @@ static int message_stats_create(struct mapped_device *md,
}
static int message_stats_delete(struct mapped_device *md,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
int id;
char dummy;
@ -1122,7 +1122,7 @@ static int message_stats_delete(struct mapped_device *md,
}
static int message_stats_clear(struct mapped_device *md,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
int id;
char dummy;
@ -1137,8 +1137,8 @@ static int message_stats_clear(struct mapped_device *md,
}
static int message_stats_list(struct mapped_device *md,
unsigned argc, char **argv,
char *result, unsigned maxlen)
unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r;
const char *program = NULL;
@ -1160,8 +1160,8 @@ static int message_stats_list(struct mapped_device *md,
}
static int message_stats_print(struct mapped_device *md,
unsigned argc, char **argv, bool clear,
char *result, unsigned maxlen)
unsigned int argc, char **argv, bool clear,
char *result, unsigned int maxlen)
{
int id;
char dummy;
@ -1187,7 +1187,7 @@ static int message_stats_print(struct mapped_device *md,
}
static int message_stats_set_aux(struct mapped_device *md,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
int id;
char dummy;
@ -1201,8 +1201,8 @@ static int message_stats_set_aux(struct mapped_device *md,
return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen)
int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r;

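dm_stat_for_entry() above bins each duration with a binary search over the n_histogram_entries boundaries, yielding one of n_histogram_entries + 1 buckets: bucket i covers boundaries[i-1] <= d < boundaries[i]. The same search extracted into a standalone function:

#include <stdio.h>

/* Same lo/hi search as dm_stat_for_entry(): n ascending boundaries
 * define n + 1 buckets. */
static unsigned int histogram_bucket(const unsigned long long *boundaries,
                                     unsigned int n, unsigned long long d)
{
        unsigned int lo = 0, hi = n + 1;

        while (lo + 1 < hi) {
                unsigned int mid = (lo + hi) / 2;

                if (boundaries[mid - 1] > d)
                        hi = mid;
                else
                        lo = mid;
        }
        return lo;
}

int main(void)
{
        static const unsigned long long b[] = { 10, 20, 30 };

        printf("%u %u %u\n",
               histogram_bucket(b, 3, 5),   /* 0 */
               histogram_bucket(b, 3, 10),  /* 1 */
               histogram_bucket(b, 3, 99)); /* 3 */
        return 0;
}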

@ -26,11 +26,11 @@ void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen);
int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
char *result, unsigned int maxlen);
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *aux);


@ -273,7 +273,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
unsigned target_bio_nr;
unsigned int target_bio_nr;
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
@ -359,7 +359,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
*/
static void stripe_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
unsigned int sz = 0;
@ -406,7 +406,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
unsigned i;
unsigned int i;
char major_minor[16];
struct stripe_c *sc = ti->private;
@ -444,7 +444,7 @@ static int stripe_iterate_devices(struct dm_target *ti,
{
struct stripe_c *sc = ti->private;
int ret = 0;
unsigned i = 0;
unsigned int i = 0;
do {
ret = fn(ti, sc->stripe[i].dev,
@ -459,7 +459,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * sc->stripes);


@ -38,9 +38,9 @@ struct switch_path {
struct switch_ctx {
struct dm_target *ti;
unsigned nr_paths; /* Number of paths in path_list. */
unsigned int nr_paths; /* Number of paths in path_list. */
unsigned region_size; /* Region size in 512-byte sectors */
unsigned int region_size; /* Region size in 512-byte sectors */
unsigned long nr_regions; /* Number of regions making up the device */
signed char region_size_bits; /* log2 of region_size or -1 */
@ -56,8 +56,8 @@ struct switch_ctx {
struct switch_path path_list[];
};
static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
unsigned region_size)
static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned int nr_paths,
unsigned int region_size)
{
struct switch_ctx *sctx;
@ -73,7 +73,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
return sctx;
}
static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
{
struct switch_ctx *sctx = ti->private;
sector_t nr_regions = ti->len;
@ -124,7 +124,7 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
}
static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
unsigned long *region_index, unsigned *bit)
unsigned long *region_index, unsigned int *bit)
{
if (sctx->region_entries_per_slot_bits >= 0) {
*region_index = region_nr >> sctx->region_entries_per_slot_bits;
@ -137,10 +137,10 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr
*bit *= sctx->region_table_entry_bits;
}
static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
static unsigned int switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
{
unsigned long region_index;
unsigned bit;
unsigned int bit;
switch_get_position(sctx, region_nr, &region_index, &bit);
@ -151,9 +151,9 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
/*
* Find which path to use at given offset.
*/
static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
static unsigned int switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
{
unsigned path_nr;
unsigned int path_nr;
sector_t p;
p = offset;
@ -172,10 +172,10 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
}
static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
unsigned value)
unsigned int value)
{
unsigned long region_index;
unsigned bit;
unsigned int bit;
region_table_slot_t pte;
switch_get_position(sctx, region_nr, &region_index, &bit);
@ -191,7 +191,7 @@ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long reg
*/
static void initialise_region_table(struct switch_ctx *sctx)
{
unsigned path_nr = 0;
unsigned int path_nr = 0;
unsigned long region_nr;
for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
@ -249,7 +249,7 @@ static void switch_dtr(struct dm_target *ti)
* Optional args are to allow for future extension: currently this
* parameter must be 0.
*/
static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
static int switch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
static const struct dm_arg _args[] = {
{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
@ -259,7 +259,7 @@ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct switch_ctx *sctx;
struct dm_arg_set as;
unsigned nr_paths, region_size, nr_optional_args;
unsigned int nr_paths, region_size, nr_optional_args;
int r;
as.argc = argc;
@ -320,7 +320,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
unsigned int path_nr = switch_get_path_nr(sctx, offset);
bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
@ -371,9 +371,9 @@ static __always_inline unsigned long parse_hex(const char **string)
}
static int process_set_region_mappings(struct switch_ctx *sctx,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
unsigned i;
unsigned int i;
unsigned long region_index = 0;
for (i = 1; i < argc; i++) {
@ -466,8 +466,8 @@ static int process_set_region_mappings(struct switch_ctx *sctx,
*
* Only set_region_mappings is supported.
*/
static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
static int switch_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
static DEFINE_MUTEX(message_mutex);
@ -487,10 +487,10 @@ static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
}
static void switch_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct switch_ctx *sctx = ti->private;
unsigned sz = 0;
unsigned int sz = 0;
int path_nr;
switch (type) {
@ -519,7 +519,7 @@ static void switch_status(struct dm_target *ti, status_type_t type,
static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct switch_ctx *sctx = ti->private;
unsigned path_nr;
unsigned int path_nr;
path_nr = switch_get_path_nr(sctx, 0);

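dm-switch keeps one small path number per region, packed several to a machine word; switch_get_position() above turns a region number into a (slot, bit offset) pair that the table read/write helpers then mask. A sketch with assumed sizes of 4 bits per entry in 64-bit slots (dm-switch computes both sizes at table load time):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_BITS       4U  /* bits per region entry (assumed) */
#define ENTRIES_PER_SLOT (64U / ENTRY_BITS)

static uint64_t table[4];    /* covers 64 regions */

static unsigned int table_read(unsigned long region)
{
        unsigned long slot = region / ENTRIES_PER_SLOT;
        unsigned int bit = (region % ENTRIES_PER_SLOT) * ENTRY_BITS;

        return (unsigned int)(table[slot] >> bit) & ((1U << ENTRY_BITS) - 1);
}

static void table_write(unsigned long region, unsigned int value)
{
        unsigned long slot = region / ENTRIES_PER_SLOT;
        unsigned int bit = (region % ENTRIES_PER_SLOT) * ENTRY_BITS;
        uint64_t mask = (uint64_t)((1U << ENTRY_BITS) - 1) << bit;

        table[slot] = (table[slot] & ~mask) | ((uint64_t)value << bit);
}

int main(void)
{
        table_write(17, 5);
        printf("%u\n", table_read(17)); /* 5 */
        return 0;
}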

@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
}
int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md)
unsigned int num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@ -470,10 +470,10 @@ static int adjoin(struct dm_table *t, struct dm_target *ti)
* On the other hand, dm-switch needs to process bulk data using messages and
* excessive use of GFP_NOIO could cause trouble.
*/
static char **realloc_argv(unsigned *size, char **old_argv)
static char **realloc_argv(unsigned int *size, char **old_argv)
{
char **argv;
unsigned new_size;
unsigned int new_size;
gfp_t gfp;
if (*size) {
@ -499,7 +499,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
int dm_split_args(int *argc, char ***argvp, char *input)
{
char *start, *end = input, *out, **argv = NULL;
unsigned array_size = 0;
unsigned int array_size = 0;
*argc = 0;
@ -732,9 +732,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
/*
* Target argument parsing helpers.
*/
static int validate_next_arg(const struct dm_arg *arg,
struct dm_arg_set *arg_set,
unsigned *value, char **error, unsigned grouped)
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned int *value, char **error, unsigned int grouped)
{
const char *arg_str = dm_shift_arg(arg_set);
char dummy;
@ -752,14 +751,14 @@ static int validate_next_arg(const struct dm_arg *arg,
}
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned *value, char **error)
unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned *value, char **error)
unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 1);
}
@ -780,7 +779,7 @@ const char *dm_shift_arg(struct dm_arg_set *as)
}
EXPORT_SYMBOL(dm_shift_arg);
void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
BUG_ON(as->argc < num_args);
as->argc -= num_args;
@ -856,7 +855,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
static int dm_table_determine_type(struct dm_table *t)
{
unsigned bio_based = 0, request_based = 0, hybrid = 0;
unsigned int bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *ti;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@ -1590,7 +1589,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
unsigned *num_devices = data;
unsigned int *num_devices = data;
(*num_devices)++;
@ -1620,7 +1619,7 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
unsigned num_devices = 0;
unsigned int num_devices = 0;
if (!ti->type->iterate_devices)
return false;

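dm_read_arg() above shifts the next token and validates it against a dm_arg {min, max, error} triple; the sscanf(str, "%u%c", &value, &dummy) != 1 pattern seen throughout these files rejects tokens with trailing junk, because a second successful conversion means the string was not purely numeric. A standalone sketch with a hypothetical argument description:

#include <stdio.h>

struct arg { unsigned int min, max; const char *error; };

/* Parse a strictly-numeric token and range-check it, in the style of
 * validate_next_arg(); "%u%c" plus a dummy char fails on trailing text. */
static int read_arg(const struct arg *a, const char *s,
                    unsigned int *value, const char **error)
{
        char dummy;

        if (!s || sscanf(s, "%u%c", value, &dummy) != 1 ||
            *value < a->min || *value > a->max) {
                *error = a->error;
                return -1;
        }
        return 0;
}

int main(void)
{
        static const struct arg nr_paths = { 1, 1024, "invalid number of paths" };
        unsigned int v;
        const char *err = NULL;

        printf("%d\n", read_arg(&nr_paths, "42", &v, &err));  /*  0 */
        printf("%d\n", read_arg(&nr_paths, "42x", &v, &err)); /* -1 */
        return 0;
}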

@ -318,12 +318,12 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*/
typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
{
uint64_t b, begin, end;
uint32_t t;
bool in_run = false;
unsigned i;
unsigned int i;
for (i = 0; i < count; i++, value_le++) {
/* We know value_le is 8 byte aligned */
@ -348,13 +348,13 @@ static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned
fn(sm, begin, end);
}
static void data_block_inc(void *context, const void *value_le, unsigned count)
static void data_block_inc(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_inc_blocks);
}
static void data_block_dec(void *context, const void *value_le, unsigned count)
static void data_block_dec(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_dec_blocks);
@ -374,21 +374,21 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
return b1 == b2;
}
static void subtree_inc(void *context, const void *value, unsigned count)
static void subtree_inc(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
unsigned i;
unsigned int i;
for (i = 0; i < count; i++, root_le++)
dm_tm_inc(info->tm, le64_to_cpu(*root_le));
}
static void subtree_dec(void *context, const void *value, unsigned count)
static void subtree_dec(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
unsigned i;
unsigned int i;
for (i = 0; i < count; i++, root_le++)
if (dm_btree_del(info, le64_to_cpu(*root_le)))
@ -448,10 +448,10 @@ static int superblock_lock(struct dm_pool_metadata *pmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
int r;
unsigned i;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@ -971,7 +971,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
int r;
unsigned open_devices = 0;
unsigned int open_devices = 0;
struct dm_thin_device *td, *tmp;
down_read(&pmd->root_lock);
@ -1679,7 +1679,7 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
unsigned count, total_count = 0;
unsigned int count, total_count = 0;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[1] = { td->id };
__le64 value;

Some files were not shown because too many files have changed in this diff.