Merge 5.10.213 into android12-5.10-lts

Changes in 5.10.213
	mmc: mmci: stm32: use a buffer for unaligned DMA requests
	mmc: mmci: stm32: fix DMA API overlapping mappings warning
	lan78xx: Fix white space and style issues
	lan78xx: Add missing return code checks
	lan78xx: Fix partial packet errors on suspend/resume
	lan78xx: Fix race conditions in suspend/resume handling
	net: lan78xx: fix runtime PM count underflow on link stop
	ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able
	i40e: disable NAPI right after disabling irqs when handling xsk_pool
	tracing/net_sched: Fix tracepoints that save qdisc_dev() as a string
	geneve: make sure to pull inner header in geneve_rx()
	net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink()
	net/ipv6: avoid possible UAF in ip6_route_mpath_notify()
	cpumap: Zero-initialise xdp_rxq_info struct before running XDP program
	net/rds: fix WARNING in rds_conn_connect_if_down
	netfilter: nft_ct: fix l3num expectations with inet pseudo family
	netfilter: nf_conntrack_h323: Add protection for bmp length out of range
	netrom: Fix a data-race around sysctl_netrom_default_path_quality
	netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser
	netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser
	netrom: Fix a data-race around sysctl_netrom_transport_timeout
	netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries
	netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay
	netrom: Fix a data-race around sysctl_netrom_transport_busy_delay
	netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size
	netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout
	netrom: Fix a data-race around sysctl_netrom_routing_control
	netrom: Fix a data-race around sysctl_netrom_link_fails_count
	netrom: Fix data-races around sysctl_net_busy_read
	selftests/mm: switch to bash from sh
	selftests: mm: fix map_hugetlb failure on 64K page size systems
	um: allow not setting extra rpaths in the linux binary
	xhci: remove extra loop in interrupt context
	xhci: prevent double-fetch of transfer and transfer event TRBs
	xhci: process isoc TD properly when there was a transaction error mid TD.
	xhci: handle isoc Babble and Buffer Overrun events properly
	serial: max310x: Use devm_clk_get_optional() to get the input clock
	serial: max310x: Try to get crystal clock rate from property
	serial: max310x: fail probe if clock crystal is unstable
	serial: max310x: Make use of device properties
	serial: max310x: use regmap methods for SPI batch operations
	serial: max310x: use a separate regmap for each port
	serial: max310x: prevent infinite while() loop in port startup
	net: Change sock_getsockopt() to take the sk ptr instead of the sock ptr
	bpf: net: Change sk_getsockopt() to take the sockptr_t argument
	lsm: make security_socket_getpeersec_stream() sockptr_t safe
	lsm: fix default return value of the socket_getpeersec_*() hooks
	ext4: make ext4_es_insert_extent() return void
	ext4: refactor ext4_da_map_blocks()
	ext4: convert to exclusive lock while inserting delalloc extents
	Drivers: hv: vmbus: Add vmbus_requestor data structure for VMBus hardening
	hv_netvsc: Use vmbus_requestor to generate transaction IDs for VMBus hardening
	hv_netvsc: Wait for completion on request SWITCH_DATA_PATH
	hv_netvsc: Process NETDEV_GOING_DOWN on VF hot remove
	hv_netvsc: Make netvsc/VF binding check both MAC and serial number
	hv_netvsc: use netif_is_bond_master() instead of open code
	hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed
	mm/hugetlb: change hugetlb_reserve_pages() to type bool
	mm: hugetlb pages should not be reserved by shmat() if SHM_NORESERVE
	getrusage: add the "signal_struct *sig" local variable
	getrusage: move thread_group_cputime_adjusted() outside of lock_task_sighand()
	getrusage: use __for_each_thread()
	getrusage: use sig->stats_lock rather than lock_task_sighand()
	serial: max310x: Unprepare and disable clock in error path
	Drivers: hv: vmbus: Drop error message when 'No request id available'
	regmap: allow to define reg_update_bits for no bus configuration
	regmap: Add bulk read/write callbacks into regmap_config
	serial: max310x: make accessing revision id interface-agnostic
	serial: max310x: implement I2C support
	serial: max310x: fix IO data corruption in batched operations
	Linux 5.10.213

Change-Id: I3450b2b1b545eeb2e3eb862f39d1846a31d17a0a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 578a3af78b (Greg Kroah-Hartman, 2024-04-12 12:36:57 +00:00)
60 changed files with 1957 additions and 683 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 212
SUBLEVEL = 213
EXTRAVERSION =
NAME = Dare mighty things

View File

@ -92,6 +92,19 @@ config LD_SCRIPT_DYN
depends on !LD_SCRIPT_STATIC
select MODULE_REL_CRCS if MODVERSIONS
config LD_SCRIPT_DYN_RPATH
bool "set rpath in the binary" if EXPERT
default y
depends on LD_SCRIPT_DYN
help
Add /lib (and /lib64 for 64-bit) to the linux binary's rpath
explicitly.
You may need to turn this off if compiling for nix systems
that have their libraries in random /nix directories and
might otherwise unexpectedly use libraries from /lib or /lib64
instead of the desired ones.
config HOSTFS
tristate "Host filesystem"
help

View File

@ -118,7 +118,8 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
-fno-stack-protector $(call cc-option, -fno-stack-protector-all)

View File

@ -44,7 +44,7 @@ ELF_FORMAT := elf64-x86-64
# Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example.
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib64
LINK-y += -m64
endif

View File

@ -104,6 +104,10 @@ struct regmap {
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
int (*reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
/* Bulk read/write */
int (*read)(void *context, const void *reg_buf, size_t reg_size,
void *val_buf, size_t val_size);
int (*write)(void *context, const void *data, size_t count);
bool defer_caching;

View File

@ -800,12 +800,15 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride_order = ilog2(map->reg_stride);
else
map->reg_stride_order = -1;
map->use_single_read = config->use_single_read || !bus || !bus->read;
map->use_single_write = config->use_single_write || !bus || !bus->write;
map->can_multi_write = config->can_multi_write && bus && bus->write;
map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
if (bus) {
map->max_raw_read = bus->max_raw_read;
map->max_raw_write = bus->max_raw_write;
} else if (config->max_raw_read && config->max_raw_write) {
map->max_raw_read = config->max_raw_read;
map->max_raw_write = config->max_raw_write;
}
map->dev = dev;
map->bus = bus;
@ -839,9 +842,19 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
if (!bus) {
if (config && config->read && config->write) {
map->reg_read = _regmap_bus_read;
/* Bulk read/write */
map->read = config->read;
map->write = config->write;
reg_endian = REGMAP_ENDIAN_NATIVE;
val_endian = REGMAP_ENDIAN_NATIVE;
} else if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
@ -855,10 +868,13 @@ struct regmap *__regmap_init(struct device *dev,
} else {
map->reg_read = _regmap_bus_read;
map->reg_update_bits = bus->reg_update_bits;
}
/* Bulk read/write */
map->read = bus->read;
map->write = bus->write;
reg_endian = regmap_get_reg_endian(bus, config);
val_endian = regmap_get_val_endian(dev, bus, config);
reg_endian = regmap_get_reg_endian(bus, config);
val_endian = regmap_get_val_endian(dev, bus, config);
}
switch (config->reg_bits + map->reg_shift) {
case 2:
@ -1627,8 +1643,6 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
size_t len;
int i;
WARN_ON(!map->bus);
/* Check for unwritable or noinc registers in range
* before we start
*/
@ -1710,7 +1724,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
val = work_val;
}
if (map->async && map->bus->async_write) {
if (map->async && map->bus && map->bus->async_write) {
struct regmap_async *async;
trace_regmap_async_write_start(map, reg, val_len);
@ -1778,10 +1792,10 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
* write.
*/
if (val == work_val)
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
val_len);
ret = map->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
val_len);
else if (map->bus->gather_write)
ret = map->bus->gather_write(map->bus_context, map->work_buf,
map->format.reg_bytes +
@ -1800,7 +1814,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
memcpy(buf, map->work_buf, map->format.reg_bytes);
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
ret = map->bus->write(map->bus_context, buf, len);
ret = map->write(map->bus_context, buf, len);
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
@ -1857,7 +1871,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
struct regmap_range_node *range;
struct regmap *map = context;
WARN_ON(!map->bus || !map->format.format_write);
WARN_ON(!map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
@ -1870,8 +1884,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
trace_regmap_hw_write_start(map, reg, 1);
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.buf_size);
ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
trace_regmap_hw_write_done(map, reg, 1);
@ -1891,7 +1904,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
{
struct regmap *map = context;
WARN_ON(!map->bus || !map->format.format_val);
WARN_ON(!map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
@ -1905,7 +1918,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
static inline void *_regmap_map_get_context(struct regmap *map)
{
return (map->bus) ? map : map->bus_context;
return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
}
int _regmap_write(struct regmap *map, unsigned int reg,
@ -2312,7 +2325,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
u8 = buf;
*u8 |= map->write_flag_mask;
ret = map->bus->write(map->bus_context, buf, len);
ret = map->write(map->bus_context, buf, len);
kfree(buf);
@ -2618,9 +2631,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
struct regmap_range_node *range;
int ret;
WARN_ON(!map->bus);
if (!map->bus || !map->bus->read)
if (!map->read)
return -EINVAL;
range = _regmap_range_lookup(map, reg);
@ -2636,9 +2647,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
map->read_flag_mask);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->bus->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
ret = map->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
@ -2749,8 +2760,6 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
if (!map->bus)
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@ -2765,7 +2774,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
if (!map->bus->read) {
if (!map->read) {
ret = -ENOTSUPP;
goto out;
}
@ -2825,7 +2834,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_read);
* @val: Pointer to data buffer
* @val_len: Length of output buffer in bytes.
*
* The regmap API usually assumes that bulk bus read operations will read a
* The regmap API usually assumes that bulk read operations will read a
* range of registers. Some devices have certain registers for which a read
* operation will read from an internal FIFO.
*
@ -2843,10 +2852,6 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
size_t read_len;
int ret;
if (!map->bus)
return -EINVAL;
if (!map->bus->read)
return -ENOTSUPP;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@ -2960,7 +2965,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (val_count == 0)
return -EINVAL;
if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
return ret;

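With this change a bus-less regmap can also service raw and noinc block
transfers: when a regmap_config supplies both a read and a write callback
(plus max_raw_read and max_raw_write, which __regmap_init() only honors as
a pair), map->read/map->write are used wherever bus->read/bus->write were
called before. A minimal sketch of a client using the new hooks; my_dev,
my_dev_recv() and my_dev_send() are hypothetical transport helpers, not
part of this patch:

static int my_bulk_read(void *context, const void *reg_buf, size_t reg_size,
			void *val_buf, size_t val_size)
{
	struct my_dev *mydev = context;

	/* Send the register address, then read val_size bytes back. */
	return my_dev_recv(mydev, reg_buf, reg_size, val_buf, val_size);
}

static int my_bulk_write(void *context, const void *data, size_t count)
{
	struct my_dev *mydev = context;

	/* @data is the register address followed by the raw values. */
	return my_dev_send(mydev, data, count);
}

static const struct regmap_config my_cfg = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.read		= my_bulk_read,
	.write		= my_bulk_write,
	.max_raw_read	= 64,	/* both limits must be set for them to apply */
	.max_raw_write	= 64,
};
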
View File

@ -503,6 +503,70 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
/**
* request_arr_init - Allocates memory for the requestor array. Each slot
* keeps track of the next available slot in the array. Initially, each
* slot points to the next one (as in a Linked List). The last slot
* does not point to anything, so its value is U64_MAX by default.
* @size: The size of the array
*/
static u64 *request_arr_init(u32 size)
{
int i;
u64 *req_arr;
req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
if (!req_arr)
return NULL;
for (i = 0; i < size - 1; i++)
req_arr[i] = i + 1;
/* Last slot (no more available slots) */
req_arr[i] = U64_MAX;
return req_arr;
}
/*
* vmbus_alloc_requestor - Initializes @rqstor's fields.
* Index 0 is the first free slot.
* @rqstor: Pointer to the requestor struct
* @size: Size of the requestor array
*/
static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
{
u64 *rqst_arr;
unsigned long *bitmap;
rqst_arr = request_arr_init(size);
if (!rqst_arr)
return -ENOMEM;
bitmap = bitmap_zalloc(size, GFP_KERNEL);
if (!bitmap) {
kfree(rqst_arr);
return -ENOMEM;
}
rqstor->req_arr = rqst_arr;
rqstor->req_bitmap = bitmap;
rqstor->size = size;
rqstor->next_request_id = 0;
spin_lock_init(&rqstor->req_lock);
return 0;
}
/*
* vmbus_free_requestor - Frees memory allocated for @rqstor
* @rqstor: Pointer to the requestor struct
*/
static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
{
kfree(rqstor->req_arr);
bitmap_free(rqstor->req_bitmap);
}
static int __vmbus_open(struct vmbus_channel *newchannel,
void *userdata, u32 userdatalen,
void (*onchannelcallback)(void *context), void *context)
@ -523,6 +587,12 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (newchannel->state != CHANNEL_OPEN_STATE)
return -EINVAL;
/* Create and init requestor */
if (newchannel->rqstor_size) {
if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
return -ENOMEM;
}
newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@ -626,6 +696,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
vmbus_free_requestor(&newchannel->requestor);
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
@ -808,6 +879,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
channel->ringbuffer_gpadlhandle = 0;
}
if (!ret)
vmbus_free_requestor(&channel->requestor);
return ret;
}
@ -888,7 +962,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
/* in 8-bytes granularity */
desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
desc.len8 = (u16)(packetlen_aligned >> 3);
desc.trans_id = requestid;
desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
@ -897,7 +971,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, num_vecs);
return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid);
}
EXPORT_SYMBOL(vmbus_sendpacket);
@ -939,7 +1013,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;
@ -956,7 +1030,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, 3);
return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@ -983,7 +1057,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;
@ -994,7 +1068,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
return hv_ringbuffer_write(channel, bufferlist, 3);
return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@ -1042,3 +1116,91 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
/*
* vmbus_next_request_id - Returns a new request id. It is also
* the index at which the guest memory address is stored.
* Uses a spin lock to avoid race conditions.
* @rqstor: Pointer to the requestor struct
* @rqst_addr: Guest memory address to be stored in the array
*/
u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
{
unsigned long flags;
u64 current_id;
const struct vmbus_channel *channel =
container_of(rqstor, const struct vmbus_channel, requestor);
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
return VMBUS_NO_RQSTOR;
spin_lock_irqsave(&rqstor->req_lock, flags);
current_id = rqstor->next_request_id;
/* Requestor array is full */
if (current_id >= rqstor->size) {
spin_unlock_irqrestore(&rqstor->req_lock, flags);
return VMBUS_RQST_ERROR;
}
rqstor->next_request_id = rqstor->req_arr[current_id];
rqstor->req_arr[current_id] = rqst_addr;
/* The already held spin lock provides atomicity */
bitmap_set(rqstor->req_bitmap, current_id, 1);
spin_unlock_irqrestore(&rqstor->req_lock, flags);
/*
* Cannot return an ID of 0, which is reserved for an unsolicited
* message from Hyper-V.
*/
return current_id + 1;
}
EXPORT_SYMBOL_GPL(vmbus_next_request_id);
/*
* vmbus_request_addr - Returns the memory address stored at @trans_id
* in @rqstor. Uses a spin lock to avoid race conditions.
* @rqstor: Pointer to the requestor struct
* @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
* next request id.
*/
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id)
{
unsigned long flags;
u64 req_addr;
const struct vmbus_channel *channel =
container_of(rqstor, const struct vmbus_channel, requestor);
/* Check rqstor has been initialized */
if (!channel->rqstor_size)
return VMBUS_NO_RQSTOR;
/* Hyper-V can send an unsolicited message with ID of 0 */
if (!trans_id)
return trans_id;
spin_lock_irqsave(&rqstor->req_lock, flags);
/* Data corresponding to trans_id is stored at trans_id - 1 */
trans_id--;
/* Invalid trans_id */
if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) {
spin_unlock_irqrestore(&rqstor->req_lock, flags);
return VMBUS_RQST_ERROR;
}
req_addr = rqstor->req_arr[trans_id];
rqstor->req_arr[trans_id] = rqstor->next_request_id;
rqstor->next_request_id = trans_id;
/* The already held spin lock provides atomicity */
bitmap_clear(rqstor->req_bitmap, trans_id, 1);
spin_unlock_irqrestore(&rqstor->req_lock, flags);
return req_addr;
}
EXPORT_SYMBOL_GPL(vmbus_request_addr);
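
The requestor threads its free list through the request array itself: a free
slot stores the index of the next free slot, an allocated slot stores the
guest address, and the returned ID is the slot index plus one so that 0 stays
reserved for unsolicited host messages. A stand-alone sketch of the same
allocator, minus the spinlock and validity bitmap of the code above
(hypothetical user-space names):

#include <stdint.h>
#include <stdlib.h>

#define RQST_ERROR UINT64_MAX		/* mirrors U64_MAX / VMBUS_RQST_ERROR */

struct rqstor {
	uint64_t *arr;		/* stored address, or index of next free slot */
	uint64_t next_free;
	uint32_t size;
};

static int rqstor_init(struct rqstor *r, uint32_t size)
{
	uint32_t i;

	if (!size)
		return -1;
	r->arr = calloc(size, sizeof(*r->arr));
	if (!r->arr)
		return -1;
	for (i = 0; i + 1 < size; i++)
		r->arr[i] = i + 1;	/* each free slot points at the next */
	r->arr[size - 1] = RQST_ERROR;	/* last slot: no more free slots */
	r->next_free = 0;
	r->size = size;
	return 0;
}

/* Store @addr and return its 1-based request ID, or RQST_ERROR when full. */
static uint64_t rqstor_alloc(struct rqstor *r, uint64_t addr)
{
	uint64_t id = r->next_free;

	if (id >= r->size)
		return RQST_ERROR;
	r->next_free = r->arr[id];	/* pop the head of the free list */
	r->arr[id] = addr;
	return id + 1;
}

/* Look up the address for @id and push its slot back on the free list. */
static uint64_t rqstor_free(struct rqstor *r, uint64_t id)
{
	uint64_t addr;

	id--;				/* IDs are 1-based, slots 0-based */
	addr = r->arr[id];
	r->arr[id] = r->next_free;
	r->next_free = id;
	return addr;
}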

View File

@ -180,7 +180,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count);
const struct kvec *kv_list, u32 kv_count,
u64 requestid);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,

View File

@ -261,7 +261,8 @@ EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count)
const struct kvec *kv_list, u32 kv_count,
u64 requestid)
{
int i;
u32 bytes_avail_towrite;
@ -271,6 +272,8 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
u64 prev_indices;
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
struct vmpacket_descriptor *desc = kv_list[0].iov_base;
u64 rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
@ -313,6 +316,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
kv_list[i].iov_len);
}
/*
* Allocate the request ID after the data has been copied into the
* ring buffer. Once this request ID is allocated, the completion
* path could find the data and free it.
*/
if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
if (rqst_id == VMBUS_RQST_ERROR) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
}
desc = hv_get_ring_buffer(outring_info) + old_write;
desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
@ -332,8 +351,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
hv_signal_on_write(old_write, channel);
if (channel->rescind)
if (channel->rescind) {
if (rqst_id != VMBUS_NO_RQSTOR) {
/* Reclaim request ID to avoid leak of IDs */
vmbus_request_addr(&channel->requestor, rqst_id);
}
return -ENODEV;
}
return 0;
}

View File

@ -43,6 +43,9 @@ struct sdmmc_lli_desc {
struct sdmmc_idma {
dma_addr_t sg_dma;
void *sg_cpu;
dma_addr_t bounce_dma_addr;
void *bounce_buf;
bool use_bounce_buffer;
};
struct sdmmc_dlyb {
@ -54,6 +57,8 @@ struct sdmmc_dlyb {
static int sdmmc_idma_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
struct sdmmc_idma *idma = host->dma_priv;
struct device *dev = mmc_dev(host->mmc);
struct scatterlist *sg;
int i;
@ -61,41 +66,69 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* idma has constraints on idmabase & idmasize for each element
* except the last element, which has no constraint on idmasize
*/
idma->use_bounce_buffer = false;
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
!IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
dev_err(mmc_dev(host->mmc),
dev_dbg(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
return -EINVAL;
goto use_bounce_buffer;
}
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
dev_err(mmc_dev(host->mmc),
dev_dbg(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
return -EINVAL;
goto use_bounce_buffer;
}
return 0;
use_bounce_buffer:
if (!idma->bounce_buf) {
idma->bounce_buf = dmam_alloc_coherent(dev,
host->mmc->max_req_size,
&idma->bounce_dma_addr,
GFP_KERNEL);
if (!idma->bounce_buf) {
dev_err(dev, "Unable to map allocate DMA bounce buffer.\n");
return -ENOMEM;
}
}
idma->use_bounce_buffer = true;
return 0;
}
static int _sdmmc_idma_prep_data(struct mmci_host *host,
struct mmc_data *data)
{
int n_elem;
struct sdmmc_idma *idma = host->dma_priv;
n_elem = dma_map_sg(mmc_dev(host->mmc),
data->sg,
data->sg_len,
mmc_get_dma_dir(data));
if (idma->use_bounce_buffer) {
if (data->flags & MMC_DATA_WRITE) {
unsigned int xfer_bytes = data->blksz * data->blocks;
if (!n_elem) {
dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
return -EINVAL;
sg_copy_to_buffer(data->sg, data->sg_len,
idma->bounce_buf, xfer_bytes);
dma_wmb();
}
} else {
int n_elem;
n_elem = dma_map_sg(mmc_dev(host->mmc),
data->sg,
data->sg_len,
mmc_get_dma_dir(data));
if (!n_elem) {
dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
return -EINVAL;
}
}
return 0;
}
@ -112,8 +145,19 @@ static int sdmmc_idma_prep_data(struct mmci_host *host,
static void sdmmc_idma_unprep_data(struct mmci_host *host,
struct mmc_data *data, int err)
{
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
struct sdmmc_idma *idma = host->dma_priv;
if (idma->use_bounce_buffer) {
if (data->flags & MMC_DATA_READ) {
unsigned int xfer_bytes = data->blksz * data->blocks;
sg_copy_from_buffer(data->sg, data->sg_len,
idma->bounce_buf, xfer_bytes);
}
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
}
static int sdmmc_idma_setup(struct mmci_host *host)
@ -137,6 +181,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
sizeof(struct sdmmc_lli_desc);
host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
host->mmc->max_req_size = SZ_1M;
} else {
host->mmc->max_segs = 1;
host->mmc->max_seg_size = host->mmc->max_req_size;
@ -154,8 +200,18 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
if (!host->variant->dma_lli || data->sg_len == 1) {
writel_relaxed(sg_dma_address(data->sg),
host->dma_in_progress = true;
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
if (idma->use_bounce_buffer)
dma_addr = idma->bounce_dma_addr;
else
dma_addr = sg_dma_address(data->sg);
writel_relaxed(dma_addr,
host->base + MMCI_STM32_IDMABASE0R);
writel_relaxed(MMCI_STM32_IDMAEN,
host->base + MMCI_STM32_IDMACTRLR);
@ -184,9 +240,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
static void sdmmc_idma_error(struct mmci_host *host)
{
struct mmc_data *data = host->data;
struct sdmmc_idma *idma = host->dma_priv;
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
data->host_cookie = 0;
if (!idma->use_bounce_buffer)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@ -512,6 +589,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
.dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,

View File

@ -12944,9 +12944,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);

View File

@ -6546,6 +6546,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;

View File

@ -2949,8 +2949,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@ -10394,6 +10394,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
}
/**
* ixgbe_irq_disable_single - Disable single IRQ vector
* @adapter: adapter structure
* @ring: ring index
**/
static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u64 qmask = BIT_ULL(ring);
u32 mask;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
mask = qmask & IXGBE_EIMC_RTX_QUEUE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
mask = (qmask >> 32);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
break;
default:
break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
synchronize_irq(adapter->msix_entries[ring].vector);
else
synchronize_irq(adapter->pdev->irq);
}
/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
@ -10410,6 +10448,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
ixgbe_irq_disable_single(adapter, ring);
/* Rx/Tx/XDP Tx share the same napi context. */
napi_disable(&rx_ring->q_vector->napi);
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@ -10418,9 +10461,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
/* Rx/Tx/XDP Tx share the same napi context. */
napi_disable(&rx_ring->q_vector->napi);
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@ -10448,9 +10488,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
/* Rx/Tx/XDP Tx share the same napi context. */
napi_enable(&rx_ring->q_vector->napi);
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@ -10459,6 +10496,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
/* Rx/Tx/XDP Tx share the same napi context. */
napi_enable(&rx_ring->q_vector->napi);
ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**

View File

@ -218,7 +218,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
int err = 0;
int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@ -262,9 +262,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
goto drop;
}
oiph = skb_network_header(skb);
/* Save offset of outer header relative to skb->head,
* because we are going to reset the network header to the inner header
* and might change skb->head.
*/
nh = skb_network_header(skb) - skb->head;
skb_reset_network_header(skb);
if (!pskb_inet_may_pull(skb)) {
DEV_STATS_INC(geneve->dev, rx_length_errors);
DEV_STATS_INC(geneve->dev, rx_errors);
goto drop;
}
/* Get the outer header. */
oiph = skb->head + nh;
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)

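The fix replaces a raw header pointer with an offset: pskb_inet_may_pull() may
reallocate skb->head, which would leave the old oiph pointer dangling. The
same pattern in a self-contained sketch, with plain realloc() standing in for
the pull (hypothetical names):

#include <stdlib.h>

static int parse_outer(char *buf, size_t len, size_t outer_off)
{
	size_t nh = outer_off;		/* save an offset, not a pointer */
	char *grown, *outer_hdr;

	/* May move the buffer, as pskb_inet_may_pull() may move skb->head. */
	grown = realloc(buf, len + 64);
	if (!grown) {
		free(buf);
		return -1;
	}
	buf = grown;			/* any old pointer into buf is now stale */

	outer_hdr = buf + nh;		/* recompute from the saved offset */
	(void)outer_hdr;

	free(buf);
	return 0;
}
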
View File

@ -847,6 +847,19 @@ struct nvsp_message {
#define NETVSC_XDP_HDRM 256
#define NETVSC_MIN_OUT_MSG_SIZE (sizeof(struct vmpacket_descriptor) + \
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
static inline u32 netvsc_rqstor_size(unsigned long ringbytes)
{
return ringbytes / NETVSC_MIN_OUT_MSG_SIZE +
ringbytes / NETVSC_MIN_IN_MSG_SIZE;
}
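
/* Illustrative arithmetic only; the 40-byte nvsp_message size below is a
 * hypothetical stand-in. For a 64 KiB ring with a 16-byte
 * vmpacket_descriptor, the estimate above would give
 *   65536 / (16 + 40) + 65536 / 16 = 1170 + 4096 = 5266 slots.
 */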
#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
(offsetof(struct vmtransfer_page_packet_header, ranges) + \
(rng_cnt) * sizeof(struct vmtransfer_page_range))

View File

@ -37,6 +37,10 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
/* Block sending traffic to VF if it's about to be gone */
if (!vf)
net_device_ctx->data_path_is_vf = vf;
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
if (vf)
@ -51,7 +55,10 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
(unsigned long)init_pkt,
VM_PKT_DATA_INBAND, 0);
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
wait_for_completion(&nv_dev->channel_init_wait);
net_device_ctx->data_path_is_vf = vf;
}
/* Worker to setup sub channels on initial setup
@ -163,7 +170,7 @@ static void netvsc_revoke_recv_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
* ignore the failure since we cannot send on a rescinded
@ -213,7 +220,7 @@ static void netvsc_revoke_send_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
@ -557,7 +564,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
return ret;
@ -614,7 +621,7 @@ static int netvsc_connect_vsp(struct hv_device *device,
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
if (ret != 0)
goto cleanup;
@ -698,10 +705,19 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct sk_buff *skb;
u16 q_idx = 0;
int queue_sends;
u64 cmd_rqst;
cmd_rqst = vmbus_request_addr(&channel->requestor, (u64)desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Incorrect transaction id\n");
return;
}
skb = (struct sk_buff *)(unsigned long)cmd_rqst;
/* Notify the layer above us */
if (likely(skb)) {
@ -748,8 +764,31 @@ static void netvsc_send_completion(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
const struct nvsp_message *nvsp_packet;
u32 msglen = hv_pkt_datalen(desc);
struct nvsp_message *pkt_rqst;
u64 cmd_rqst;
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
(u64)desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Invalid transaction id\n");
return;
}
pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
switch (pkt_rqst->hdr.msg_type) {
case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
complete(&net_device->channel_init_wait);
break;
default:
netdev_err(ndev, "Unexpected VMBUS completion!!\n");
}
return;
}
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
@ -757,6 +796,7 @@ static void netvsc_send_completion(struct net_device *ndev,
return;
}
nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
@ -1530,6 +1570,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);

View File

@ -44,6 +44,10 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
/* Macros to define the context of vf registration */
#define VF_REG_IN_PROBE 1
#define VF_REG_IN_NOTIFIER 2
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@ -2194,7 +2198,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
}
static int netvsc_vf_join(struct net_device *vf_netdev,
struct net_device *ndev)
struct net_device *ndev, int context)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
@ -2217,7 +2221,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
/* If this registration is called from probe context, vf_takeover
* is taken care of later in probe itself.
*/
if (context == VF_REG_IN_NOTIFIER)
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@ -2310,8 +2318,17 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
if (!ndev_ctx->vf_alloc)
continue;
if (ndev_ctx->vf_serial == serial)
return hv_get_drvdata(ndev_ctx->device_ctx);
if (ndev_ctx->vf_serial != serial)
continue;
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ndev->addr_len != vf_netdev->addr_len ||
memcmp(ndev->perm_addr, vf_netdev->perm_addr,
ndev->addr_len) != 0)
continue;
return ndev;
}
/* Fallback path to check synthetic vf with help of mac addr.
@ -2346,7 +2363,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
return NOTIFY_DONE;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
static int netvsc_register_vf(struct net_device *vf_netdev, int context)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
@ -2386,7 +2403,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
if (netvsc_vf_join(vf_netdev, ndev) != 0)
if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
@ -2411,12 +2428,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
* interface, there is only the CHANGE event and no UP or DOWN event.
*/
static int netvsc_vf_changed(struct net_device *vf_netdev)
static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
bool vf_is_up = netif_running(vf_netdev);
bool vf_is_up = false;
if (event != NETDEV_GOING_DOWN)
vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
@ -2429,7 +2449,6 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
net_device_ctx->data_path_is_vf = vf_is_up;
if (vf_is_up && !net_device_ctx->vf_alloc) {
netdev_info(ndev, "Waiting for the VF association from host\n");
@ -2468,10 +2487,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
static int check_dev_is_matching_vf(struct net_device *event_ndev)
{
/* Skip NetVSC interfaces */
if (event_ndev->netdev_ops == &device_ops)
return -ENODEV;
/* Avoid non-Ethernet type devices */
if (event_ndev->type != ARPHRD_ETHER)
return -ENODEV;
/* Avoid Vlan dev with same MAC registering as VF */
if (is_vlan_dev(event_ndev))
return -ENODEV;
/* Avoid Bonding master dev with same MAC registering as VF */
if (netif_is_bond_master(event_ndev))
return -ENODEV;
return 0;
}
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
struct net_device *net = NULL;
struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
@ -2579,6 +2619,30 @@ static int netvsc_probe(struct hv_device *dev,
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
/* When the hv_netvsc driver is unloaded and reloaded, the
* NET_DEVICE_REGISTER for the vf device is replayed before probe
* is complete. This is because register_netdevice_notifier() gets
* registered before vmbus_driver_register() so that callback func
* is set before probe and we don't miss events like NETDEV_POST_INIT.
* So, in this section we try to register the matching vf device that
* is present as a netdevice, knowing that its register call is not
* processed in the netvsc_netdev_notifier (as probing is in progress and
* get_netvsc_byslot fails).
*/
for_each_netdev(dev_net(net), vf_netdev) {
ret = check_dev_is_matching_vf(vf_netdev);
if (ret != 0)
continue;
if (net != get_netvsc_byslot(vf_netdev))
continue;
netvsc_prepare_bonding(vf_netdev);
netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
__netvsc_vf_setup(net, vf_netdev);
break;
}
rtnl_unlock();
netvsc_devinfo_put(device_info);
@ -2735,35 +2799,24 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
/* Skip our own events */
if (event_dev->netdev_ops == &device_ops)
return NOTIFY_DONE;
/* Avoid non-Ethernet type devices */
if (event_dev->type != ARPHRD_ETHER)
return NOTIFY_DONE;
/* Avoid Vlan dev with same MAC registering as VF */
if (is_vlan_dev(event_dev))
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
if ((event_dev->priv_flags & IFF_BONDING) &&
(event_dev->flags & IFF_MASTER))
ret = check_dev_is_matching_vf(event_dev);
if (ret != 0)
return NOTIFY_DONE;
switch (event) {
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
return netvsc_vf_changed(event_dev);
case NETDEV_GOING_DOWN:
return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}

View File

@ -1170,6 +1170,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);

File diff suppressed because it is too large

View File

@ -342,6 +342,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.

View File

@ -14,9 +14,10 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@ -235,6 +237,10 @@
#define MAX310x_REV_MASK (0xf8)
#define MAX310X_WRITE_BIT 0x80
/* Port startup definitions */
#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */
#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */
/* Crystal-related definitions */
#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
@ -249,7 +255,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
struct max310x_if_cfg {
int (*extended_reg_enable)(struct device *dev, bool enable);
unsigned int rev_id_reg;
};
struct max310x_devtype {
struct {
unsigned short min;
unsigned short max;
} slave_addr;
char name[9];
int nr;
u8 mode1;
@ -262,16 +278,16 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
struct regmap *regmap;
u8 wr_header;
u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
container_of(_port, struct max310x_one, port)
struct max310x_port {
struct max310x_devtype *devtype;
const struct max310x_devtype *devtype;
const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@ -293,26 +309,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
regmap_read(s->regmap, port->iobase + reg, &val);
regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
struct max310x_one *one = to_max310x_port(port);
regmap_write(s->regmap, port->iobase + reg, val);
regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
struct max310x_one *one = to_max310x_port(port);
regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@ -361,13 +377,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
MAX310X_EXTREG_ENBL);
ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@ -392,13 +407,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
MAX310X_EXTREG_ENBL);
ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@ -423,6 +437,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x2c,
.max = 0x2f,
},
};
static const struct max310x_devtype max3108_devtype = {
@ -431,6 +449,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static const struct max310x_devtype max3109_devtype = {
@ -439,6 +461,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static const struct max310x_devtype max14830_devtype = {
@ -447,11 +473,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
},
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
switch (reg & 0x1f) {
switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@ -468,7 +498,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
switch (reg & 0x1f) {
switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@ -490,7 +520,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
switch (reg & 0x1f) {
switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@ -503,6 +533,11 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}
static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
{
return reg == MAX310X_RHR_REG;
}
static int max310x_set_baud(struct uart_port *port, int baud)
{
unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
@ -556,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
@ -626,40 +661,25 @@ static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
} while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
if (!stable)
dev_warn(dev, "clock is not stable yet\n");
return dev_err_probe(dev, -EAGAIN,
"clock is not stable\n");
}
return (int)bestfreq;
return bestfreq;
}
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
struct spi_transfer xfer[] = {
{
.tx_buf = &one->wr_header,
.len = sizeof(one->wr_header),
}, {
.tx_buf = txbuf,
.len = len,
}
};
spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
struct spi_transfer xfer[] = {
{
.tx_buf = &one->rd_header,
.len = sizeof(one->rd_header),
}, {
.rx_buf = rxbuf,
.len = len,
}
};
spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@ -1261,16 +1281,18 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
struct regmap *regmap, int irq)
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
const struct max310x_if_cfg *if_cfg,
struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq, uartclk;
struct clk *clk_osc, *clk_xtal;
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
bool xtal = false;
s32 uartclk = 0;
bool xtal;
if (IS_ERR(regmap))
return PTR_ERR(regmap);
for (i = 0; i < devtype->nr; i++)
if (IS_ERR(regmaps[i]))
return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@ -1279,23 +1301,20 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
return -ENOMEM;
}
clk_osc = devm_clk_get(dev, "osc");
clk_xtal = devm_clk_get(dev, "xtal");
if (!IS_ERR(clk_osc)) {
s->clk = clk_osc;
fmin = 500000;
fmax = 35000000;
} else if (!IS_ERR(clk_xtal)) {
s->clk = clk_xtal;
fmin = 1000000;
fmax = 4000000;
xtal = true;
} else if (PTR_ERR(clk_osc) == -EPROBE_DEFER ||
PTR_ERR(clk_xtal) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
s->clk = devm_clk_get_optional(dev, "osc");
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
if (s->clk) {
xtal = false;
} else {
dev_err(dev, "Cannot get clock\n");
return -EINVAL;
s->clk = devm_clk_get_optional(dev, "xtal");
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
xtal = true;
}
ret = clk_prepare_enable(s->clk);
@ -1303,14 +1322,31 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
return ret;
freq = clk_get_rate(s->clk);
if (freq == 0)
freq = uartclk;
if (freq == 0) {
dev_err(dev, "Cannot get clock rate\n");
ret = -EINVAL;
goto out_clk;
}
if (xtal) {
fmin = 1000000;
fmax = 4000000;
} else {
fmin = 500000;
fmax = 35000000;
}
/* Check frequency limits */
if (freq < fmin || freq > fmax) {
ret = -ERANGE;
goto out_clk;
}
s->regmap = regmap;
s->regmap = regmaps[0];
s->devtype = devtype;
s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
@ -1319,25 +1355,38 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
unsigned int offs = i << 5;
bool started = false;
unsigned int try = 0, val = 0;
/* Reset port */
regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
regmap_read(s->regmap,
MAX310X_BRGDIVLSB_REG + offs, &ret);
} while (ret != 0x01);
msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
devtype->mode1);
if (val == 0x01)
started = true;
} while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
if (!started) {
ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
goto out_uart;
}
regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
if (uartclk < 0) {
ret = uartclk;
goto out_uart;
}
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
for (i = 0; i < devtype->nr; i++) {
@ -1357,11 +1406,13 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.iobase = i * 0x20;
s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
s->p[i].port.ops = &max310x_ops;
s->p[i].regmap = regmaps[i];
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@ -1372,10 +1423,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
/* Initialize SPI-transfer buffers */
s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
MAX310X_WRITE_BIT;
s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@ -1462,16 +1509,35 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
.max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
.max_raw_write = MAX310X_FIFO_SIZE,
};
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
{
struct max310x_port *s = dev_get_drvdata(dev);
return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
}
static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
.extended_reg_enable = max310x_spi_extended_reg_enable,
.rev_id_reg = MAX310X_SPI_REVID_EXTREG,
};
static int max310x_spi_probe(struct spi_device *spi)
{
struct max310x_devtype *devtype;
struct regmap *regmap;
const struct max310x_devtype *devtype;
struct regmap *regmaps[4];
unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1482,23 +1548,18 @@ static int max310x_spi_probe(struct spi_device *spi)
if (ret)
return ret;
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
if (!of_id)
return -ENODEV;
devtype = device_get_match_data(&spi->dev);
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
devtype = (struct max310x_devtype *)of_id->data;
} else {
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct max310x_devtype *)id_entry->driver_data;
for (i = 0; i < devtype->nr; i++) {
u8 port_mask = i * 0x20;
regcfg.read_flag_mask = port_mask;
regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
}
regcfg.max_register = devtype->nr * 0x20 - 1;
regmap = devm_regmap_init_spi(spi, &regcfg);
return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
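
The per-port regmaps above lean on the MAX310x register layout: each UART occupies a 0x20-register window, and the SPI command byte is the register address OR'd with that port offset, with bit 7 flagging writes (MAX310X_WRITE_BIT). A minimal user-space sketch of the command-byte arithmetic, assuming the layout just described (max310x_cmd is a hypothetical helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define MAX310X_WRITE_BIT 0x80	/* bit 7 of the SPI command byte */

/* Hypothetical helper: command byte for a register access on a port. */
static uint8_t max310x_cmd(unsigned int port, uint8_t reg, int write)
{
	uint8_t cmd = (uint8_t)(port * 0x20) | reg;	/* 0x20 stride per UART */

	return write ? cmd | MAX310X_WRITE_BIT : cmd;
}

int main(void)
{
	/* read reg 0x00 on port 2 -> 0x40, write reg 0x00 on port 1 -> 0xa0 */
	printf("read  port2: 0x%02x\n", max310x_cmd(2, 0x00, 0));
	printf("write port1: 0x%02x\n", max310x_cmd(1, 0x00, 1));
	return 0;
}

This is exactly what the probe loop encodes into each regmap's read_flag_mask and write_flag_mask, which is why register offsets inside a port no longer need the "+ offs" arithmetic.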
static int max310x_spi_remove(struct spi_device *spi)
@@ -1518,7 +1579,7 @@ MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_spi_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = of_match_ptr(max310x_dt_ids),
.of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
.probe = max310x_spi_probe,
@@ -1527,6 +1588,101 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
#ifdef CONFIG_I2C
static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
{
return 0;
}
static struct regmap_config regcfg_i2c = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
.max_register = MAX310X_I2C_REVID_EXTREG,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
.max_raw_write = MAX310X_FIFO_SIZE,
};
static const struct max310x_if_cfg max310x_i2c_if_cfg = {
.extended_reg_enable = max310x_i2c_extended_reg_enable,
.rev_id_reg = MAX310X_I2C_REVID_EXTREG,
};
static unsigned short max310x_i2c_slave_addr(unsigned short addr,
unsigned int nr)
{
/*
* For MAX14830 and MAX3109, the slave address depends on what the
* A0 and A1 pins are tied to.
* See Table I2C Address Map of the datasheet.
* Based on that table, the following formulas were determined.
* UART1 - UART0 = 0x10
* UART2 - UART1 = 0x20 + 0x10
* UART3 - UART2 = 0x10
*/
addr -= nr * 0x10;
if (nr >= 2)
addr -= 0x20;
return addr;
}
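
A quick worked example of that arithmetic, assuming a UART0 address of 0x6c purely for illustration (port_addr mirrors max310x_i2c_slave_addr() above):

#include <stdio.h>

/* Same subtraction as max310x_i2c_slave_addr() above. */
static unsigned short port_addr(unsigned short addr, unsigned int nr)
{
	addr -= nr * 0x10;
	if (nr >= 2)
		addr -= 0x20;
	return addr;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)	/* 0x6c is an assumed base, for illustration */
		printf("UART%u: 0x%02x\n", i, port_addr(0x6c, i));
	return 0;
}

With that base the ports land at 0x6c, 0x5c, 0x2c and 0x1c, i.e. the 0x10/0x30/0x10 spacing the comment describes.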
static int max310x_i2c_probe(struct i2c_client *client)
{
const struct max310x_devtype *devtype =
device_get_match_data(&client->dev);
struct i2c_client *port_client;
struct regmap *regmaps[4];
unsigned int i;
u8 port_addr;
if (client->addr < devtype->slave_addr.min ||
client->addr > devtype->slave_addr.max)
return dev_err_probe(&client->dev, -EINVAL,
"Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
client->addr, devtype->slave_addr.min,
devtype->slave_addr.max);
regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
for (i = 1; i < devtype->nr; i++) {
port_addr = max310x_i2c_slave_addr(client->addr, i);
port_client = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
port_addr);
regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
}
return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
regmaps, client->irq);
}
static int max310x_i2c_remove(struct i2c_client *client)
{
max310x_remove(&client->dev);
return 0;
}
static struct i2c_driver max310x_i2c_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
.probe_new = max310x_i2c_probe,
.remove = max310x_i2c_remove,
};
#endif
static int __init max310x_uart_init(void)
{
int ret;
@@ -1540,15 +1696,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
uart_unregister_driver(&max310x_uart);
goto err_spi_register;
#endif
#ifdef CONFIG_I2C
ret = i2c_add_driver(&max310x_i2c_driver);
if (ret)
goto err_i2c_register;
#endif
return 0;
#ifdef CONFIG_I2C
err_i2c_register:
spi_unregister_driver(&max310x_spi_driver);
#endif
err_spi_register:
uart_unregister_driver(&max310x_uart);
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
#ifdef CONFIG_I2C
i2c_del_driver(&max310x_i2c_driver);
#endif
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif


@@ -2454,6 +2454,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
/* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
if (td->error_mid_td)
break;
if (remaining) {
frame->status = short_framestatus;
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2469,9 +2472,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_BANDWIDTH_OVERRUN_ERROR:
frame->status = -ECOMM;
break;
case COMP_ISOCH_BUFFER_OVERRUN:
case COMP_BABBLE_DETECTED_ERROR:
sum_trbs_for_length = true;
fallthrough;
case COMP_ISOCH_BUFFER_OVERRUN:
frame->status = -EOVERFLOW;
if (ep_trb != td->last_trb)
td->error_mid_td = true;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
@@ -2479,8 +2486,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
sum_trbs_for_length = true;
if (ep_trb != td->last_trb)
return 0;
td->error_mid_td = true;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
@@ -2500,6 +2508,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
}
if (td->urb_length_set)
goto finish_td;
if (sum_trbs_for_length)
frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
@@ -2508,6 +2519,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length += frame->actual_length;
finish_td:
/* Don't give back TD yet if we encountered an error mid TD */
if (td->error_mid_td && ep_trb != td->last_trb) {
xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
td->urb_length_set = true;
return 0;
}
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
@@ -2885,17 +2904,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
if (!ep_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* Some host controllers give a spurious
* successful event after a short transfer.
* Ignore it.
*/
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
goto cleanup;
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
skip_isoc_td(xhci, td, ep, status);
goto cleanup;
}
/*
* Some hosts give a spurious success event after a short
* transfer. Ignore it.
*/
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
goto cleanup;
}
/*
* xhci 4.10.2 states isoc endpoints should continue
* processing the next TD if there was an error mid TD.
* So hosts like NEC don't generate an event for the last
* isoc TRB even if the IOC flag is set.
* xhci 4.9.1 states that if there are errors in multi-TRB
* TDs xHC should generate an error for that TRB, and if xHC
* proceeds to the next TD it should generate an event for
* any TRB with IOC flag on the way. Other hosts follow this.
* So this event might be for the next TD.
*/
if (td->error_mid_td &&
!list_is_last(&td->td_list, &ep_ring->td_list)) {
struct xhci_td *td_next = list_next_entry(td, td_list);
ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
td_next->last_trb, ep_trb_dma, false);
if (ep_seg) {
/* give back previous TD, start handling new */
xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
ep_ring->dequeue = td->last_trb;
ep_ring->deq_seg = td->last_trb_seg;
inc_deq(xhci, ep_ring);
xhci_td_cleanup(xhci, td, ep_ring, td->status);
td = td_next;
}
}
if (!ep_seg) {
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2907,9 +2960,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_trb_dma, true);
return -ESHUTDOWN;
}
skip_isoc_td(xhci, td, ep, status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
ep_ring->last_td_was_short = true;


@@ -1567,6 +1567,7 @@ struct xhci_td {
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
bool error_mid_td;
unsigned int num_trbs;
};


@@ -3107,8 +3107,9 @@ static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
if (ee_len == 0)
return 0;
return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
EXTENT_STATUS_WRITTEN);
ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
EXTENT_STATUS_WRITTEN);
return 0;
}
/* FIXME!! we need to try to merge to left or right after zero-out */


@@ -846,12 +846,10 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
/*
* ext4_es_insert_extent() adds information to an inode's extent
* status tree.
*
* Return 0 on success, error code on failure.
*/
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status)
void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status)
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
@@ -863,13 +861,13 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
bool revise_pending = false;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
return;
es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
if (!len)
return 0;
return;
BUG_ON(end < lblk);
@@ -938,7 +936,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
goto retry;
ext4_es_print_tree(inode);
return 0;
return;
}
/*


@@ -127,9 +127,9 @@ extern int __init ext4_init_es(void);
extern void ext4_exit_es(void);
extern void ext4_es_init_tree(struct ext4_es_tree *tree);
extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);
extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);
extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);


@@ -590,10 +590,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
}
up_read((&EXT4_I(inode)->i_data_sem));
@@ -702,12 +700,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
}
out_sem:
@@ -1747,11 +1741,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_es_is_hole(&es))
goto add_delayed;
}
/*
* Delayed extent could be allocated by fallocate.
@@ -1793,27 +1784,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
add_delayed:
if (retval == 0) {
int ret;
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
ret = ext4_insert_delayed_block(inode, map->m_lblk);
if (ret != 0) {
retval = ret;
goto out_unlock;
}
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
} else if (retval > 0) {
int ret;
if (retval < 0) {
up_read(&EXT4_I(inode)->i_data_sem);
return retval;
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
@@ -1826,15 +1801,23 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret != 0)
retval = ret;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
up_read(&EXT4_I(inode)->i_data_sem);
return retval;
}
up_read(&EXT4_I(inode)->i_data_sem);
out_unlock:
up_read((&EXT4_I(inode)->i_data_sem));
add_delayed:
down_write(&EXT4_I(inode)->i_data_sem);
retval = ext4_insert_delayed_block(inode, map->m_lblk);
up_write(&EXT4_I(inode)->i_data_sem);
if (retval)
return retval;
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
return retval;
}
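
The locking change follows a classic shape: do the lookup under the shared i_data_sem, and only take it exclusively for the insert, accepting that another writer may have raced in between. A stand-alone pthread sketch of that shape (a toy array stands in for the extent-status tree):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long set[64];
static int set_len;

static bool tree_lookup(unsigned long key)
{
	for (int i = 0; i < set_len; i++)
		if (set[i] == key)
			return true;
	return false;
}

static void map_block(unsigned long key)
{
	pthread_rwlock_rdlock(&tree_lock);
	bool found = tree_lookup(key);
	pthread_rwlock_unlock(&tree_lock);
	if (found)
		return;

	/* Drop the shared lock, insert under the exclusive one, and
	 * re-check because a concurrent writer may have beaten us. */
	pthread_rwlock_wrlock(&tree_lock);
	if (!tree_lookup(key) && set_len < 64)
		set[set_len++] = key;
	pthread_rwlock_unlock(&tree_lock);
}

int main(void)
{
	map_block(42);
	map_block(42);	/* second call takes the read-only fast path */
	printf("%d entries\n", set_len);
	return 0;
}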


@@ -135,6 +135,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
vm_flags_t vm_flags;
/*
* vma address alignment (but not the pgoff alignment) has
@@ -176,10 +177,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
ret = -ENOMEM;
if (hugetlb_reserve_pages(inode,
vm_flags = vma->vm_flags;
/*
* for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
* reserving here. Note: only for SHM hugetlbfs file, the inode
* flag S_PRIVATE is set.
*/
if (inode->i_flags & S_PRIVATE)
vm_flags |= VM_NORESERVE;
if (!hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
vma->vm_flags))
vm_flags))
goto out;
ret = 0;
@@ -1500,7 +1511,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode->i_size = size;
clear_nlink(inode);
if (hugetlb_reserve_pages(inode, 0,
if (!hugetlb_reserve_pages(inode, 0,
size >> huge_page_shift(hstate_inode(inode)), NULL,
acctflag))
file = ERR_PTR(-ENOMEM);


@@ -892,8 +892,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);


@@ -144,7 +144,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
enum mcopy_atomic_mode mode,
struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,


@@ -764,6 +764,23 @@ enum vmbus_device_type {
HV_UNKNOWN,
};
/*
* Provides request ids for VMBus. Encapsulates guest memory
* addresses and stores the next available slot in req_arr
* to generate new ids in constant time.
*/
struct vmbus_requestor {
u64 *req_arr;
unsigned long *req_bitmap; /* is a given slot available? */
u32 size;
u64 next_request_id;
spinlock_t req_lock; /* provides atomicity */
};
#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
struct vmbus_device {
u16 dev_type;
guid_t guid;
@@ -988,8 +1005,14 @@ struct vmbus_channel {
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
/* request/transaction ids for VMBus */
struct vmbus_requestor requestor;
u32 rqstor_size;
};
u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
return !!(c->offermsg.offer.chn_flags &
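
The requestor maps a guest memory address to a small transaction ID so a (possibly malicious) host can never hand back an arbitrary pointer. A toy single-threaded model of the slot table (the real code adds the spinlock; all toy_ names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RQSTOR_SIZE 64
#define RQST_ERROR UINT64_MAX			/* VMBUS_RQST_ERROR analogue */

struct toy_requestor {
	uint64_t req_arr[RQSTOR_SIZE];		/* slot -> stored guest address */
	uint64_t free_bitmap;			/* bit set = slot free */
};

static uint64_t toy_next_request_id(struct toy_requestor *r, uint64_t addr)
{
	if (!r->free_bitmap)
		return RQST_ERROR;		/* table full */
	int slot = __builtin_ctzll(r->free_bitmap);	/* first free slot, O(1) */
	r->free_bitmap &= ~(1ULL << slot);
	r->req_arr[slot] = addr;
	return (uint64_t)slot;
}

static uint64_t toy_request_addr(struct toy_requestor *r, uint64_t id)
{
	if (id >= RQSTOR_SIZE || (r->free_bitmap & (1ULL << id)))
		return RQST_ERROR;		/* unknown or already-freed id */
	r->free_bitmap |= 1ULL << id;		/* slot becomes reusable */
	return r->req_arr[id];
}

int main(void)
{
	struct toy_requestor r = { .free_bitmap = ~0ULL };
	uint64_t id = toy_next_request_id(&r, 0x1000);

	printf("id=%llu addr=0x%llx\n", (unsigned long long)id,
	       (unsigned long long)toy_request_addr(&r, id));
	return 0;
}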


@@ -295,9 +295,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
char __user *optval, int __user *optlen, unsigned len)
LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
sockptr_t optval, sockptr_t optlen, unsigned int len)
LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)


@@ -935,8 +935,8 @@
* SO_GETPEERSEC. For tcp sockets this can be meaningful if the
* socket is associated with an ipsec SA.
* @sock is the local socket.
* @optval userspace memory where the security state is to be copied.
* @optlen userspace int where the module should copy the actual length
* @optval memory where the security state is to be copied.
* @optlen memory where the module should copy the actual length
* of the security state.
* @len as input is the maximum length to copy to userspace provided
* by the caller.


@@ -290,6 +290,17 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
* @reg_update_bits: Optional callback that if filled will be used to perform
* all the update_bits(rmw) operations. Should only be provided
* if the function requires special handling with lock and reg
* handling and the operation cannot be represented as a simple
* update_bits operation on a bus such as SPI, I2C, etc.
* @read: Optional callback that if filled will be used to perform all the
* bulk reads from the registers. Data is returned in the buffer used
* to transmit data.
* @write: Same as above for writing.
* @max_raw_read: Max raw read size that can be used on the device.
* @max_raw_write: Max raw write size that can be used on the device.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
@@ -367,6 +378,14 @@ struct regmap_config {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
int (*reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
/* Bulk read/write */
int (*read)(void *context, const void *reg_buf, size_t reg_size,
void *val_buf, size_t val_size);
int (*write)(void *context, const void *data, size_t count);
size_t max_raw_read;
size_t max_raw_write;
bool fast_io;


@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
struct linux_binprm;
struct cred;
@@ -1374,8 +1375,8 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len);
int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
sockptr_t optlen, unsigned int len);
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
@@ -1509,8 +1510,10 @@ static inline int security_sock_rcv_skb(struct sock *sk,
return 0;
}
static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
static inline int security_socket_getpeersec_stream(struct socket *sock,
sockptr_t optval,
sockptr_t optlen,
unsigned int len)
{
return -ENOPROTOOPT;
}


@@ -64,6 +64,11 @@ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
return 0;
}
static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
{
return copy_to_sockptr_offset(dst, 0, src, size);
}
static inline void *memdup_sockptr(sockptr_t src, size_t len)
{
void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
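
The new copy_to_sockptr() helper is what lets one getsockopt implementation serve both user-space and in-kernel (BPF) callers. A simplified stand-alone model of the idea, assuming a plain struct where the kernel uses a tagged union (all toy_ names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct {
	void *ptr;
	bool is_kernel;	/* the kernel picks copy_to_user() vs memcpy() on this */
} toy_sockptr_t;

static int toy_copy_to_sockptr(toy_sockptr_t dst, const void *src, size_t size)
{
	memcpy(dst.ptr, src, size);	/* kernel-pointer branch only, here */
	return 0;
}

int main(void)
{
	int len = 42, out = 0;
	toy_sockptr_t dst = { .ptr = &out, .is_kernel = true };

	toy_copy_to_sockptr(dst, &len, sizeof(len));
	printf("%d\n", out);
	return 0;
}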


@@ -53,14 +53,14 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
__string( dev, qdisc_dev(q) )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
__string( dev, qdisc_dev(q)->name )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
),
TP_fast_assign(
__assign_str(dev, qdisc_dev(q));
__assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
@@ -78,14 +78,14 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
__string( dev, qdisc_dev(q) )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
__string( dev, qdisc_dev(q)->name )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
),
TP_fast_assign(
__assign_str(dev, qdisc_dev(q));
__assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
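
The bug here is easy to miss: __string() expects a NUL-terminated string, but qdisc_dev(q) is a struct net_device pointer, so the event recorded whatever bytes happen to start that struct. A user-space illustration of the difference (the struct layout is invented for the example):

#include <stdio.h>

struct net_device_like {
	int ifindex;
	char name[16];
};

int main(void)
{
	struct net_device_like dev = { .ifindex = 3, .name = "eth0" };

	/* Buggy pattern: treating the struct pointer as a string reads
	 * the raw leading bytes of the struct (here, the ifindex). */
	printf("bogus: %s\n", (const char *)&dev);
	/* Fixed pattern, as in the tracepoint change: use ->name. */
	printf("fixed: %s\n", dev.name);
	return 0;
}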


@@ -229,7 +229,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
void **frames, int n,
struct xdp_cpumap_stats *stats)
{
struct xdp_rxq_info rxq;
struct xdp_rxq_info rxq = {};
struct xdp_buff xdp;
int i, nframes = 0;
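
The one-line fix matters because the on-stack rxq previously leaked indeterminate stack bytes into the XDP program's view of the RX queue. A minimal illustration of empty-brace zero-initialisation (fields invented for the example; "{}" is a GNU/C23 spelling, "{0}" the portable one):

#include <stdio.h>

struct rxq_info_like {
	unsigned int queue_index;
	void *mem;
};

int main(void)
{
	struct rxq_info_like garbage;		/* fields indeterminate */
	struct rxq_info_like zeroed = {};	/* every field zeroed */

	(void)garbage;				/* reading it would be UB */
	printf("%u %p\n", zeroed.queue_index, zeroed.mem);
	return 0;
}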


@@ -1746,74 +1746,87 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
u64 tgutime, tgstime, utime, stime;
unsigned long maxrss = 0;
unsigned long maxrss;
struct mm_struct *mm;
struct signal_struct *sig = p->signal;
unsigned int seq = 0;
memset((char *)r, 0, sizeof (*r));
retry:
memset(r, 0, sizeof(*r));
utime = stime = 0;
maxrss = 0;
if (who == RUSAGE_THREAD) {
task_cputime_adjusted(current, &utime, &stime);
accumulate_thread_rusage(p, r);
maxrss = p->signal->maxrss;
goto out;
maxrss = sig->maxrss;
goto out_thread;
}
if (!lock_task_sighand(p, &flags))
return;
flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
switch (who) {
case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
utime = p->signal->cutime;
stime = p->signal->cstime;
r->ru_nvcsw = p->signal->cnvcsw;
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
r->ru_inblock = p->signal->cinblock;
r->ru_oublock = p->signal->coublock;
maxrss = p->signal->cmaxrss;
utime = sig->cutime;
stime = sig->cstime;
r->ru_nvcsw = sig->cnvcsw;
r->ru_nivcsw = sig->cnivcsw;
r->ru_minflt = sig->cmin_flt;
r->ru_majflt = sig->cmaj_flt;
r->ru_inblock = sig->cinblock;
r->ru_oublock = sig->coublock;
maxrss = sig->cmaxrss;
if (who == RUSAGE_CHILDREN)
break;
fallthrough;
case RUSAGE_SELF:
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
utime += tgutime;
stime += tgstime;
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
r->ru_inblock += p->signal->inblock;
r->ru_oublock += p->signal->oublock;
if (maxrss < p->signal->maxrss)
maxrss = p->signal->maxrss;
t = p;
do {
r->ru_nvcsw += sig->nvcsw;
r->ru_nivcsw += sig->nivcsw;
r->ru_minflt += sig->min_flt;
r->ru_majflt += sig->maj_flt;
r->ru_inblock += sig->inblock;
r->ru_oublock += sig->oublock;
if (maxrss < sig->maxrss)
maxrss = sig->maxrss;
rcu_read_lock();
__for_each_thread(sig, t)
accumulate_thread_rusage(t, r);
} while_each_thread(p, t);
rcu_read_unlock();
break;
default:
BUG();
}
unlock_task_sighand(p, &flags);
out:
if (need_seqretry(&sig->stats_lock, seq)) {
seq = 1;
goto retry;
}
done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
if (who == RUSAGE_CHILDREN)
goto out_children;
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
utime += tgutime;
stime += tgstime;
out_thread:
mm = get_task_mm(p);
if (mm) {
setmax_mm_hiwater_rss(&maxrss, mm);
mmput(mm);
}
out_children:
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
r->ru_utime = ns_to_kernel_old_timeval(utime);
r->ru_stime = ns_to_kernel_old_timeval(stime);
if (who != RUSAGE_CHILDREN) {
struct mm_struct *mm = get_task_mm(p);
if (mm) {
setmax_mm_hiwater_rss(&maxrss, mm);
mmput(mm);
}
}
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
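
The rewrite swaps the sighand lock for a retry loop on signal->stats_lock: read a snapshot, and if a writer raced, go around again (taking the lock on the second pass). A toy seqcount showing just the retry shape, with the memory barriers a real seqlock needs omitted for brevity:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;	/* odd = writer in progress */
static unsigned long long utime, stime;

static void writer_update(unsigned long long u, unsigned long long s)
{
	atomic_fetch_add(&seq, 1);	/* becomes odd: update started */
	utime = u;
	stime = s;
	atomic_fetch_add(&seq, 1);	/* becomes even: update done */
}

static void reader_snapshot(unsigned long long *u, unsigned long long *s)
{
	unsigned int start;

	do {
		start = atomic_load(&seq);
		*u = utime;
		*s = stime;
	} while ((start & 1) || atomic_load(&seq) != start);
}

int main(void)
{
	unsigned long long u, s;

	writer_update(100, 200);
	reader_snapshot(&u, &s);
	printf("utime=%llu stime=%llu\n", u, s);
	return 0;
}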


@@ -5160,12 +5160,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
return pages << h->order;
}
int hugetlb_reserve_pages(struct inode *inode,
/* Return true if reservation was successful, false otherwise. */
bool hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg, add = -1;
long chg, add = -1;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;
@@ -5175,7 +5176,7 @@ int hugetlb_reserve_pages(struct inode *inode,
/* This should never happen */
if (from > to) {
VM_WARN(1, "%s called with a negative range\n", __func__);
return -EINVAL;
return false;
}
/*
@@ -5184,7 +5185,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
return true;
/*
* Shared mappings base their reservation on the number of pages that
@@ -5206,7 +5207,7 @@ int hugetlb_reserve_pages(struct inode *inode,
/* Private mapping. */
resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
return false;
chg = to - from;
@@ -5214,18 +5215,12 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0) {
ret = chg;
if (chg < 0)
goto out_err;
}
ret = hugetlb_cgroup_charge_cgroup_rsvd(
hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
if (ret < 0) {
ret = -ENOMEM;
if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
chg * pages_per_huge_page(h), &h_cg) < 0)
goto out_err;
}
if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
/* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5240,19 +5235,15 @@ int hugetlb_reserve_pages(struct inode *inode,
* reservations already in place (gbl_reserve).
*/
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
if (gbl_reserve < 0) {
ret = -ENOSPC;
if (gbl_reserve < 0)
goto out_uncharge_cgroup;
}
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
ret = hugetlb_acct_memory(h, gbl_reserve);
if (ret < 0) {
if (hugetlb_acct_memory(h, gbl_reserve) < 0)
goto out_put_pages;
}
/*
* Account for the reservations made. Shared mappings record regions
@@ -5270,7 +5261,6 @@ int hugetlb_reserve_pages(struct inode *inode,
if (unlikely(add < 0)) {
hugetlb_acct_memory(h, -gbl_reserve);
ret = add;
goto out_put_pages;
} else if (unlikely(chg > add)) {
/*
@@ -5303,7 +5293,8 @@ int hugetlb_reserve_pages(struct inode *inode,
hugetlb_cgroup_put_rsvd_cgroup(h_cg);
}
}
return 0;
return true;
out_put_pages:
/* put back original number of pages, chg */
(void)hugepage_subpool_put_pages(spool, chg);
@@ -5319,7 +5310,7 @@ int hugetlb_reserve_pages(struct inode *inode,
region_abort(resv_map, from, to, regions_needed);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
return ret;
return false;
}
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,


@@ -9883,8 +9883,7 @@ int sk_detach_filter(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
unsigned int len)
int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len)
{
struct sock_fprog_kern *fprog;
struct sk_filter *filter;
@@ -9915,7 +9914,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
goto out;
ret = -EFAULT;
if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
if (copy_to_sockptr(optval, fprog->filter, bpf_classic_proglen(fprog)))
goto out;
/* Instead of bytes, the API requests to return the number


@@ -645,8 +645,8 @@ static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
return ret;
}
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
int __user *optlen, int len)
static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
sockptr_t optlen, int len)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
@@ -669,12 +669,12 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
len = strlen(devname) + 1;
ret = -EFAULT;
if (copy_to_user(optval, devname, len))
if (copy_to_sockptr(optval, devname, len))
goto out;
zero:
ret = -EFAULT;
if (put_user(len, optlen))
if (copy_to_sockptr(optlen, &len, sizeof(int)))
goto out;
ret = 0;
@@ -1282,22 +1282,25 @@ static void cred_to_ucred(struct pid *pid, const struct cred *cred,
}
}
static int groups_to_user(gid_t __user *dst, const struct group_info *src)
static int groups_to_user(sockptr_t dst, const struct group_info *src)
{
struct user_namespace *user_ns = current_user_ns();
int i;
for (i = 0; i < src->ngroups; i++)
if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
for (i = 0; i < src->ngroups; i++) {
gid_t gid = from_kgid_munged(user_ns, src->gid[i]);
if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid)))
return -EFAULT;
}
return 0;
}
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
static int sk_getsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, sockptr_t optlen)
{
struct sock *sk = sock->sk;
struct socket *sock = sk->sk_socket;
union {
int val;
@@ -1313,7 +1316,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
int lv = sizeof(int);
int len;
if (get_user(len, optlen))
if (copy_from_sockptr(&len, optlen, sizeof(int)))
return -EFAULT;
if (len < 0)
return -EINVAL;
@@ -1446,7 +1449,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
spin_unlock(&sk->sk_peer_lock);
if (copy_to_user(optval, &peercred, len))
if (copy_to_sockptr(optval, &peercred, len))
return -EFAULT;
goto lenout;
}
@@ -1464,11 +1467,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (len < n * sizeof(gid_t)) {
len = n * sizeof(gid_t);
put_cred(cred);
return put_user(len, optlen) ? -EFAULT : -ERANGE;
return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE;
}
len = n * sizeof(gid_t);
ret = groups_to_user((gid_t __user *)optval, cred->group_info);
ret = groups_to_user(optval, cred->group_info);
put_cred(cred);
if (ret)
return ret;
@@ -1484,7 +1487,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
return -ENOTCONN;
if (lv < len)
return -EINVAL;
if (copy_to_user(optval, address, len))
if (copy_to_sockptr(optval, address, len))
return -EFAULT;
goto lenout;
}
@@ -1501,7 +1504,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PEERSEC:
return security_socket_getpeersec_stream(sock, optval, optlen, len);
return security_socket_getpeersec_stream(sock,
optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
@@ -1529,7 +1533,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
return sock_getbindtodevice(sk, optval, optlen, len);
case SO_GET_FILTER:
len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
len = sk_get_filter(sk, optval, len);
if (len < 0)
return len;
@ -1576,7 +1580,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
sk_get_meminfo(sk, meminfo);
len = min_t(unsigned int, len, sizeof(meminfo));
if (copy_to_user(optval, &meminfo, len))
if (copy_to_sockptr(optval, &meminfo, len))
return -EFAULT;
goto lenout;
@@ -1633,14 +1637,22 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (len > lv)
len = lv;
if (copy_to_user(optval, &v, len))
if (copy_to_sockptr(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
if (copy_to_sockptr(optlen, &len, sizeof(int)))
return -EFAULT;
return 0;
}
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
return sk_getsockopt(sock->sk, level, optname,
USER_SOCKPTR(optval),
USER_SOCKPTR(optlen));
}
/*
* Initialize an sk_lock.
*


@@ -5195,19 +5195,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
err = __ip6_ins_rt(nh->fib6_info, info, extack);
fib6_info_release(nh->fib6_info);
if (!err) {
/* save reference to last route successfully inserted */
rt_last = nh->fib6_info;
/* save reference to first route for notification */
if (!rt_notif)
rt_notif = nh->fib6_info;
}
/* nh->fib6_info is used or freed at this point, reset to NULL*/
nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
NL_SET_ERR_MSG_MOD(extack,
@@ -5215,6 +5203,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = nh;
goto add_errout;
}
/* save reference to last route successfully inserted */
rt_last = nh->fib6_info;
/* save reference to first route for notification */
if (!rt_notif)
rt_notif = nh->fib6_info;
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
@@ -5275,8 +5269,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
if (nh->fib6_info)
fib6_info_release(nh->fib6_info);
fib6_info_release(nh->fib6_info);
list_del(&nh->next);
kfree(nh);
}


@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
/* Get fields bitmap */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
if (f->sz > 32)
return H323_ERROR_RANGE;
bmp = get_bitmap(bs, f->sz);
if (base)
*(unsigned int *)base = bmp;
@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
bmp2_len = get_bits(bs, 7) + 1;
if (nf_h323_error_boundary(bs, 0, bmp2_len))
return H323_ERROR_BOUND;
if (bmp2_len > 32)
return H323_ERROR_RANGE;
bmp2 = get_bitmap(bs, bmp2_len);
bmp |= bmp2 >> f->sz;
if (base)
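
Both new checks guard the same invariant: the decoded bitmap lives in a 32-bit word, so a field or extension count above 32 is unrepresentable and the shifts used to build the mask would be undefined behaviour. A toy version of the guard:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the decode_seq() guard. */
static int toy_decode_bitmap(unsigned int sz, uint32_t *bmp)
{
	if (sz > 32)
		return -1;			/* H323_ERROR_RANGE analogue */
	*bmp = sz ? ~0u << (32 - sz) : 0;	/* top 'sz' bits set */
	return 0;
}

int main(void)
{
	uint32_t bmp;

	if (toy_decode_bitmap(7, &bmp) == 0)
		printf("bmp=0x%08x\n", bmp);	/* 0xfe000000 */
	if (toy_decode_bitmap(33, &bmp) != 0)
		printf("rejected oversized bitmap\n");
	return 0;
}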


@@ -1188,14 +1188,13 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
switch (priv->l3num) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
if (priv->l3num != ctx->family)
return -EINVAL;
if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
break;
fallthrough;
case NFPROTO_INET:
break;
return -EINVAL;
case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
default:
return -EOPNOTSUPP;
return -EAFNOSUPPORT;
}
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);


@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
nr_init_timers(sk);
nr->t1 =
msecs_to_jiffies(sysctl_netrom_transport_timeout);
msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
nr->t2 =
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
nr->n2 =
msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
nr->t4 =
msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
nr->idle =
msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
nr->window = sysctl_netrom_transport_requested_window_size;
msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
nr->bpqext = 1;
nr->state = NR_STATE_0;
@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
if (sysctl_netrom_reset_circuit &&
if (READ_ONCE(sysctl_netrom_reset_circuit) &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
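
All of these netrom fixes share one pattern: the sysctl may be rewritten at any moment, so every lockless reader gets a READ_ONCE() annotation to rule out torn, fused or repeated loads. A stand-alone sketch of the idea (the TOY_ macros mimic the kernel's READ_ONCE/WRITE_ONCE; typeof is a GNU extension):

#include <stdio.h>

#define TOY_READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))
#define TOY_WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

/* Tunable that a sysctl handler may change concurrently. */
static int transport_timeout = 120;

static void timer_setup_path(void)
{
	/* One annotated load: the compiler may neither tear it nor
	 * silently re-read the variable later. */
	int t = TOY_READ_ONCE(transport_timeout);

	printf("using timeout %d\n", t);
}

int main(void)
{
	TOY_WRITE_ONCE(transport_timeout, 180);	/* sysctl write side */
	timer_setup_path();			/* reader side */
	return 0;
}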


@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
*buff++ = sysctl_netrom_network_ttl_initialiser;
*buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;


@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
if (sysctl_netrom_reset_circuit)
if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
if (sysctl_netrom_reset_circuit)
if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -263,7 +263,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
break;
case NR_RESET:
if (sysctl_netrom_reset_circuit)
if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;


@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
*dptr++ = sysctl_netrom_network_ttl_initialiser;
*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);


@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_neigh->digipeat = NULL;
nr_neigh->ax25 = NULL;
nr_neigh->dev = dev;
nr_neigh->quality = sysctl_netrom_default_path_quality;
nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
nr_neigh->locked = 0;
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
@@ -725,7 +725,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
nr_neigh_put(nr_neigh);
return;
}
@@ -763,7 +763,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (ax25 != NULL) {
ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
ax25->ax25_dev->dev, 0,
sysctl_netrom_obsolescence_count_initialiser);
READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
if (ret)
return ret;
}
@@ -777,7 +777,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
return ret;
}
if (!sysctl_netrom_routing_control && ax25 != NULL)
if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
return 0;
/* Its Time-To-Live has expired */


@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
*dptr++ = nr->my_id;
*dptr++ = frametype;
*dptr++ = nr->window;
if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
if (nr->bpqext)
*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
break;
case NR_DISCREQ:
@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
*dptr++ = sysctl_netrom_network_ttl_initialiser;
*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (mine) {
*dptr++ = 0;


@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
kfree(sg);
}
ret = PTR_ERR(trans_private);
/* Trigger connection so that it's ready for the next retry */
if (ret == -ENODEV)
rds_conn_connect_if_down(cp->cp_conn);
goto out;
}


@@ -1314,12 +1314,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
if (ret) {
/* Trigger connection so that it's ready for the next retry */
if (ret == -EAGAIN)
rds_conn_connect_if_down(conn);
if (ret)
goto out;
}
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",


@@ -1070,11 +1070,10 @@ static struct aa_label *sk_peer_label(struct sock *sk)
* Note: for tcp only valid if using ipsec or cipso on lan
*/
static int apparmor_socket_getpeersec_stream(struct socket *sock,
char __user *optval,
int __user *optlen,
sockptr_t optval, sockptr_t optlen,
unsigned int len)
{
char *name;
char *name = NULL;
int slen, error = 0;
struct aa_label *label;
struct aa_label *peer;
@@ -1091,23 +1090,21 @@ static int apparmor_socket_getpeersec_stream(struct socket *sock,
/* don't include terminating \0 in slen, it breaks some apps */
if (slen < 0) {
error = -ENOMEM;
} else {
if (slen > len) {
error = -ERANGE;
} else if (copy_to_user(optval, name, slen)) {
error = -EFAULT;
goto out;
}
if (put_user(slen, optlen))
error = -EFAULT;
out:
kfree(name);
goto done;
}
if (slen > len) {
error = -ERANGE;
goto done_len;
}
if (copy_to_sockptr(optval, name, slen))
error = -EFAULT;
done_len:
if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
error = -EFAULT;
done:
end_current_label_crit_section(label);
kfree(name);
return error;
}


@@ -2196,17 +2196,40 @@ int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(security_sock_rcv_skb);
int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
sockptr_t optlen, unsigned int len)
{
return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
optval, optlen, len);
struct security_hook_list *hp;
int rc;
/*
* Only one module will provide a security context.
*/
hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_stream,
list) {
rc = hp->hook.socket_getpeersec_stream(sock, optval, optlen,
len);
if (rc != LSM_RET_DEFAULT(socket_getpeersec_stream))
return rc;
}
return LSM_RET_DEFAULT(socket_getpeersec_stream);
}
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
skb, secid);
struct security_hook_list *hp;
int rc;
/*
* Only one module will provide a security context.
*/
hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_dgram,
list) {
rc = hp->hook.socket_getpeersec_dgram(sock, skb, secid);
if (rc != LSM_RET_DEFAULT(socket_getpeersec_dgram))
return rc;
}
return LSM_RET_DEFAULT(socket_getpeersec_dgram);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
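
The open-coded loops replace call_int_hook() so a module's verdict is distinguishable from the -ENOPROTOOPT default: the first hook returning anything else wins. A compact model of that dispatch (hook names invented for the example):

#include <stdio.h>

#define DEFAULT_RC (-92)	/* -ENOPROTOOPT, the hook's default */

typedef int (*peersec_hook)(void);

static int hook_declines(void) { return DEFAULT_RC; }
static int hook_handles(void)  { return 0; }

int main(void)
{
	peersec_hook hooks[] = { hook_declines, hook_handles };
	int rc = DEFAULT_RC;

	/* Same shape as the patched security_socket_getpeersec_stream():
	 * walk the registered hooks, stop at the first non-default answer. */
	for (unsigned int i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++) {
		rc = hooks[i]();
		if (rc != DEFAULT_RC)
			break;
	}
	printf("rc=%d\n", rc);
	return 0;
}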


@@ -5169,11 +5169,12 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
}
static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
static int selinux_socket_getpeersec_stream(struct socket *sock,
sockptr_t optval, sockptr_t optlen,
unsigned int len)
{
int err = 0;
char *scontext;
char *scontext = NULL;
u32 scontext_len;
struct sk_security_struct *sksec = sock->sk->sk_security;
u32 peer_sid = SECSID_NULL;
@@ -5189,17 +5190,15 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
&scontext_len);
if (err)
return err;
if (scontext_len > len) {
err = -ERANGE;
goto out_len;
}
if (copy_to_user(optval, scontext, scontext_len))
if (copy_to_sockptr(optval, scontext, scontext_len))
err = -EFAULT;
out_len:
if (put_user(scontext_len, optlen))
if (copy_to_sockptr(optlen, &scontext_len, sizeof(scontext_len)))
err = -EFAULT;
kfree(scontext);
return err;


@@ -4023,12 +4023,12 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
* returns zero on success, an error code otherwise
*/
static int smack_socket_getpeersec_stream(struct socket *sock,
char __user *optval,
int __user *optlen, unsigned len)
sockptr_t optval, sockptr_t optlen,
unsigned int len)
{
struct socket_smack *ssp;
char *rcp = "";
int slen = 1;
u32 slen = 1;
int rc = 0;
ssp = sock->sk->sk_security;
@@ -4036,15 +4036,16 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
rcp = ssp->smk_packet->smk_known;
slen = strlen(rcp) + 1;
}
if (slen > len)
if (slen > len) {
rc = -ERANGE;
else if (copy_to_user(optval, rcp, slen) != 0)
rc = -EFAULT;
goto out_len;
}
if (put_user(slen, optlen) != 0)
if (copy_to_sockptr(optval, rcp, slen))
rc = -EFAULT;
out_len:
if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
rc = -EFAULT;
return rc;
}


@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
set -e


@@ -15,6 +15,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
@@ -70,10 +71,16 @@ int main(int argc, char **argv)
{
void *addr;
int ret;
size_t hugepage_size;
size_t length = LENGTH;
int flags = FLAGS;
int shift = 0;
hugepage_size = default_huge_page_size();
/* munmap will fail if the length is not page aligned */
if (hugepage_size > length)
length = hugepage_size;
if (argc > 1)
length = atol(argv[1]) << 20;
if (argc > 2) {
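
default_huge_page_size() comes from the selftests' vm_util helpers; conceptually it parses the default huge page size out of /proc/meminfo so the test can round its mapping length up to something munmap() will accept on 64K-page systems. A rough stand-alone equivalent (not the actual helper):

#include <stdio.h>

static unsigned long toy_default_huge_page_size(void)
{
	unsigned long kb = 0;
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
			break;
	fclose(f);
	return kb * 1024;	/* bytes */
}

int main(void)
{
	printf("default huge page: %lu bytes\n", toy_default_huge_page_size());
	return 0;
}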


@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
set -e