Merge changes I407c2796,If3f36646,Ia03ea493,I5f0e742b,Ibe09c3b1, ... into android11-5.4-lts
* changes:
  Revert "netfilter: conntrack: allow sctp hearbeat after connection re-use"
  Revert "netfilter: conntrack: don't refresh sctp entries in closed state"
  Revert "netfilter: handle the connecting collision properly in nf_conntrack_proto_sctp"
  Merge 5.4.258 into android11-5.4-lts
  Reapply "netfilter: conntrack: don't refresh sctp entries in closed state"
  Reapply "netfilter: conntrack: allow sctp hearbeat after connection re-use"
commit bbe33b72cc

Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 257
SUBLEVEL = 258
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
for the semaphore. */
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is implemented, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd).
Although the cache control hint is accepted by all PA 2.0 processors,
it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
require 16-byte alignment. If the address is unaligned, the operation
of the instruction is undefined. The ldcw instruction does not generate
unaligned data reference traps so misaligned accesses are not detected.
This hid the problem for years. So, restore the 16-byte alignment dropped
by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is specified, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
#endif /*!CONFIG_PA20*/
#else
#define __LDCW "ldcw"
#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload

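Editorial aside, not part of the patch above: a minimal userspace sketch of the alignment trick the ldcw.h comment describes, i.e. picking the 16-byte aligned word out of a four-int lock array at runtime. The demo_* names are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the pre-PA2.0 arch_spinlock_t: four ints so that at least
 * one of them is guaranteed to sit on a 16-byte boundary. */
struct demo_lock {
	volatile unsigned int lock[4];
};

/* Round the array base up to the next 16-byte boundary, the same idea as
 * __ldcw_align() with __PA_LDCW_ALIGNMENT == 16. */
static volatile unsigned int *demo_ldcw_align(struct demo_lock *l)
{
	uintptr_t addr = ((uintptr_t)&l->lock[0] + 15) & ~(uintptr_t)15;

	return (volatile unsigned int *)addr;
}

int main(void)
{
	struct demo_lock l;

	printf("array at %p, 16-byte aligned word at %p\n",
	       (void *)l.lock, (void *)demo_ldcw_align(&l));
	return 0;
}
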
@@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} arch_spinlock_t;
typedef struct {

@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
(reg - rbnode->base_reg) / map->reg_stride,
value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}

@@ -967,7 +967,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
return pinctrl_gpio_set_config(offset, config);
return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */

@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:

@@ -218,7 +218,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
for (i = 0; i < ports_num; i++) {
char port_str[10];
char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);

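Editorial aside on the buffer-size bumps in this hunk and the later mlx4 sysfs hunk: the largest u32 prints as ten decimal digits, so with the terminating NUL an 11-byte buffer is the smallest that never truncates a "%u"; a signed int can additionally carry a '-' sign, hence 12 bytes for "%d". A small standalone check (variable names are illustrative only):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char port_str[11];                 /* 10 digits + NUL for a u32 */
	unsigned int port = UINT_MAX;      /* worst case: 4294967295 */
	int n = snprintf(port_str, sizeof(port_str), "%u", port);

	/* n is the length snprintf wanted to write; nothing is truncated
	 * as long as n < sizeof(port_str). */
	printf("%u needs %d characters -> \"%s\"\n", port, n, port_str);
	return 0;
}
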
@@ -2080,6 +2080,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,

@@ -633,7 +633,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
if (count < method_elm->req_size + sizeof(hdr)) {
if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't

@@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[11];
char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;

@@ -2053,7 +2053,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
return NULL;
return "Unknown";
}
}

@@ -981,6 +981,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@@ -1105,9 +1106,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
siw_dbg_cep(cep, "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
if (cep->listen_cep) {
siw_dbg_cep(cep,
"no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
}
}
release_cep = 1;
@@ -1230,7 +1234,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state);
siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:

@@ -2149,14 +2149,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
}
}

@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
{
const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err) {
dev_err(chip->dev, "Error reading status");
return;
}
/* If the switch is still resetting, it may not
* respond on the bus, and so MDIO read returns
* 0xffff. Differentiate between that, and waiting for
* the EEPROM to be done by bit 0 being set.
*/
if (val != 0xffff &&
val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
return;
usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5

@@ -277,7 +277,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);

@@ -310,7 +310,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;

@@ -340,6 +340,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;

@@ -111,6 +111,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -128,7 +129,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
if (!dwmac->dev->power.is_suspended) {
if (!dwmac->ops->clk_rx_enable_in_suspend ||
!dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -508,7 +510,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {

@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (unlikely(ret < 0)) {
if (unlikely(ret < 4)) {
ret = ret < 0 ? ret : -ENODATA;
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;

@@ -856,7 +856,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -866,11 +865,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
struct config_item *item;
int ret;
config_item_put(iter->prev_item);
iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -880,12 +877,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!iter->prev_item)
item = config_item_get_unless_zero(&dev->dev_group.cg_item);
if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -908,7 +906,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
config_item_put(iter.prev_item);
return ret;
}

@@ -83,23 +83,13 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
/*
* Lock protecting event handling loop against removing event channels.
* Adding of event channels is no issue as the associated IRQ becomes active
* only after everything is setup (before request_[threaded_]irq() the handler
* can't be entered for an event, as the event channel will be unmasked only
* then).
*/
static DEFINE_RWLOCK(evtchn_rwlock);
/*
* Lock hierarchy:
*
* irq_mapping_update_lock
* evtchn_rwlock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@@ -214,6 +204,22 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
rwork);
unsigned int irq = info->irq;
/* Remove the info pointer only now, with no potential users left. */
set_info_for_irq(irq, NULL);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq >= nr_legacy_irqs())
irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -548,33 +554,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
while (true) {
spin_lock(&eoi->eoi_list_lock);
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
if (info == NULL || now < info->eoi_time) {
spin_unlock(&eoi->eoi_list_lock);
if (info == NULL)
break;
if (now < info->eoi_time) {
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed,
info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
spin_unlock(&eoi->eoi_list_lock);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
if (info)
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, info->eoi_time - now);
spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -589,16 +598,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
unsigned long flags;
read_lock_irqsave(&evtchn_rwlock, flags);
rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
read_unlock_irqrestore(&evtchn_rwlock, flags);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -617,6 +625,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
@@ -669,31 +678,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
unsigned long flags;
if (WARN_ON(!info))
return;
write_lock_irqsave(&evtchn_rwlock, flags);
if (!list_empty(&info->eoi_list))
lateeoi_list_del(info);
list_del(&info->list);
set_info_for_irq(irq, NULL);
WARN_ON(info->refcnt > 0);
write_unlock_irqrestore(&evtchn_rwlock, flags);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < nr_legacy_irqs())
return;
irq_free_desc(irq);
queue_rcu_work(system_wq, &info->rwork);
}
static void xen_evtchn_close(unsigned int port)
@@ -1604,7 +1600,14 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
struct evtchn_loop_ctrl ctrl = { 0 };
read_lock(&evtchn_rwlock);
/*
* When closing an event channel the associated IRQ must not be freed
* until all cpus have left the event handling loop. This is ensured
* by taking the rcu_read_lock() while handling events, as freeing of
* the IRQ is handled via queue_rcu_work() _after_ closing the event
* channel.
*/
rcu_read_lock();
do {
vcpu_info->evtchn_upcall_pending = 0;
@@ -1621,7 +1624,7 @@ static void __xen_evtchn_do_upcall(void)
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
read_unlock(&evtchn_rwlock);
rcu_read_unlock();
/*
* Increment irq_epoch only now to defer EOIs only for

@@ -6,6 +6,7 @@
*/
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
#include <linux/rcupdate.h>
/* Interrupt types. */
enum xen_irq_type {
@@ -31,6 +32,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
struct rcu_work rwork;
short refcnt;
short spurious_cnt;
short type; /* type */

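Editorial aside, not part of the backport itself: the two Xen hunks above replace evtchn_rwlock with RCU, so an irq_info is only kfree()'d after every CPU has left the event-handling loop. Below is a schematic, module-style sketch of that general pattern under stated assumptions; the demo_* names are invented for illustration and do not appear in the kernel change.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* Hypothetical object standing in for struct irq_info. */
struct demo_obj {
	int value;
	struct rcu_work rwork;	/* mirrors the new irq_info::rwork field */
};

static struct demo_obj __rcu *demo_ptr;

/* Runs only after a grace period, i.e. after all readers that might still
 * see the old pointer have finished. */
static void demo_free_cb(struct work_struct *work)
{
	struct demo_obj *obj = container_of(to_rcu_work(work),
					    struct demo_obj, rwork);

	kfree(obj);
}

/* Reader side: rcu_read_lock() takes the place of the old read_lock(). */
static void demo_reader(void)
{
	struct demo_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(demo_ptr);
	if (obj)
		pr_info("demo value=%d\n", obj->value);
	rcu_read_unlock();
}

/* Writer side: unpublish first, then defer the kfree() via
 * queue_rcu_work(), the same shape as the new xen_free_irq(). */
static void demo_remove(void)
{
	struct demo_obj *obj = rcu_dereference_protected(demo_ptr, true);

	if (!obj)
		return;
	RCU_INIT_POINTER(demo_ptr, NULL);
	INIT_RCU_WORK(&obj->rwork, demo_free_cb);
	queue_rcu_work(system_wq, &obj->rwork);
}

static int __init demo_init(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->value = 42;
	rcu_assign_pointer(demo_ptr, obj);
	demo_reader();
	demo_remove();
	return 0;
}

static void __exit demo_exit(void)
{
	rcu_barrier();		/* internal call_rcu callback has run ... */
	flush_scheduled_work();	/* ... and the queued free work finished */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
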
@@ -411,7 +411,9 @@ extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
extern int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data);
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);

@@ -61,6 +61,7 @@
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_STATE
@@ -2525,6 +2526,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
trace_nfs4_state_mgr(clp);
clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
section = "purge state";
@@ -2621,6 +2623,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
&clp->cl_state)) {
memflags = memalloc_nofs_save();
continue;
}
if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
@@ -2641,6 +2650,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
out_error:
if (strlen(section))
section_sep = ": ";
trace_nfs4_state_mgr_failed(clp, section, status);
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);

@@ -563,6 +563,99 @@ TRACE_EVENT(nfs4_setup_sequence,
)
);
TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_RUNNING);
TRACE_DEFINE_ENUM(NFS4CLNT_CHECK_LEASE);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_REBOOT);
TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_NOGRACE);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN);
TRACE_DEFINE_ENUM(NFS4CLNT_SESSION_RESET);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_CONFIRM);
TRACE_DEFINE_ENUM(NFS4CLNT_SERVER_SCOPE_MISMATCH);
TRACE_DEFINE_ENUM(NFS4CLNT_PURGE_STATE);
TRACE_DEFINE_ENUM(NFS4CLNT_BIND_CONN_TO_SESSION);
TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING);
#define show_nfs4_clp_state(state) \
__print_flags(state, "|", \
{ NFS4CLNT_MANAGER_RUNNING, "MANAGER_RUNNING" }, \
{ NFS4CLNT_CHECK_LEASE, "CHECK_LEASE" }, \
{ NFS4CLNT_LEASE_EXPIRED, "LEASE_EXPIRED" }, \
{ NFS4CLNT_RECLAIM_REBOOT, "RECLAIM_REBOOT" }, \
{ NFS4CLNT_RECLAIM_NOGRACE, "RECLAIM_NOGRACE" }, \
{ NFS4CLNT_DELEGRETURN, "DELEGRETURN" }, \
{ NFS4CLNT_SESSION_RESET, "SESSION_RESET" }, \
{ NFS4CLNT_LEASE_CONFIRM, "LEASE_CONFIRM" }, \
{ NFS4CLNT_SERVER_SCOPE_MISMATCH, \
"SERVER_SCOPE_MISMATCH" }, \
{ NFS4CLNT_PURGE_STATE, "PURGE_STATE" }, \
{ NFS4CLNT_BIND_CONN_TO_SESSION, \
"BIND_CONN_TO_SESSION" }, \
{ NFS4CLNT_MOVED, "MOVED" }, \
{ NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \
{ NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
{ NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \
{ NFS4CLNT_DELEGRETURN_RUNNING, "DELEGRETURN_RUNNING" })
TRACE_EVENT(nfs4_state_mgr,
TP_PROTO(
const struct nfs_client *clp
),
TP_ARGS(clp),
TP_STRUCT__entry(
__field(unsigned long, state)
__string(hostname, clp->cl_hostname)
),
TP_fast_assign(
__entry->state = clp->cl_state;
__assign_str(hostname, clp->cl_hostname)
),
TP_printk(
"hostname=%s clp state=%s", __get_str(hostname),
show_nfs4_clp_state(__entry->state)
)
)
TRACE_EVENT(nfs4_state_mgr_failed,
TP_PROTO(
const struct nfs_client *clp,
const char *section,
int status
),
TP_ARGS(clp, section, status),
TP_STRUCT__entry(
__field(unsigned long, error)
__field(unsigned long, state)
__string(hostname, clp->cl_hostname)
__string(section, section)
),
TP_fast_assign(
__entry->error = status;
__entry->state = clp->cl_state;
__assign_str(hostname, clp->cl_hostname);
__assign_str(section, section);
),
TP_printk(
"hostname=%s clp state=%s error=%ld (%s) section=%s",
__get_str(hostname),
show_nfs4_clp_state(__entry->state), -__entry->error,
show_nfsv4_errors(__entry->error), __get_str(section)
)
)
TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
const struct xdr_stream *xdr,

@@ -436,6 +436,41 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
static int __nfs_list_for_each_server(struct list_head *head,
int (*fn)(struct nfs_server *, void *),
void *data)
{
struct nfs_server *server, *last = NULL;
int ret = 0;
rcu_read_lock();
list_for_each_entry_rcu(server, head, client_link) {
if (!(server->super && nfs_sb_active(server->super)))
continue;
rcu_read_unlock();
if (last)
nfs_sb_deactive(last->super);
last = server;
ret = fn(server, data);
if (ret)
goto out;
rcu_read_lock();
}
rcu_read_unlock();
out:
if (last)
nfs_sb_deactive(last->super);
return ret;
}
int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data)
{
return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data);
}
EXPORT_SYMBOL_GPL(nfs_client_for_each_server);
/*
* Deliver file system statistics to userspace
*/

@@ -343,12 +343,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
/* How many ACKs S/ACKing new data have we sent? */
const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */

@@ -933,7 +933,9 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
!time_in_range_open(jiffies, n->used,
n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);

@@ -179,6 +179,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
/* If the skb has a len of exactly 1*MSS and has the PSH bit
* set then it is likely the end of an application write. So
* more data may not be arriving soon, and yet the data sender
* may be waiting for an ACK if cwnd-bound or using TX zero
* copy. So we set ICSK_ACK_PUSHED here so that
* tcp_cleanup_rbuf() will send an ACK immediately if the app
* reads all of the data and is not ping-pong. If len > MSS
* then this logic does not matter (and does not hurt) because
* tcp_cleanup_rbuf() will always ACK immediately if the app
* reads data and there is more than an MSS of unACKed data.
*/
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.

@@ -179,8 +179,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
u32 rcv_nxt)
static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +193,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
if (unlikely(rcv_nxt != tp->rcv_nxt))
return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
tcp_dec_quickack_mode(sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1152,7 +1151,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
tcp_event_ack_sent(sk, rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);

@@ -521,7 +521,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
@@ -645,6 +644,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,

@@ -1646,7 +1646,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
spin_lock(&llcp_devices_lock);
list_add(&local->list, &llcp_devices);
spin_unlock(&llcp_devices_lock);
return 0;
}

@@ -1156,8 +1156,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
!sctp_assoc_add_peer(asoc, &trans->ipaddr,
if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;

@@ -2491,6 +2491,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
sctp_transport_reset_hb_timer(trans);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);

@@ -1455,7 +1455,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
/* First handle the "special" cases */
if (sym_is(name, namelen, "usb"))
do_usb_table(symval, sym->st_size, mod);
if (sym_is(name, namelen, "of"))
else if (sym_is(name, namelen, "of"))
do_of_table(symval, sym->st_size, mod);
else if (sym_is(name, namelen, "pnp"))
do_pnp_device_entry(symval, sym->st_size, mod);

@@ -29,9 +29,11 @@ config IMA
to learn more about IMA.
If unsure, say N.
if IMA
config IMA_KEXEC
bool "Enable carrying the IMA measurement list across a soft boot"
depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
depends on TCG_TPM && HAVE_IMA_KEXEC
default n
help
TPM PCRs are only reset on a hard reboot. In order to validate
@@ -43,7 +45,6 @@ config IMA_KEXEC
config IMA_MEASURE_PCR_IDX
int
depends on IMA
range 8 14
default 10
help
@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
config IMA_LSM_RULES
bool
depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.
@@ -61,7 +62,6 @@ config IMA_LSM_RULES
choice
prompt "Default template"
default IMA_NG_TEMPLATE
depends on IMA
help
Select the default IMA measurement template.
@@ -80,14 +80,12 @@ endchoice
config IMA_DEFAULT_TEMPLATE
string
depends on IMA
default "ima-ng" if IMA_NG_TEMPLATE
default "ima-sig" if IMA_SIG_TEMPLATE
choice
prompt "Default integrity hash algorithm"
default IMA_DEFAULT_HASH_SHA1
depends on IMA
help
Select the default hash algorithm used for the measurement
list, integrity appraisal and audit log. The compiled default
@@ -113,7 +111,6 @@ endchoice
config IMA_DEFAULT_HASH
string
depends on IMA
default "sha1" if IMA_DEFAULT_HASH_SHA1
default "sha256" if IMA_DEFAULT_HASH_SHA256
default "sha512" if IMA_DEFAULT_HASH_SHA512
@@ -121,7 +118,6 @@ config IMA_DEFAULT_HASH
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
depends on IMA
default n
help
IMA policy can now be updated multiple times. The new rules get
@@ -132,7 +128,6 @@ config IMA_WRITE_POLICY
config IMA_READ_POLICY
bool "Enable reading back the current IMA policy"
depends on IMA
default y if IMA_WRITE_POLICY
default n if !IMA_WRITE_POLICY
help
@@ -142,7 +137,6 @@ config IMA_READ_POLICY
config IMA_APPRAISE
bool "Appraise integrity measurements"
depends on IMA
default n
help
This option enables local measurement integrity appraisal.
@@ -263,7 +257,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
config IMA_BLACKLIST_KEYRING
bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
depends on SYSTEM_TRUSTED_KEYRING
depends on IMA_TRUSTED_KEYRING
depends on INTEGRITY_TRUSTED_KEYRING
default n
help
This option creates an IMA blacklist keyring, which contains all
@@ -273,7 +267,7 @@ config IMA_BLACKLIST_KEYRING
config IMA_LOAD_X509
bool "Load X509 certificate onto the '.ima' trusted keyring"
depends on IMA_TRUSTED_KEYRING
depends on INTEGRITY_TRUSTED_KEYRING
default n
help
File signature verification is based on the public keys
@@ -295,3 +289,5 @@ config IMA_APPRAISE_SIGNED_INIT
default n
help
This option requires user-space init to be signed.
endif

@@ -270,14 +270,14 @@ clean:
$(MAKE) -C bench O=$(OUTPUT) clean
install-lib:
install-lib: libcpupower
$(INSTALL) -d $(DESTDIR)${libdir}
$(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
$(INSTALL) -d $(DESTDIR)${includedir}
$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
$(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h
install-tools:
install-tools: $(OUTPUT)cpupower
$(INSTALL) -d $(DESTDIR)${bindir}
$(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
$(INSTALL) -d $(DESTDIR)${bash_completion_dir}
@@ -293,14 +293,14 @@ install-man:
$(INSTALL_DATA) -D man/cpupower-info.1 $(DESTDIR)${mandir}/man1/cpupower-info.1
$(INSTALL_DATA) -D man/cpupower-monitor.1 $(DESTDIR)${mandir}/man1/cpupower-monitor.1
install-gmo:
install-gmo: create-gmo
$(INSTALL) -d $(DESTDIR)${localedir}
for HLANG in $(LANGUAGES); do \
echo '$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
install-bench:
install-bench: compile-bench
@#DESTDIR must be set from outside to survive
@sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install

@@ -27,7 +27,7 @@ $(OUTPUT)cpufreq-bench: $(OBJS)
all: $(OUTPUT)cpufreq-bench
install:
install: $(OUTPUT)cpufreq-bench
mkdir -p $(DESTDIR)/$(sbindir)
mkdir -p $(DESTDIR)/$(bindir)
mkdir -p $(DESTDIR)/$(docdir)