This is the 5.4.200 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKzBvAACgkQONu9yGCS aT7I5A//eBWtBQ7E3MDCDTiZW8RQ+VT0dG87UmqmxJBnafbxuywkvasN3ysJu5Pk xA5k5MM5zbb5YFVlO+BRqjgj4d/CM1sNW5k6/PQgJnlHTYC4oCRFr5FlBLaGT4ER IGIyJciELMJTL02OWIdUFd0yylrDP1tyjpxlmBt3BapY5n4VxqbDQWYHnrZPKAw3 f2wDzMLMwm5M6/W+TihCaJNOp6N/SZRW1j9RquKBiur3CwA2yRpmIIE/LawVtpK3 BDuGYYgmQevi7cV2ZIYp8kb05M9n6WEXFlzy0jq2qLfrsc5+pCiiqtjOTDDzQRFd k/NXRNoRLNmAZn3JPoF6rMyDlV2LPC16tJ+JsBMHB5CMbcv7kdTh9wxdx842UEkR Sck+DRBoErBIIP7uD63fTMTwX7jgC4C7YsZ1abmrY0+ANJ5ribXEKj+XeasDoT5Y kt9IJ3HersxM1e6NEDi3L4z7x9v3LbghUmDzZPHnuSZsb3CKUFwDs3hOf+XxhjWE Hds98MabbBuzqnWR+GIhBhA1C+CiFF3f3sMMK43sY3dhrqs9S3aZMxWdmBK1Us4y D6OK1BCp9oQ5bdstdGtx80HTMU8hUqlu51GMxXolfwcyIKAvLswYzBvGi7Pft2v1 s+z/1kSIAWpeUf14AUir3eCR07QWEyGiLKgn7JrD2p3irWhfBCM= =6xFB -----END PGP SIGNATURE----- Merge 5.4.200 into android11-5.4-lts Changes in 5.4.200 9p: missing chunk of "fs/9p: Don't update file type when updating file attributes" bpf: Fix incorrect memory charge cost calculation in stack_map_alloc() nfc: st21nfca: fix incorrect sizing calculations in EVT_TRANSACTION crypto: blake2s - generic C library implementation and selftest lib/crypto: blake2s: move hmac construction into wireguard lib/crypto: sha1: re-roll loops to reduce code size compat_ioctl: remove /dev/random commands random: don't forget compat_ioctl on urandom random: Don't wake crng_init_wait when crng_init == 1 random: Add a urandom_read_nowait() for random APIs that don't warn random: add GRND_INSECURE to return best-effort non-cryptographic bytes random: ignore GRND_RANDOM in getentropy(2) random: make /dev/random be almost like /dev/urandom random: remove the blocking pool random: delete code to pull data into pools random: remove kernel.random.read_wakeup_threshold random: remove unnecessary unlikely() random: convert to ENTROPY_BITS for better code readability random: Add and use pr_fmt() random: fix typo in add_timer_randomness() random: remove some dead code of poolinfo random: split primary/secondary crng init paths random: avoid warnings for !CONFIG_NUMA builds x86: Remove arch_has_random, arch_has_random_seed powerpc: Remove arch_has_random, arch_has_random_seed s390: Remove arch_has_random, arch_has_random_seed linux/random.h: Remove arch_has_random, arch_has_random_seed linux/random.h: Use false with bool linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check powerpc: Use bool in archrandom.h random: add arch_get_random_*long_early() random: avoid arch_get_random_seed_long() when collecting IRQ randomness random: remove dead code left over from blocking pool MAINTAINERS: co-maintain random.c crypto: blake2s - include <linux/bug.h> instead of <asm/bug.h> crypto: blake2s - adjust include guard naming random: document add_hwgenerator_randomness() with other input functions random: remove unused irq_flags argument from add_interrupt_randomness() random: use BLAKE2s instead of SHA1 in extraction random: do not sign extend bytes for rotation when mixing random: do not re-init if crng_reseed completes before primary init random: mix bootloader randomness into pool random: harmonize "crng init done" messages random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs random: initialize ChaCha20 constants with correct endianness random: early initialization of ChaCha constants random: avoid superfluous call to RDRAND in CRNG extraction random: don't reset crng_init_cnt on urandom_read() random: fix typo in comments random: cleanup poolinfo abstraction random: cleanup integer types random: 
remove incomplete last_data logic random: remove unused extract_entropy() reserved argument random: rather than entropy_store abstraction, use global random: remove unused OUTPUT_POOL constants random: de-duplicate INPUT_POOL constants random: prepend remaining pool constants with POOL_ random: cleanup fractional entropy shift constants random: access input_pool_data directly rather than through pointer random: selectively clang-format where it makes sense random: simplify arithmetic function flow in account() random: continually use hwgenerator randomness random: access primary_pool directly rather than through pointer random: only call crng_finalize_init() for primary_crng random: use computational hash for entropy extraction random: simplify entropy debiting random: use linear min-entropy accumulation crediting random: always wake up entropy writers after extraction random: make credit_entropy_bits() always safe random: remove use_input_pool parameter from crng_reseed() random: remove batched entropy locking random: fix locking in crng_fast_load() random: use RDSEED instead of RDRAND in entropy extraction random: get rid of secondary crngs random: inline leaves of rand_initialize() random: ensure early RDSEED goes through mixer on init random: do not xor RDRAND when writing into /dev/random random: absorb fast pool into input pool after fast load random: use simpler fast key erasure flow on per-cpu keys random: use hash function for crng_slow_load() random: make more consistent use of integer types random: remove outdated INT_MAX >> 6 check in urandom_read() random: zero buffer after reading entropy from userspace random: fix locking for crng_init in crng_reseed() random: tie batched entropy generation to base_crng generation random: remove ifdef'd out interrupt bench random: remove unused tracepoints random: add proper SPDX header random: deobfuscate irq u32/u64 contributions random: introduce drain_entropy() helper to declutter crng_reseed() random: remove useless header comment random: remove whitespace and reorder includes random: group initialization wait functions random: group crng functions random: group entropy extraction functions random: group entropy collection functions random: group userspace read/write functions random: group sysctl functions random: rewrite header introductory comment random: defer fast pool mixing to worker random: do not take pool spinlock at boot random: unify early init crng load accounting random: check for crng_init == 0 in add_device_randomness() random: pull add_hwgenerator_randomness() declaration into random.h random: clear fast pool, crng, and batches in cpuhp bring up random: round-robin registers as ulong, not u32 random: only wake up writers after zap if threshold was passed random: cleanup UUID handling random: unify cycles_t and jiffies usage and types random: do crng pre-init loading in worker rather than irq random: give sysctl_random_min_urandom_seed a more sensible value random: don't let 644 read-only sysctls be written to random: replace custom notifier chain with standard one random: use SipHash as interrupt entropy accumulator random: make consistent usage of crng_ready() random: reseed more often immediately after booting random: check for signal and try earlier when generating entropy random: skip fast_init if hwrng provides large chunk of entropy random: treat bootloader trust toggle the same way as cpu trust toggle random: re-add removed comment about get_random_{u32,u64} reseeding random: mix build-time latent entropy into 
pool at init random: do not split fast init input in add_hwgenerator_randomness() random: do not allow user to keep crng key around on stack random: check for signal_pending() outside of need_resched() check random: check for signals every PAGE_SIZE chunk of /dev/[u]random random: allow partial reads if later user copies fail random: make random_get_entropy() return an unsigned long random: document crng_fast_key_erasure() destination possibility random: fix sysctl documentation nits init: call time_init() before rand_initialize() ia64: define get_cycles macro for arch-override s390: define get_cycles macro for arch-override parisc: define get_cycles macro for arch-override alpha: define get_cycles macro for arch-override powerpc: define get_cycles macro for arch-override timekeeping: Add raw clock fallback for random_get_entropy() m68k: use fallback for random_get_entropy() instead of zero mips: use fallback for random_get_entropy() instead of just c0 random arm: use fallback for random_get_entropy() instead of zero nios2: use fallback for random_get_entropy() instead of zero x86/tsc: Use fallback for random_get_entropy() instead of zero um: use fallback for random_get_entropy() instead of zero sparc: use fallback for random_get_entropy() instead of zero xtensa: use fallback for random_get_entropy() instead of zero random: insist on random_get_entropy() existing in order to simplify random: do not use batches when !crng_ready() random: use first 128 bits of input as fast init random: do not pretend to handle premature next security model random: order timer entropy functions below interrupt functions random: do not use input pool from hard IRQs random: help compiler out with fast_mix() by using simpler arguments siphash: use one source of truth for siphash permutations random: use symbolic constants for crng_init states random: avoid initializing twice in credit race random: move initialization out of reseeding hot path random: remove ratelimiting for in-kernel unseeded randomness random: use proper jiffies comparison macro random: handle latent entropy and command line from random_init() random: credit architectural init the exact amount random: use static branch for crng_ready() random: remove extern from functions in header random: use proper return types on get_random_{int,long}_wait() random: make consistent use of buf and len random: move initialization functions out of hot pages random: move randomize_page() into mm where it belongs random: unify batched entropy implementations random: convert to using fops->read_iter() random: convert to using fops->write_iter() random: wire up fops->splice_{read,write}_iter() random: check for signals after page of pool writes Revert "random: use static branch for crng_ready()" crypto: drbg - always seeded with SP800-90B compliant noise source crypto: drbg - prepare for more fine-grained tracking of seeding state crypto: drbg - track whether DRBG was seeded with !rng_is_initialized() crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed() crypto: drbg - always try to free Jitter RNG instance crypto: drbg - make reseeding from get_random_bytes() synchronous random: avoid checking crng_ready() twice in random_init() random: mark bootloader randomness code as __init random: account for arch randomness in bits powerpc/kasan: Silence KASAN warnings in __get_wchan() ASoC: nau8822: Add operation for internal PLL off and on dma-debug: make things less spammy under memory pressure ASoC: cs42l52: Fix TLV scales for mixer controls ASoC: 
cs35l36: Update digital volume TLV ASoC: cs53l30: Correct number of volume levels on SX controls ASoC: cs42l52: Correct TLV for Bypass Volume ASoC: cs42l56: Correct typo in minimum level for SX volume controls ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo() ASoC: wm8962: Fix suspend while playing music ASoC: es8328: Fix event generation for deemphasis control ASoC: wm_adsp: Fix event generation for wm_adsp_fw_put() scsi: vmw_pvscsi: Expand vcpuHint to 16 bits scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology scsi: lpfc: Allow reduced polling rate for nvme_admin_async_event cmd completion scsi: ipr: Fix missing/incorrect resource cleanup in error case scsi: pmcraid: Fix missing resource cleanup in error case ALSA: hda/realtek - Add HW8326 support virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag random: credit cpu and bootloader seeds by default pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE clocksource: hyper-v: unexport __init-annotated hv_init_clocksource() i40e: Fix adding ADQ filter to TC0 i40e: Fix calculating the number of queue pairs i40e: Fix call trace in setup_tx_descriptors tty: goldfish: Fix free_irq() on remove misc: atmel-ssc: Fix IRQ check in ssc_probe mlxsw: spectrum_cnt: Reorder counter pools net: bgmac: Fix an erroneous kfree() in bgmac_remove() arm64: ftrace: fix branch range checks certs/blacklist_hashes.c: fix const confusion in certs blacklist faddr2line: Fix overlapping text section failures, the sequel irqchip/gic/realview: Fix refcount leak in realview_gic_of_init irqchip/gic-v3: Fix error handling in gic_populate_ppi_partitions irqchip/gic-v3: Fix refcount leak in gic_populate_ppi_partitions i2c: designware: Use standard optional ref clock implementation comedi: vmk80xx: fix expression for tx buffer size USB: serial: option: add support for Cinterion MV31 with new baseline USB: serial: io_ti: add Agilent E5805A support usb: dwc2: Fix memory leak in dwc2_hcd_init usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe serial: 8250: Store to lsr_save_flags after lsr read dm mirror log: round up region bitmap size to BITS_PER_LONG ext4: fix bug_on ext4_mb_use_inode_pa ext4: make variable "count" signed ext4: add reserved GDT blocks check ALSA: hda/realtek: fix mute/micmute LEDs for HP 440 G8 ALSA: hda/realtek: fix right sounds and mute/micmute LEDs for HP machine virtio-pci: Remove wrong address verification in vp_del_vqs() net/sched: act_police: more accurate MTU policing net: openvswitch: fix misuse of the cached connection on tuple changes net: openvswitch: fix leak of nested actions arm64: kprobes: Use BRK instead of single-step when executing instructions out-of-line RISC-V: fix barrier() use in <vdso/processor.h> riscv: Less inefficient gcc tishift helpers (and export their symbols) powerpc/mm: Switch obsolete dssall to .long Linux 5.4.200 Also includes in this merge resolution the following commits from 5.10.y in order to handle merge issues with previous blake2s changes that are in the Android tree: 6048fdcc5f26 ("lib/crypto: blake2s: include as built-in") d2a02e3c8bb6 ("lib/crypto: blake2s: avoid indirect calls to compression function for Clang CFI") e56e18985596 ("lib/crypto: add prompts back to crypto libraries") Signed-off-by: Greg Kroah-Hartman 
<gregkh@google.com>
Change-Id: Ie836943a404704937d2d6575f0f51e1d02d24e55
commit b2351c2368
@@ -3848,6 +3848,12 @@
			fully seed the kernel's CRNG. Default is controlled
			by CONFIG_RANDOM_TRUST_CPU.

	random.trust_bootloader={on,off}
			[KNL] Enable or disable trusting the use of a
			seed passed by the bootloader (if available) to
			fully seed the kernel's CRNG. Default is controlled
			by CONFIG_RANDOM_TRUST_BOOTLOADER.

	ras=option[,option,...]	[KNL] RAS-specific options

		cec_disable	[X86]

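For illustration only (not part of the patch), a boot command line that overrides both trust toggles regardless of the Kconfig defaults would combine the parameters documented above:

    random.trust_cpu=off random.trust_bootloader=off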
@@ -862,9 +862,40 @@ The kernel command line parameter printk.devkmsg= overrides this and is
a one-time setting until next reboot: once set, it cannot be changed by
this sysctl interface anymore.

pty
===

randomize_va_space:
===================
See Documentation/filesystems/devpts.rst.


random
======

This is a directory, with the following entries:

* ``boot_id``: a UUID generated the first time this is retrieved, and
  unvarying after that;

* ``uuid``: a UUID generated every time this is retrieved (this can
  thus be used to generate UUIDs at will);

* ``entropy_avail``: the pool's entropy count, in bits;

* ``poolsize``: the entropy pool size, in bits;

* ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
  number of seconds between urandom pool reseeding). This file is
  writable for compatibility purposes, but writing to it has no effect
  on any RNG behavior;

* ``write_wakeup_threshold``: when the entropy count drops below this
  (as a number of bits), processes waiting to write to ``/dev/random``
  are woken up. This file is writable for compatibility purposes, but
  writing to it has no effect on any RNG behavior.


randomize_va_space
==================

This option can be used to select the type of process address
space randomization that is used in the system, for architectures

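The random entries described above are plain text files under /proc/sys/kernel/random, so they can be read like any other procfs file. A minimal userspace sketch, illustrative only and not part of the patch (the paths follow the documentation above):

    #include <stdio.h>

    int main(void)
    {
    	char buf[64];
    	FILE *f;

    	/* Current entropy estimate of the input pool, in bits. */
    	f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
    	if (f) {
    		if (fgets(buf, sizeof(buf), f))
    			printf("entropy_avail: %s", buf);
    		fclose(f);
    	}

    	/* UUID that stays constant for the lifetime of this boot. */
    	f = fopen("/proc/sys/kernel/random/boot_id", "r");
    	if (f) {
    		if (fgets(buf, sizeof(buf), f))
    			printf("boot_id: %s", buf);
    		fclose(f);
    	}
    	return 0;
    }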
@@ -13652,6 +13652,7 @@ F: arch/mips/configs/generic/board-ranchu.config

RANDOM NUMBER DRIVER
M:	"Theodore Ts'o" <tytso@mit.edu>
M:	Jason A. Donenfeld <Jason@zx2c4.com>
S:	Maintained
F:	drivers/char/random.c

Makefile (2 lines changed)

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 199
SUBLEVEL = 200
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
|
||||
__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
|
||||
return ret;
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#endif
|
||||
|
@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
|
||||
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
|
||||
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
|
||||
obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o
|
||||
obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o
|
||||
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
|
||||
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
|
||||
@ -49,7 +50,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
|
||||
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
|
||||
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
|
||||
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
|
||||
blake2s-arm-y := blake2s-core.o blake2s-glue.o
|
||||
blake2s-arm-y := blake2s-shash.o
|
||||
libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
|
||||
blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
|
||||
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
|
||||
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
|
||||
|
@ -167,8 +167,8 @@
|
||||
.endm
|
||||
|
||||
//
|
||||
// void blake2s_compress_arch(struct blake2s_state *state,
|
||||
// const u8 *block, size_t nblocks, u32 inc);
|
||||
// void blake2s_compress(struct blake2s_state *state,
|
||||
// const u8 *block, size_t nblocks, u32 inc);
|
||||
//
|
||||
// Only the first three fields of struct blake2s_state are used:
|
||||
// u32 h[8]; (inout)
|
||||
@ -176,7 +176,7 @@
|
||||
// u32 f[2]; (in)
|
||||
//
|
||||
.align 5
|
||||
ENTRY(blake2s_compress_arch)
|
||||
ENTRY(blake2s_compress)
|
||||
push {r0-r2,r4-r11,lr} // keep this an even number
|
||||
|
||||
.Lnext_block:
|
||||
@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch)
|
||||
str r3, [r12], #4
|
||||
bne 1b
|
||||
b .Lcopy_block_done
|
||||
ENDPROC(blake2s_compress_arch)
|
||||
ENDPROC(blake2s_compress)
|
||||
|
@ -1,78 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* BLAKE2s digest algorithm, ARM scalar implementation
|
||||
*
|
||||
* Copyright 2020 Google LLC
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
/* defined in blake2s-core.S */
|
||||
EXPORT_SYMBOL(blake2s_compress_arch);
|
||||
|
||||
static int crypto_blake2s_update_arm(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
{ \
|
||||
.base.cra_name = name, \
|
||||
.base.cra_driver_name = driver_name, \
|
||||
.base.cra_priority = 200, \
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
|
||||
.base.cra_module = THIS_MODULE, \
|
||||
.digestsize = digest_size, \
|
||||
.setkey = crypto_blake2s_setkey, \
|
||||
.init = crypto_blake2s_init, \
|
||||
.update = crypto_blake2s_update_arm, \
|
||||
.final = crypto_blake2s_final_arm, \
|
||||
.descsize = sizeof(struct blake2s_state), \
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_arm_algs[] = {
|
||||
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
|
||||
};
|
||||
|
||||
static int __init blake2s_arm_mod_init(void)
|
||||
{
|
||||
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
|
||||
crypto_register_shashes(blake2s_arm_algs,
|
||||
ARRAY_SIZE(blake2s_arm_algs)) : 0;
|
||||
}
|
||||
|
||||
static void __exit blake2s_arm_mod_exit(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
|
||||
crypto_unregister_shashes(blake2s_arm_algs,
|
||||
ARRAY_SIZE(blake2s_arm_algs));
|
||||
}
|
||||
|
||||
module_init(blake2s_arm_mod_init);
|
||||
module_exit(blake2s_arm_mod_exit);
|
||||
|
||||
MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-arm");
|
||||
EXPORT_SYMBOL(blake2s_compress);
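The ARM glue file above is reduced to exporting the library compression function (blake2s_compress, formerly blake2s_compress_arch); the shash wrappers move into the new blake2s-shash.c listed below. In-kernel users that only need a digest can call the library interface from <crypto/blake2s.h> directly. A rough sketch, assuming the one-shot blake2s() helper provided by the lib/crypto backport; the function and buffer names in the example are made up:

    #include <crypto/blake2s.h>
    #include <linux/string.h>

    static void example_hash(const u8 *data, size_t len)
    {
    	u8 digest[BLAKE2S_HASH_SIZE];

    	/* Unkeyed, full-length BLAKE2s digest of "data". */
    	blake2s(digest, data, NULL, sizeof(digest), len, 0);

    	/* ... use digest ... */
    	memzero_explicit(digest, sizeof(digest));
    }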
arch/arm/crypto/blake2s-shash.c (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* BLAKE2s digest algorithm, ARM scalar implementation
|
||||
*
|
||||
* Copyright 2020 Google LLC
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
static int crypto_blake2s_update_arm(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, false);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, false);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
{ \
|
||||
.base.cra_name = name, \
|
||||
.base.cra_driver_name = driver_name, \
|
||||
.base.cra_priority = 200, \
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
|
||||
.base.cra_module = THIS_MODULE, \
|
||||
.digestsize = digest_size, \
|
||||
.setkey = crypto_blake2s_setkey, \
|
||||
.init = crypto_blake2s_init, \
|
||||
.update = crypto_blake2s_update_arm, \
|
||||
.final = crypto_blake2s_final_arm, \
|
||||
.descsize = sizeof(struct blake2s_state), \
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_arm_algs[] = {
|
||||
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
|
||||
};
|
||||
|
||||
static int __init blake2s_arm_mod_init(void)
|
||||
{
|
||||
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
|
||||
crypto_register_shashes(blake2s_arm_algs,
|
||||
ARRAY_SIZE(blake2s_arm_algs)) : 0;
|
||||
}
|
||||
|
||||
static void __exit blake2s_arm_mod_exit(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
|
||||
crypto_unregister_shashes(blake2s_arm_algs,
|
||||
ARRAY_SIZE(blake2s_arm_algs));
|
||||
}
|
||||
|
||||
module_init(blake2s_arm_mod_init);
|
||||
module_exit(blake2s_arm_mod_exit);
|
||||
|
||||
MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-arm");
|
@ -11,5 +11,6 @@
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
|
||||
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
|
||||
|
||||
#endif
|
||||
|
@ -10,6 +10,7 @@
|
||||
* #imm16 values used for BRK instruction generation
|
||||
* 0x004: for installing kprobes
|
||||
* 0x005: for installing uprobes
|
||||
* 0x006: for kprobe software single-step
|
||||
* Allowed values for kgdb are 0x400 - 0x7ff
|
||||
* 0x100: for triggering a fault on purpose (reserved)
|
||||
* 0x400: for dynamic BRK instruction
|
||||
@ -19,6 +20,7 @@
|
||||
*/
|
||||
#define KPROBES_BRK_IMM 0x004
|
||||
#define UPROBES_BRK_IMM 0x005
|
||||
#define KPROBES_BRK_SS_IMM 0x006
|
||||
#define FAULT_BRK_IMM 0x100
|
||||
#define KGDB_DYN_DBG_BRK_IMM 0x400
|
||||
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
|
||||
|
@ -53,6 +53,7 @@
|
||||
|
||||
/* kprobes BRK opcodes with ESR encoding */
|
||||
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
|
||||
#define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
|
||||
/* uprobes BRK opcodes with ESR encoding */
|
||||
#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
|
||||
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include <linux/percpu.h>
|
||||
|
||||
#define __ARCH_WANT_KPROBES_INSN_SLOT
|
||||
#define MAX_INSN_SIZE 1
|
||||
#define MAX_INSN_SIZE 2
|
||||
|
||||
#define flush_insn_slot(p) do { } while (0)
|
||||
#define kretprobe_blacklist_size 0
|
||||
|
@ -69,7 +69,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
unsigned long pc = rec->ip;
|
||||
u32 old, new;
|
||||
long offset = (long)pc - (long)addr;
|
||||
long offset = (long)addr - (long)pc;
|
||||
|
||||
if (offset < -SZ_128M || offset >= SZ_128M) {
|
||||
#ifdef CONFIG_ARM64_MODULE_PLTS
|
||||
@ -126,7 +126,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long pc = rec->ip;
|
||||
bool validate = true;
|
||||
u32 old = 0, new;
|
||||
long offset = (long)pc - (long)addr;
|
||||
long offset = (long)addr - (long)pc;
|
||||
|
||||
if (offset < -SZ_128M || offset >= SZ_128M) {
|
||||
#ifdef CONFIG_ARM64_MODULE_PLTS
|
||||
|
@ -36,25 +36,16 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
|
||||
static void __kprobes
|
||||
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
|
||||
|
||||
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
|
||||
{
|
||||
void *addrs[1];
|
||||
u32 insns[1];
|
||||
|
||||
addrs[0] = addr;
|
||||
insns[0] = opcode;
|
||||
|
||||
return aarch64_insn_patch_text(addrs, insns, 1);
|
||||
}
|
||||
|
||||
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
|
||||
{
|
||||
/* prepare insn slot */
|
||||
patch_text(p->ainsn.api.insn, p->opcode);
|
||||
kprobe_opcode_t *addr = p->ainsn.api.insn;
|
||||
void *addrs[] = {addr, addr + 1};
|
||||
u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
|
||||
|
||||
flush_icache_range((uintptr_t) (p->ainsn.api.insn),
|
||||
(uintptr_t) (p->ainsn.api.insn) +
|
||||
MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
|
||||
/* prepare insn slot */
|
||||
aarch64_insn_patch_text(addrs, insns, 2);
|
||||
|
||||
flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
|
||||
|
||||
/*
|
||||
* Needs restoring of return address after stepping xol.
|
||||
@ -134,13 +125,18 @@ void *alloc_insn_page(void)
|
||||
/* arm kprobe: install breakpoint in text */
|
||||
void __kprobes arch_arm_kprobe(struct kprobe *p)
|
||||
{
|
||||
patch_text(p->addr, BRK64_OPCODE_KPROBES);
|
||||
void *addr = p->addr;
|
||||
u32 insn = BRK64_OPCODE_KPROBES;
|
||||
|
||||
aarch64_insn_patch_text(&addr, &insn, 1);
|
||||
}
|
||||
|
||||
/* disarm kprobe: remove breakpoint from text */
|
||||
void __kprobes arch_disarm_kprobe(struct kprobe *p)
|
||||
{
|
||||
patch_text(p->addr, p->opcode);
|
||||
void *addr = p->addr;
|
||||
|
||||
aarch64_insn_patch_text(&addr, &p->opcode, 1);
|
||||
}
|
||||
|
||||
void __kprobes arch_remove_kprobe(struct kprobe *p)
|
||||
@ -169,20 +165,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupts need to be disabled before single-step mode is set, and not
|
||||
* reenabled until after single-step mode ends.
|
||||
* Without disabling interrupt on local CPU, there is a chance of
|
||||
* interrupt occurrence in the period of exception return and start of
|
||||
* out-of-line single-step, that result in wrongly single stepping
|
||||
* into the interrupt handler.
|
||||
* Mask all of DAIF while executing the instruction out-of-line, to keep things
|
||||
* simple and avoid nesting exceptions. Interrupts do have to be disabled since
|
||||
* the kprobe state is per-CPU and doesn't get migrated.
|
||||
*/
|
||||
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
kcb->saved_irqflag = regs->pstate & DAIF_MASK;
|
||||
regs->pstate |= PSR_I_BIT;
|
||||
/* Unmask PSTATE.D for enabling software step exceptions. */
|
||||
regs->pstate &= ~PSR_D_BIT;
|
||||
regs->pstate |= DAIF_MASK;
|
||||
}
|
||||
|
||||
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
|
||||
@ -225,10 +216,7 @@ static void __kprobes setup_singlestep(struct kprobe *p,
|
||||
slot = (unsigned long)p->ainsn.api.insn;
|
||||
|
||||
set_ss_context(kcb, slot); /* mark pending ss */
|
||||
|
||||
/* IRQs and single stepping do not mix well. */
|
||||
kprobes_save_local_irqflag(kcb, regs);
|
||||
kernel_enable_single_step(regs);
|
||||
instruction_pointer_set(regs, slot);
|
||||
} else {
|
||||
/* insn simulation */
|
||||
@ -279,12 +267,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
|
||||
}
|
||||
/* call post handler */
|
||||
kcb->kprobe_status = KPROBE_HIT_SSDONE;
|
||||
if (cur->post_handler) {
|
||||
/* post_handler can hit breakpoint and single step
|
||||
* again, so we enable D-flag for recursive exception.
|
||||
*/
|
||||
if (cur->post_handler)
|
||||
cur->post_handler(cur, regs, 0);
|
||||
}
|
||||
|
||||
reset_current_kprobe();
|
||||
}
|
||||
@ -308,8 +292,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
|
||||
if (!instruction_pointer(regs))
|
||||
BUG();
|
||||
|
||||
kernel_disable_single_step();
|
||||
|
||||
if (kcb->kprobe_status == KPROBE_REENTER)
|
||||
restore_previous_kprobe(kcb);
|
||||
else
|
||||
@ -371,10 +353,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
|
||||
* pre-handler and it returned non-zero, it will
|
||||
* modify the execution path and no need to single
|
||||
* stepping. Let's just reset current kprobe and exit.
|
||||
*
|
||||
* pre_handler can hit a breakpoint and can step thru
|
||||
* before return, keep PSTATE D-flag enabled until
|
||||
* pre_handler return back.
|
||||
*/
|
||||
if (!p->pre_handler || !p->pre_handler(p, regs)) {
|
||||
setup_singlestep(p, regs, kcb, 0);
|
||||
@ -405,7 +383,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
|
||||
}
|
||||
|
||||
static int __kprobes
|
||||
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
||||
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
int retval;
|
||||
@ -415,16 +393,15 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
||||
|
||||
if (retval == DBG_HOOK_HANDLED) {
|
||||
kprobes_restore_local_irqflag(kcb, regs);
|
||||
kernel_disable_single_step();
|
||||
|
||||
post_kprobe_handler(kcb, regs);
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static struct step_hook kprobes_step_hook = {
|
||||
.fn = kprobe_single_step_handler,
|
||||
static struct break_hook kprobes_break_ss_hook = {
|
||||
.imm = KPROBES_BRK_SS_IMM,
|
||||
.fn = kprobe_breakpoint_ss_handler,
|
||||
};
|
||||
|
||||
static int __kprobes
|
||||
@ -568,7 +545,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
|
||||
int __init arch_init_kprobes(void)
|
||||
{
|
||||
register_kernel_break_hook(&kprobes_break_hook);
|
||||
register_kernel_step_hook(&kprobes_step_hook);
|
||||
register_kernel_break_hook(&kprobes_break_ss_hook);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -39,6 +39,7 @@ get_cycles (void)
|
||||
ret = ia64_getreg(_IA64_REG_AR_ITC);
|
||||
return ret;
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
extern void ia64_cpu_local_tick (void);
|
||||
extern unsigned long long ia64_native_sched_clock (void);
|
||||
|
@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
if (mach_random_get_entropy)
|
||||
return mach_random_get_entropy();
|
||||
return 0;
|
||||
return random_get_entropy_fallback();
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
|
@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
|
||||
else
|
||||
return 0; /* no usable counter */
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
/*
|
||||
* Like get_cycles - but where c0_count is not available we desperately
|
||||
* use c0_random in an attempt to get at least a little bit of entropy.
|
||||
*
|
||||
* R6000 and R6000A neither have a count register nor a random register.
|
||||
* That leaves no entropy source in the CPU itself.
|
||||
*/
|
||||
static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
unsigned int prid = read_c0_prid();
|
||||
unsigned int imp = prid & PRID_IMP_MASK;
|
||||
unsigned int c0_random;
|
||||
|
||||
if (can_use_mips_counter(prid))
|
||||
if (can_use_mips_counter(read_c0_prid()))
|
||||
return read_c0_count();
|
||||
else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
|
||||
return read_c0_random();
|
||||
|
||||
if (cpu_has_3kex)
|
||||
c0_random = (read_c0_random() >> 8) & 0x3f;
|
||||
else
|
||||
return 0; /* no usable register */
|
||||
c0_random = read_c0_random() & 0x3f;
|
||||
return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
|
@ -8,5 +8,8 @@
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
extern cycles_t get_cycles(void);
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
|
||||
|
||||
#endif
|
||||
|
@ -12,9 +12,10 @@
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline cycles_t get_cycles (void)
|
||||
static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return mfctl(16);
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#endif
|
||||
|
@ -6,27 +6,28 @@
|
||||
|
||||
#include <asm/machdep.h>
|
||||
|
||||
static inline int arch_get_random_long(unsigned long *v)
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int arch_get_random_int(unsigned int *v)
|
||||
static inline bool arch_get_random_int(unsigned int *v)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int arch_get_random_seed_long(unsigned long *v)
|
||||
static inline bool arch_get_random_seed_long(unsigned long *v)
|
||||
{
|
||||
if (ppc_md.get_random_seed)
|
||||
return ppc_md.get_random_seed(v);
|
||||
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
static inline int arch_get_random_seed_int(unsigned int *v)
|
||||
|
||||
static inline bool arch_get_random_seed_int(unsigned int *v)
|
||||
{
|
||||
unsigned long val;
|
||||
int rc;
|
||||
bool rc;
|
||||
|
||||
rc = arch_get_random_seed_long(&val);
|
||||
if (rc)
|
||||
@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static inline int arch_has_random(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int arch_has_random_seed(void)
|
||||
{
|
||||
return !!ppc_md.get_random_seed;
|
||||
}
|
||||
#endif /* CONFIG_ARCH_RANDOM */
|
||||
|
||||
#ifdef CONFIG_PPC_POWERNV
|
||||
|
@ -204,6 +204,7 @@
|
||||
#define PPC_INST_ICBT 0x7c00002c
|
||||
#define PPC_INST_ICSWX 0x7c00032d
|
||||
#define PPC_INST_ICSWEPX 0x7c00076d
|
||||
#define PPC_INST_DSSALL 0x7e00066c
|
||||
#define PPC_INST_ISEL 0x7c00001e
|
||||
#define PPC_INST_ISEL_MASK 0xfc00003e
|
||||
#define PPC_INST_LDARX 0x7c0000a8
|
||||
@ -439,6 +440,7 @@
|
||||
__PPC_RA(a) | __PPC_RB(b))
|
||||
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
|
||||
__PPC_RA(a) | __PPC_RB(b))
|
||||
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
|
||||
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \
|
||||
___PPC_RT(t) | ___PPC_RA(a) | \
|
||||
___PPC_RB(b) | __PPC_EH(eh))
|
||||
|
@ -22,6 +22,7 @@ static inline cycles_t get_cycles(void)
|
||||
|
||||
return mftb();
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_POWERPC_TIMEX_H */
|
||||
|
@ -129,7 +129,7 @@ BEGIN_FTR_SECTION
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
|
||||
mtspr SPRN_HID0,r4
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
sync
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */
|
||||
|
@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
|
||||
|
||||
/* Stop DST streams */
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
sync
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
|
||||
@ -293,7 +293,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
|
||||
isync
|
||||
|
||||
/* Stop DST streams */
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
sync
|
||||
|
||||
/* Get the current enable bit of the L3CR into r4 */
|
||||
@ -402,7 +402,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
|
||||
_GLOBAL(__flush_disable_L1)
|
||||
/* Stop pending alitvec streams and memory accesses */
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
sync
|
||||
|
||||
|
@ -2001,12 +2001,12 @@ static unsigned long __get_wchan(struct task_struct *p)
|
||||
return 0;
|
||||
|
||||
do {
|
||||
sp = *(unsigned long *)sp;
|
||||
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
|
||||
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
|
||||
p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
if (count > 0) {
|
||||
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
|
||||
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
|
||||
if (!in_sched_functions(ip))
|
||||
return ip;
|
||||
}
|
||||
|
@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
/* Stop pending alitvec streams and memory accesses */
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
#endif
|
||||
sync
|
||||
|
@ -142,7 +142,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
|
||||
_GLOBAL(swsusp_arch_resume)
|
||||
/* Stop pending alitvec streams and memory accesses */
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
sync
|
||||
|
||||
|
@ -79,7 +79,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
|
||||
* context
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
||||
asm volatile ("dssall");
|
||||
asm volatile (PPC_DSSALL);
|
||||
|
||||
if (new_on_cpu)
|
||||
radix_kvm_prefetch_workaround(next);
|
||||
|
@ -48,7 +48,7 @@ flush_disable_75x:
|
||||
|
||||
/* Stop DST streams */
|
||||
BEGIN_FTR_SECTION
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
sync
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||
|
||||
@ -196,7 +196,7 @@ flush_disable_745x:
|
||||
isync
|
||||
|
||||
/* Stop prefetch streams */
|
||||
DSSALL
|
||||
PPC_DSSALL
|
||||
sync
|
||||
|
||||
/* Disable L2 prefetching */
|
||||
|
@ -4,4 +4,8 @@
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm-generic/asm-prototypes.h>
|
||||
|
||||
long long __lshrti3(long long a, int b);
|
||||
long long __ashrti3(long long a, int b);
|
||||
long long __ashlti3(long long a, int b);
|
||||
|
||||
#endif /* _ASM_RISCV_PROTOTYPES_H */
|
||||
|
@ -22,6 +22,8 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
struct task_struct;
|
||||
struct pt_regs;
|
||||
|
||||
|
@ -4,34 +4,73 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm-generic/export.h>
|
||||
|
||||
ENTRY(__lshrti3)
|
||||
SYM_FUNC_START(__lshrti3)
|
||||
beqz a2, .L1
|
||||
li a5,64
|
||||
sub a5,a5,a2
|
||||
addi sp,sp,-16
|
||||
sext.w a4,a5
|
||||
blez a5, .L2
|
||||
sext.w a2,a2
|
||||
sll a4,a1,a4
|
||||
srl a0,a0,a2
|
||||
srl a1,a1,a2
|
||||
sll a4,a1,a4
|
||||
srl a2,a1,a2
|
||||
or a0,a0,a4
|
||||
sd a1,8(sp)
|
||||
sd a0,0(sp)
|
||||
ld a0,0(sp)
|
||||
ld a1,8(sp)
|
||||
addi sp,sp,16
|
||||
ret
|
||||
mv a1,a2
|
||||
.L1:
|
||||
ret
|
||||
.L2:
|
||||
negw a4,a4
|
||||
srl a1,a1,a4
|
||||
sd a1,0(sp)
|
||||
sd zero,8(sp)
|
||||
ld a0,0(sp)
|
||||
ld a1,8(sp)
|
||||
addi sp,sp,16
|
||||
negw a0,a4
|
||||
li a2,0
|
||||
srl a0,a1,a0
|
||||
mv a1,a2
|
||||
ret
|
||||
ENDPROC(__lshrti3)
|
||||
SYM_FUNC_END(__lshrti3)
|
||||
EXPORT_SYMBOL(__lshrti3)
|
||||
|
||||
SYM_FUNC_START(__ashrti3)
|
||||
beqz a2, .L3
|
||||
li a5,64
|
||||
sub a5,a5,a2
|
||||
sext.w a4,a5
|
||||
blez a5, .L4
|
||||
sext.w a2,a2
|
||||
srl a0,a0,a2
|
||||
sll a4,a1,a4
|
||||
sra a2,a1,a2
|
||||
or a0,a0,a4
|
||||
mv a1,a2
|
||||
.L3:
|
||||
ret
|
||||
.L4:
|
||||
negw a0,a4
|
||||
srai a2,a1,0x3f
|
||||
sra a0,a1,a0
|
||||
mv a1,a2
|
||||
ret
|
||||
SYM_FUNC_END(__ashrti3)
|
||||
EXPORT_SYMBOL(__ashrti3)
|
||||
|
||||
SYM_FUNC_START(__ashlti3)
|
||||
beqz a2, .L5
|
||||
li a5,64
|
||||
sub a5,a5,a2
|
||||
sext.w a4,a5
|
||||
blez a5, .L6
|
||||
sext.w a2,a2
|
||||
sll a1,a1,a2
|
||||
srl a4,a0,a4
|
||||
sll a2,a0,a2
|
||||
or a1,a1,a4
|
||||
mv a0,a2
|
||||
.L5:
|
||||
ret
|
||||
.L6:
|
||||
negw a1,a4
|
||||
li a2,0
|
||||
sll a1,a0,a1
|
||||
mv a0,a2
|
||||
ret
|
||||
SYM_FUNC_END(__ashlti3)
|
||||
EXPORT_SYMBOL(__ashlti3)
|
||||
|
@ -21,18 +21,6 @@ extern atomic64_t s390_arch_random_counter;
|
||||
|
||||
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
|
||||
|
||||
static inline bool arch_has_random(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool arch_has_random_seed(void)
|
||||
{
|
||||
if (static_branch_likely(&s390_arch_random_available))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
return false;
|
||||
|
@ -177,6 +177,7 @@ static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return (cycles_t) get_tod_clock() >> 2;
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
int get_phys_clock(unsigned long *clock);
|
||||
void init_cpu_timer(void);
|
||||
|
@ -9,8 +9,6 @@
|
||||
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
|
||||
/* XXX Maybe do something better at some point... -DaveM */
|
||||
typedef unsigned long cycles_t;
|
||||
#define get_cycles() (0)
|
||||
#include <asm-generic/timex.h>
|
||||
|
||||
#endif
|
||||
|
@ -2,13 +2,8 @@
|
||||
#ifndef __UM_TIMEX_H
|
||||
#define __UM_TIMEX_H
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline cycles_t get_cycles (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define CLOCK_TICK_RATE (HZ)
|
||||
|
||||
#include <asm-generic/timex.h>
|
||||
|
||||
#endif
|
||||
|
@ -71,7 +71,9 @@ serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
|
||||
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
|
||||
|
||||
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
|
||||
blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
|
||||
blake2s-x86_64-y := blake2s-shash.o
|
||||
obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o
|
||||
libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o
|
||||
|
||||
ifeq ($(avx_supported),yes)
|
||||
camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
|
||||
|
@ -5,7 +5,6 @@
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/jump_label.h>
|
||||
@ -27,9 +26,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
|
||||
|
||||
void blake2s_compress_arch(struct blake2s_state *state,
|
||||
const u8 *block, size_t nblocks,
|
||||
const u32 inc)
|
||||
void blake2s_compress(struct blake2s_state *state, const u8 *block,
|
||||
size_t nblocks, const u32 inc)
|
||||
{
|
||||
/* SIMD disables preemption, so relax after processing each page. */
|
||||
BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
|
||||
@ -55,49 +53,12 @@ void blake2s_compress_arch(struct blake2s_state *state,
|
||||
block += blocks * BLAKE2S_BLOCK_SIZE;
|
||||
} while (nblocks);
|
||||
}
|
||||
EXPORT_SYMBOL(blake2s_compress_arch);
|
||||
|
||||
static int crypto_blake2s_update_x86(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
{ \
|
||||
.base.cra_name = name, \
|
||||
.base.cra_driver_name = driver_name, \
|
||||
.base.cra_priority = 200, \
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
|
||||
.base.cra_module = THIS_MODULE, \
|
||||
.digestsize = digest_size, \
|
||||
.setkey = crypto_blake2s_setkey, \
|
||||
.init = crypto_blake2s_init, \
|
||||
.update = crypto_blake2s_update_x86, \
|
||||
.final = crypto_blake2s_final_x86, \
|
||||
.descsize = sizeof(struct blake2s_state), \
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_algs[] = {
|
||||
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
|
||||
};
|
||||
EXPORT_SYMBOL(blake2s_compress);
|
||||
|
||||
static int __init blake2s_mod_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
return 0;
|
||||
|
||||
static_branch_enable(&blake2s_use_ssse3);
|
||||
if (boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
static_branch_enable(&blake2s_use_ssse3);
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX512) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
@ -108,26 +69,9 @@ static int __init blake2s_mod_init(void)
|
||||
XFEATURE_MASK_AVX512, NULL))
|
||||
static_branch_enable(&blake2s_use_avx512);
|
||||
|
||||
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
|
||||
crypto_register_shashes(blake2s_algs,
|
||||
ARRAY_SIZE(blake2s_algs)) : 0;
|
||||
}
|
||||
|
||||
static void __exit blake2s_mod_exit(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
return 0;
|
||||
}
|
||||
|
||||
module_init(blake2s_mod_init);
|
||||
module_exit(blake2s_mod_exit);
|
||||
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
arch/x86/crypto/blake2s-shash.c (new file, 77 lines)
@ -0,0 +1,77 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
static int crypto_blake2s_update_x86(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, false);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, false);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
{ \
|
||||
.base.cra_name = name, \
|
||||
.base.cra_driver_name = driver_name, \
|
||||
.base.cra_priority = 200, \
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
|
||||
.base.cra_module = THIS_MODULE, \
|
||||
.digestsize = digest_size, \
|
||||
.setkey = crypto_blake2s_setkey, \
|
||||
.init = crypto_blake2s_init, \
|
||||
.update = crypto_blake2s_update_x86, \
|
||||
.final = crypto_blake2s_final_x86, \
|
||||
.descsize = sizeof(struct blake2s_state), \
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_algs[] = {
|
||||
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
|
||||
};
|
||||
|
||||
static int __init blake2s_mod_init(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit blake2s_mod_exit(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
}
|
||||
|
||||
module_init(blake2s_mod_init);
|
||||
module_exit(blake2s_mod_exit);
|
||||
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
|
||||
MODULE_LICENSE("GPL v2");
|
@@ -73,10 +73,6 @@ static inline bool rdseed_int(unsigned int *v)
	return ok;
}

/* Conditional execution based on CPU type */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)

/*
 * These are the generic interfaces; they must not be declared if the
 * stubs in <linux/random.h> are to be invoked,
@@ -86,22 +82,22 @@ static inline bool rdseed_int(unsigned int *v)

static inline bool arch_get_random_long(unsigned long *v)
{
	return arch_has_random() ? rdrand_long(v) : false;
	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
}

static inline bool arch_get_random_int(unsigned int *v)
{
	return arch_has_random() ? rdrand_int(v) : false;
	return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
}

static inline bool arch_get_random_seed_long(unsigned long *v)
{
	return arch_has_random_seed() ? rdseed_long(v) : false;
	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
}

static inline bool arch_get_random_seed_int(unsigned int *v)
{
	return arch_has_random_seed() ? rdseed_int(v) : false;
	return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
}

extern void x86_init_rdrand(struct cpuinfo_x86 *c);

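After this change the CPU feature checks live directly inside the arch_get_random_*() helpers and callers only see the boolean return value. A sketch of the usual calling pattern, illustrative only (the variable name and the choice of fallback are not taken from the patch):

    unsigned long seed;

    if (!arch_get_random_seed_long(&seed) &&	/* RDSEED unavailable or failed */
        !arch_get_random_long(&seed))		/* RDRAND unavailable or failed */
    	seed = random_get_entropy();		/* fall back to a timer-derived value */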
@ -5,6 +5,15 @@
|
||||
#include <asm/processor.h>
|
||||
#include <asm/tsc.h>
|
||||
|
||||
static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_X86_TSC) &&
|
||||
!cpu_feature_enabled(X86_FEATURE_TSC))
|
||||
return random_get_entropy_fallback();
|
||||
return rdtsc();
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
/* Assume we use the PIT time source for the clock tick */
|
||||
#define CLOCK_TICK_RATE PIT_TICK_RATE
|
||||
|
||||
|
@ -22,13 +22,12 @@ extern void disable_TSC(void);
|
||||
|
||||
static inline cycles_t get_cycles(void)
|
||||
{
|
||||
#ifndef CONFIG_X86_TSC
|
||||
if (!boot_cpu_has(X86_FEATURE_TSC))
|
||||
if (!IS_ENABLED(CONFIG_X86_TSC) &&
|
||||
!cpu_feature_enabled(X86_FEATURE_TSC))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
return rdtsc();
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
extern struct system_counterval_t convert_art_to_tsc(u64 art);
|
||||
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
|
||||
|
@ -82,7 +82,7 @@ __visible void __irq_entry hv_stimer0_vector_handler(struct pt_regs *regs)
|
||||
inc_irq_stat(hyperv_stimer0_count);
|
||||
if (hv_stimer0_handler)
|
||||
hv_stimer0_handler();
|
||||
add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
|
||||
add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
|
||||
ack_APIC_irq();
|
||||
|
||||
exiting_irq();
|
||||
|
@ -29,10 +29,6 @@
|
||||
|
||||
extern unsigned long ccount_freq;
|
||||
|
||||
typedef unsigned long long cycles_t;
|
||||
|
||||
#define get_cycles() (0)
|
||||
|
||||
void local_timer_setup(unsigned cpu);
|
||||
|
||||
/*
|
||||
@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
|
||||
xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
|
||||
}
|
||||
|
||||
#include <asm-generic/timex.h>
|
||||
|
||||
#endif /* _XTENSA_TIMEX_H */
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "blacklist.h"
|
||||
|
||||
const char __initdata *const blacklist_hashes[] = {
|
||||
const char __initconst *const blacklist_hashes[] = {
|
||||
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
|
||||
, NULL
|
||||
};
|
||||
|
@ -1874,7 +1874,6 @@ config CRYPTO_STATS
|
||||
config CRYPTO_HASH_INFO
|
||||
bool
|
||||
|
||||
source "lib/crypto/Kconfig"
|
||||
source "drivers/crypto/Kconfig"
|
||||
source "crypto/asymmetric_keys/Kconfig"
|
||||
source "certs/Kconfig"
|
||||
|
@ -15,12 +15,12 @@
|
||||
static int crypto_blake2s_update_generic(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
|
||||
return crypto_blake2s_update(desc, in, inlen, true);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, blake2s_compress_generic);
|
||||
return crypto_blake2s_final(desc, out, true);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
|
crypto/drbg.c (135 lines changed)
@ -1035,17 +1035,38 @@ static const struct drbg_state_ops drbg_hash_ops = {
|
||||
******************************************************************/
|
||||
|
||||
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
|
||||
int reseed)
|
||||
int reseed, enum drbg_seed_state new_seed_state)
|
||||
{
|
||||
int ret = drbg->d_ops->update(drbg, seed, reseed);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drbg->seeded = true;
|
||||
drbg->seeded = new_seed_state;
|
||||
/* 10.1.1.2 / 10.1.1.3 step 5 */
|
||||
drbg->reseed_ctr = 1;
|
||||
|
||||
switch (drbg->seeded) {
|
||||
case DRBG_SEED_STATE_UNSEEDED:
|
||||
/* Impossible, but handle it to silence compiler warnings. */
|
||||
fallthrough;
|
||||
case DRBG_SEED_STATE_PARTIAL:
|
||||
/*
|
||||
* Require frequent reseeds until the seed source is
|
||||
* fully initialized.
|
||||
*/
|
||||
drbg->reseed_threshold = 50;
|
||||
break;
|
||||
|
||||
case DRBG_SEED_STATE_FULL:
|
||||
/*
|
||||
* Seed source has become fully initialized, frequent
|
||||
* reseeds no longer required.
|
||||
*/
|
||||
drbg->reseed_threshold = drbg_max_requests(drbg);
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
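For reference, the reseed-threshold behaviour selected by the switch above can be modelled in a few lines of ordinary C. This is only an illustrative userspace sketch of the seed-state machine introduced by this patch; the enum names are shortened, and the 50 and 1<<20 thresholds are the values used elsewhere in this diff (drbg_max_requests()). It is not kernel code.

	#include <stdio.h>

	enum drbg_seed_state { SEED_UNSEEDED, SEED_PARTIAL, SEED_FULL };

	static unsigned long reseed_threshold(enum drbg_seed_state s)
	{
		switch (s) {
		case SEED_UNSEEDED:	/* impossible after a successful seed */
		case SEED_PARTIAL:	/* input pool not yet ready: reseed often */
			return 50;
		case SEED_FULL:		/* fully seeded: SP800-90A request limit */
		default:
			return 1UL << 20;
		}
	}

	int main(void)
	{
		printf("partial: %lu, full: %lu\n",
		       reseed_threshold(SEED_PARTIAL),
		       reseed_threshold(SEED_FULL));
		return 0;
	}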
|
||||
|
||||
@ -1065,12 +1086,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drbg_async_seed(struct work_struct *work)
|
||||
static int drbg_seed_from_random(struct drbg_state *drbg)
|
||||
{
|
||||
struct drbg_string data;
|
||||
LIST_HEAD(seedlist);
|
||||
struct drbg_state *drbg = container_of(work, struct drbg_state,
|
||||
seed_work);
|
||||
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
|
||||
unsigned char entropy[32];
|
||||
int ret;
|
||||
@ -1081,30 +1100,15 @@ static void drbg_async_seed(struct work_struct *work)
|
||||
drbg_string_fill(&data, entropy, entropylen);
|
||||
list_add_tail(&data.list, &seedlist);
|
||||
|
||||
mutex_lock(&drbg->drbg_mutex);
|
||||
|
||||
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
goto out;
|
||||
|
||||
/* If nonblocking pool is initialized, deactivate Jitter RNG */
|
||||
crypto_free_rng(drbg->jent);
|
||||
drbg->jent = NULL;
|
||||
|
||||
/* Set seeded to false so that if __drbg_seed fails the
|
||||
* next generate call will trigger a reseed.
|
||||
*/
|
||||
drbg->seeded = false;
|
||||
|
||||
__drbg_seed(drbg, &seedlist, true);
|
||||
|
||||
if (drbg->seeded)
|
||||
drbg->reseed_threshold = drbg_max_requests(drbg);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&drbg->drbg_mutex);
|
||||
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
|
||||
|
||||
out:
|
||||
memzero_explicit(entropy, entropylen);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1126,6 +1130,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
|
||||
struct drbg_string data1;
|
||||
LIST_HEAD(seedlist);
|
||||
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
|
||||
|
||||
/* 9.1 / 9.2 / 9.3.1 step 3 */
|
||||
if (pers && pers->len > (drbg_max_addtl(drbg))) {
|
||||
@ -1153,6 +1158,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
BUG_ON((entropylen * 2) > sizeof(entropy));
|
||||
|
||||
/* Get seed from in-kernel /dev/urandom */
|
||||
if (!rng_is_initialized())
|
||||
new_seed_state = DRBG_SEED_STATE_PARTIAL;
|
||||
|
||||
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -1168,7 +1176,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
entropylen);
|
||||
if (ret) {
|
||||
pr_devel("DRBG: jent failed with %d\n", ret);
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Do not treat the transient failure of the
|
||||
* Jitter RNG as an error that needs to be
|
||||
* reported. The combined number of the
|
||||
* maximum reseed threshold times the maximum
|
||||
* number of Jitter RNG transient errors is
|
||||
* less than the reseed threshold required by
|
||||
* SP800-90A allowing us to treat the
|
||||
* transient errors as such.
|
||||
*
|
||||
* However, we mandate that at least the first
|
||||
* seeding operation must succeed with the
|
||||
* Jitter RNG.
|
||||
*/
|
||||
if (!reseed || ret != -EAGAIN)
|
||||
goto out;
|
||||
}
|
||||
|
||||
drbg_string_fill(&data1, entropy, entropylen * 2);
|
||||
@ -1193,7 +1217,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
memset(drbg->C, 0, drbg_statelen(drbg));
|
||||
}
|
||||
|
||||
ret = __drbg_seed(drbg, &seedlist, reseed);
|
||||
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
|
||||
|
||||
out:
|
||||
memzero_explicit(entropy, entropylen * 2);
|
||||
@ -1373,19 +1397,25 @@ static int drbg_generate(struct drbg_state *drbg,
|
||||
* here. The spec is a bit convoluted here, we make it simpler.
|
||||
*/
|
||||
if (drbg->reseed_threshold < drbg->reseed_ctr)
|
||||
drbg->seeded = false;
|
||||
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
|
||||
|
||||
if (drbg->pr || !drbg->seeded) {
|
||||
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
|
||||
pr_devel("DRBG: reseeding before generation (prediction "
|
||||
"resistance: %s, state %s)\n",
|
||||
drbg->pr ? "true" : "false",
|
||||
drbg->seeded ? "seeded" : "unseeded");
|
||||
(drbg->seeded == DRBG_SEED_STATE_FULL ?
|
||||
"seeded" : "unseeded"));
|
||||
/* 9.3.1 steps 7.1 through 7.3 */
|
||||
len = drbg_seed(drbg, addtl, true);
|
||||
if (len)
|
||||
goto err;
|
||||
/* 9.3.1 step 7.4 */
|
||||
addtl = NULL;
|
||||
} else if (rng_is_initialized() &&
|
||||
drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
|
||||
len = drbg_seed_from_random(drbg);
|
||||
if (len)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (addtl && 0 < addtl->len)
|
||||
@ -1478,51 +1508,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
|
||||
{
|
||||
struct drbg_state *drbg = container_of(rdy, struct drbg_state,
|
||||
random_ready);
|
||||
|
||||
schedule_work(&drbg->seed_work);
|
||||
}
|
||||
|
||||
static int drbg_prepare_hrng(struct drbg_state *drbg)
|
||||
{
|
||||
int err;
|
||||
|
||||
/* We do not need an HRNG in test mode. */
|
||||
if (list_empty(&drbg->test_data.list))
|
||||
return 0;
|
||||
|
||||
INIT_WORK(&drbg->seed_work, drbg_async_seed);
|
||||
|
||||
drbg->random_ready.owner = THIS_MODULE;
|
||||
drbg->random_ready.func = drbg_schedule_async_seed;
|
||||
|
||||
err = add_random_ready_callback(&drbg->random_ready);
|
||||
|
||||
switch (err) {
|
||||
case 0:
|
||||
break;
|
||||
|
||||
case -EALREADY:
|
||||
err = 0;
|
||||
/* fall through */
|
||||
|
||||
default:
|
||||
drbg->random_ready.func = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
|
||||
|
||||
/*
|
||||
* Require frequent reseeds until the seed source is fully
|
||||
* initialized.
|
||||
*/
|
||||
drbg->reseed_threshold = 50;
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1565,7 +1559,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
if (!drbg->core) {
|
||||
drbg->core = &drbg_cores[coreref];
|
||||
drbg->pr = pr;
|
||||
drbg->seeded = false;
|
||||
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
|
||||
drbg->reseed_threshold = drbg_max_requests(drbg);
|
||||
|
||||
ret = drbg_alloc_state(drbg);
|
||||
@ -1616,12 +1610,9 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
|
||||
*/
|
||||
static int drbg_uninstantiate(struct drbg_state *drbg)
|
||||
{
|
||||
if (drbg->random_ready.func) {
|
||||
del_random_ready_callback(&drbg->random_ready);
|
||||
cancel_work_sync(&drbg->seed_work);
|
||||
if (!IS_ERR_OR_NULL(drbg->jent))
|
||||
crypto_free_rng(drbg->jent);
|
||||
drbg->jent = NULL;
|
||||
}
|
||||
drbg->jent = NULL;
|
||||
|
||||
if (drbg->d_ops)
|
||||
drbg->d_ops->crypto_fini(drbg);
|
||||
|
@ -6235,7 +6235,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
|
||||
const struct ata_port_info * const * ppi,
|
||||
int n_ports)
|
||||
{
|
||||
const struct ata_port_info *pi;
|
||||
const struct ata_port_info *pi = &ata_dummy_port_info;
|
||||
struct ata_host *host;
|
||||
int i, j;
|
||||
|
||||
@ -6243,7 +6243,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
|
||||
if (!host)
|
||||
return NULL;
|
||||
|
||||
for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
|
||||
for (i = 0, j = 0; i < host->n_ports; i++) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
|
||||
if (ppi[j])
|
||||
|
@ -535,28 +535,41 @@ config ADI
|
||||
and SSM (Silicon Secured Memory). Intended consumers of this
|
||||
driver include crash and makedumpfile.
|
||||
|
||||
endmenu
|
||||
|
||||
config RANDOM_TRUST_CPU
|
||||
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
|
||||
depends on X86 || S390 || PPC
|
||||
default n
|
||||
bool "Initialize RNG using CPU RNG instructions"
|
||||
default y
|
||||
depends on ARCH_RANDOM
|
||||
help
|
||||
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
|
||||
RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
|
||||
for the purposes of initializing Linux's CRNG. Since this is not
|
||||
something that can be independently audited, this amounts to trusting
|
||||
that CPU manufacturer (perhaps with the insistence or mandate
|
||||
of a Nation State's intelligence or law enforcement agencies)
|
||||
has not installed a hidden back door to compromise the CPU's
|
||||
random number generation facilities. This can also be configured
|
||||
at boot with "random.trust_cpu=on/off".
|
||||
Initialize the RNG using random numbers supplied by the CPU's
|
||||
RNG instructions (e.g. RDRAND), if supported and available. These
|
||||
random numbers are never used directly, but are rather hashed into
|
||||
the main input pool, and this happens regardless of whether or not
|
||||
this option is enabled. Instead, this option controls whether
|
||||
they are credited and hence can initialize the RNG. Additionally,
|
||||
other sources of randomness are always used, regardless of this
|
||||
setting. Enabling this implies trusting that the CPU can supply high
|
||||
quality and non-backdoored random numbers.
|
||||
|
||||
Say Y here unless you have reason to mistrust your CPU or believe
|
||||
its RNG facilities may be faulty. This may also be configured at
|
||||
boot time with "random.trust_cpu=on/off".
|
||||
|
||||
config RANDOM_TRUST_BOOTLOADER
|
||||
bool "Trust the bootloader to initialize Linux's CRNG"
|
||||
bool "Initialize RNG using bootloader-supplied seed"
|
||||
default y
|
||||
help
|
||||
Some bootloaders can provide entropy to increase the kernel's initial
|
||||
device randomness. Say Y here to assume the entropy provided by the
|
||||
booloader is trustworthy so it will be added to the kernel's entropy
|
||||
pool. Otherwise, say N here so it will be regarded as device input that
|
||||
only mixes the entropy pool.
|
||||
Initialize the RNG using a seed supplied by the bootloader or boot
|
||||
environment (e.g. EFI or a bootloader-generated device tree). This
|
||||
seed is not used directly, but is rather hashed into the main input
|
||||
pool, and this happens regardless of whether or not this option is
|
||||
enabled. Instead, this option controls whether the seed is credited
|
||||
and hence can initialize the RNG. Additionally, other sources of
|
||||
randomness are always used, regardless of this setting. Enabling
|
||||
this implies trusting that the bootloader can supply high quality and
|
||||
non-backdoored seeds.
|
||||
|
||||
Say Y here unless you have reason to mistrust your bootloader or
|
||||
believe its RNG facilities may be faulty. This may also be configured
|
||||
at boot time with "random.trust_bootloader=on/off".
|
||||
|
||||
endmenu
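Both help texts above point at the same runtime overrides, so a brief example of how the credit decisions can be flipped without rebuilding the kernel may be useful. The option names and boot parameters come straight from the help texts; the particular on/off values chosen here are only an illustration.

	# .config fragment: credit the CPU RNG output, but not the bootloader seed
	CONFIG_RANDOM_TRUST_CPU=y
	# CONFIG_RANDOM_TRUST_BOOTLOADER is not set

	# Equivalent overrides on the kernel command line
	random.trust_cpu=on random.trust_bootloader=off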
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/sched/signal.h>
|
||||
|
File diff suppressed because it is too large
@ -325,4 +325,3 @@ void __init hv_init_clocksource(void)
|
||||
hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
|
||||
hv_setup_sched_clock(read_hv_sched_clock_msr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_init_clocksource);
|
||||
|
@ -1303,7 +1303,7 @@ static void vmbus_isr(void)
|
||||
tasklet_schedule(&hv_cpu->msg_dpc);
|
||||
}
|
||||
|
||||
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
|
||||
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -253,9 +253,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (IS_ERR(dev->clk))
|
||||
return PTR_ERR(dev->clk);
|
||||
|
||||
if (prepare) {
|
||||
/* Optional interface clock */
|
||||
ret = clk_prepare_enable(dev->pclk);
|
||||
|
@ -349,8 +349,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
|
||||
goto exit_reset;
|
||||
}
|
||||
|
||||
dev->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (!i2c_dw_prepare_clk(dev, true)) {
|
||||
dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
|
||||
if (IS_ERR(dev->clk)) {
|
||||
ret = PTR_ERR(dev->clk);
|
||||
goto exit_reset;
|
||||
}
|
||||
|
||||
ret = i2c_dw_prepare_clk(dev, true);
|
||||
if (ret)
|
||||
goto exit_reset;
|
||||
|
||||
if (dev->clk) {
|
||||
u64 clk_khz;
|
||||
|
||||
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
|
||||
|
@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)
|
||||
|
||||
/* The PB11MPCore GIC needs to be configured in the syscon */
|
||||
map = syscon_node_to_regmap(np);
|
||||
of_node_put(np);
|
||||
if (!IS_ERR(map)) {
|
||||
/* new irq mode with no DCC */
|
||||
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
|
||||
|
@ -1621,7 +1621,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
|
||||
|
||||
gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
|
||||
if (!gic_data.ppi_descs)
|
||||
return;
|
||||
goto out_put_node;
|
||||
|
||||
nr_parts = of_get_child_count(parts_node);
|
||||
|
||||
@ -1662,12 +1662,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
|
||||
continue;
|
||||
|
||||
cpu = of_cpu_node_to_id(cpu_node);
|
||||
if (WARN_ON(cpu < 0))
|
||||
if (WARN_ON(cpu < 0)) {
|
||||
of_node_put(cpu_node);
|
||||
continue;
|
||||
}
|
||||
|
||||
pr_cont("%pOF[%d] ", cpu_node, cpu);
|
||||
|
||||
cpumask_set_cpu(cpu, &part->mask);
|
||||
of_node_put(cpu_node);
|
||||
}
|
||||
|
||||
pr_cont("}\n");
|
||||
|
@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
|
||||
/*
|
||||
* Work out how many "unsigned long"s we need to hold the bitset.
|
||||
*/
|
||||
bitset_size = dm_round_up(region_count,
|
||||
sizeof(*lc->clean_bits) << BYTE_SHIFT);
|
||||
bitset_size = dm_round_up(region_count, BITS_PER_LONG);
|
||||
bitset_size >>= BYTE_SHIFT;
|
||||
|
||||
lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
|
||||
|
@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev)
|
||||
clk_disable_unprepare(ssc->clk);
|
||||
|
||||
ssc->irq = platform_get_irq(pdev, 0);
|
||||
if (!ssc->irq) {
|
||||
if (ssc->irq < 0) {
|
||||
dev_dbg(&pdev->dev, "could not get irq\n");
|
||||
return -ENXIO;
|
||||
return ssc->irq;
|
||||
}
|
||||
|
||||
mutex_lock(&user_lock);
|
||||
|
@ -323,7 +323,6 @@ static void bgmac_remove(struct bcma_device *core)
|
||||
bcma_mdio_mii_unregister(bgmac->mii_bus);
|
||||
bgmac_enet_remove(bgmac);
|
||||
bcma_set_drvdata(core, NULL);
|
||||
kfree(bgmac);
|
||||
}
|
||||
|
||||
static struct bcma_driver bgmac_bcma_driver = {
|
||||
|
@ -2578,15 +2578,16 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
|
||||
set_bit(__I40E_TESTING, pf->state);
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Cannot start offline testing when PF is in reset state.\n");
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
@ -2633,9 +2634,17 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
data[I40E_ETH_TEST_INTR] = 0;
|
||||
}
|
||||
|
||||
skip_ol_tests:
|
||||
|
||||
netif_info(pf, drv, netdev, "testing finished\n");
|
||||
return;
|
||||
|
||||
skip_ol_tests:
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
netif_info(pf, drv, netdev, "testing failed\n");
|
||||
}
|
||||
|
||||
static void i40e_get_wol(struct net_device *netdev,
|
||||
|
@ -8108,6 +8108,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (!tc) {
|
||||
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
|
||||
return -EBUSY;
|
||||
|
@ -2149,7 +2149,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
||||
}
|
||||
|
||||
if (vf->adq_enabled) {
|
||||
for (i = 0; i < I40E_MAX_VF_VSI; i++)
|
||||
for (i = 0; i < vf->num_tc; i++)
|
||||
num_qps_all += vf->ch[i].num_qps;
|
||||
if (num_qps_all != qci->num_queue_pairs) {
|
||||
aq_ret = I40E_ERR_PARAM;
|
||||
|
@ -802,6 +802,17 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
|
||||
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
|
||||
}
|
||||
|
||||
static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
|
||||
{
|
||||
unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
|
||||
unsigned long data;
|
||||
|
||||
data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
|
||||
get_order(size));
|
||||
|
||||
return (void *)data;
|
||||
}
|
||||
|
||||
/* the qdma core needs scratch memory to be setup */
|
||||
static int mtk_init_fq_dma(struct mtk_eth *eth)
|
||||
{
|
||||
@ -1299,7 +1310,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
|
||||
goto release_desc;
|
||||
|
||||
/* alloc new buffer */
|
||||
new_data = napi_alloc_frag(ring->frag_size);
|
||||
if (ring->frag_size <= PAGE_SIZE)
|
||||
new_data = napi_alloc_frag(ring->frag_size);
|
||||
else
|
||||
new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
|
||||
if (unlikely(!new_data)) {
|
||||
netdev->stats.rx_dropped++;
|
||||
goto release_desc;
|
||||
@ -1696,7 +1710,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < rx_dma_size; i++) {
|
||||
ring->data[i] = netdev_alloc_frag(ring->frag_size);
|
||||
if (ring->frag_size <= PAGE_SIZE)
|
||||
ring->data[i] = netdev_alloc_frag(ring->frag_size);
|
||||
else
|
||||
ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
|
||||
if (!ring->data[i])
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -7,8 +7,8 @@
|
||||
#include "spectrum.h"
|
||||
|
||||
enum mlxsw_sp_counter_sub_pool_id {
|
||||
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
|
||||
MLXSW_SP_COUNTER_SUB_POOL_RIF,
|
||||
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
|
||||
};
|
||||
|
||||
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
|
||||
|
@ -401,13 +401,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
|
||||
int err;
|
||||
|
||||
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
|
||||
usb_anchor_urb(urb, &drv_data->tx_anchor);
|
||||
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
if (err)
|
||||
if (err) {
|
||||
kfree(urb->setup_packet);
|
||||
usb_unanchor_urb(urb);
|
||||
usb_free_urb(urb);
|
||||
break;
|
||||
}
|
||||
|
||||
drv_data->tx_in_flight++;
|
||||
usb_free_urb(urb);
|
||||
}
|
||||
|
||||
/* Cleanup the rest deferred urbs. */
|
||||
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
|
||||
kfree(urb->setup_packet);
|
||||
usb_free_urb(urb);
|
||||
}
|
||||
usb_scuttle_anchored_urbs(&drv_data->deferred);
|
||||
}
|
||||
|
||||
static int nfcmrvl_resume(struct usb_interface *intf)
|
||||
|
@ -304,6 +304,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
|
||||
int r = 0;
|
||||
struct device *dev = &hdev->ndev->dev;
|
||||
struct nfc_evt_transaction *transaction;
|
||||
u32 aid_len;
|
||||
u8 params_len;
|
||||
|
||||
pr_debug("connectivity gate event: %x\n", event);
|
||||
|
||||
@ -312,51 +314,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
|
||||
r = nfc_se_connectivity(hdev->ndev, host);
|
||||
break;
|
||||
case ST21NFCA_EVT_TRANSACTION:
|
||||
/*
|
||||
* According to specification etsi 102 622
|
||||
/* According to specification etsi 102 622
|
||||
* 11.2.2.4 EVT_TRANSACTION Table 52
|
||||
* Description Tag Length
|
||||
* AID 81 5 to 16
|
||||
* PARAMETERS 82 0 to 255
|
||||
*
|
||||
* The key differences are aid storage length is variably sized
|
||||
* in the packet, but fixed in nfc_evt_transaction, and that the aid_len
|
||||
* is u8 in the packet, but u32 in the structure, and the tags in
|
||||
* the packet are not included in nfc_evt_transaction.
|
||||
*
|
||||
* size in bytes: 1 1 5-16 1 1 0-255
|
||||
* offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
|
||||
* member name: aid_tag(M) aid_len aid params_tag(M) params_len params
|
||||
* example: 0x81 5-16 X 0x82 0-255 X
|
||||
*/
|
||||
if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
|
||||
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
|
||||
if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
|
||||
return -EPROTO;
|
||||
|
||||
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
|
||||
skb->len - 2, GFP_KERNEL);
|
||||
aid_len = skb->data[1];
|
||||
|
||||
if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
|
||||
return -EPROTO;
|
||||
|
||||
params_len = skb->data[aid_len + 3];
|
||||
|
||||
/* Verify PARAMETERS tag is (82), and final check that there is enough
|
||||
* space in the packet to read everything.
|
||||
*/
|
||||
if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
|
||||
(skb->len < aid_len + 4 + params_len))
|
||||
return -EPROTO;
|
||||
|
||||
transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
|
||||
if (!transaction)
|
||||
return -ENOMEM;
|
||||
|
||||
transaction->aid_len = skb->data[1];
|
||||
transaction->aid_len = aid_len;
|
||||
transaction->params_len = params_len;
|
||||
|
||||
/* Checking if the length of the AID is valid */
|
||||
if (transaction->aid_len > sizeof(transaction->aid)) {
|
||||
devm_kfree(dev, transaction);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(transaction->aid, &skb->data[2],
|
||||
transaction->aid_len);
|
||||
|
||||
/* Check next byte is PARAMETERS tag (82) */
|
||||
if (skb->data[transaction->aid_len + 2] !=
|
||||
NFC_EVT_TRANSACTION_PARAMS_TAG) {
|
||||
devm_kfree(dev, transaction);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
transaction->params_len = skb->data[transaction->aid_len + 3];
|
||||
|
||||
/* Total size is allocated (skb->len - 2) minus fixed array members */
|
||||
if (transaction->params_len > ((skb->len - 2) -
|
||||
sizeof(struct nfc_evt_transaction))) {
|
||||
devm_kfree(dev, transaction);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(transaction->params, skb->data +
|
||||
transaction->aid_len + 4, transaction->params_len);
|
||||
memcpy(transaction->aid, &skb->data[2], aid_len);
|
||||
memcpy(transaction->params, &skb->data[aid_len + 4], params_len);
|
||||
|
||||
r = nfc_se_transaction(hdev->ndev, host, transaction);
|
||||
break;
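The ETSI 102 622 layout described in the comment above is easier to follow with the offsets written out as code. Below is a self-contained userspace sketch of the same validation steps the new driver code performs; the 0x81/0x82 tag values and the length checks are taken from the diff, while the 16-byte AID cap stands in for sizeof(transaction->aid) and the buffer in main() is a made-up example, not real driver data.

	#include <stdint.h>
	#include <stdio.h>

	#define AID_TAG    0x81
	#define PARAMS_TAG 0x82

	/* Returns 0 on success, -1 on a malformed event (mirrors -EPROTO above). */
	static int parse_evt_transaction(const uint8_t *data, size_t len)
	{
		uint8_t aid_len, params_len;

		if (len < 2 || data[0] != AID_TAG)
			return -1;
		aid_len = data[1];
		/* aid_tag, aid_len, aid[], params_tag, params_len must all fit. */
		if (len < (size_t)aid_len + 4 || aid_len > 16)
			return -1;
		if (data[aid_len + 2] != PARAMS_TAG)
			return -1;
		params_len = data[aid_len + 3];
		if (len < (size_t)aid_len + 4 + params_len)
			return -1;

		printf("AID at offset 2 (%u bytes), params at offset %u (%u bytes)\n",
		       aid_len, aid_len + 4, params_len);
		return 0;
	}

	int main(void)
	{
		/* 0x81, len 5, AID, 0x82, len 2, params */
		const uint8_t evt[] = { 0x81, 5, 1, 2, 3, 4, 5, 0x82, 2, 0xaa, 0xbb };

		return parse_evt_transaction(evt, sizeof(evt)) ? 1 : 0;
	}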
|
||||
|
@ -9772,7 +9772,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!ioa_cfg->hrrq[i].host_rrq) {
|
||||
while (--i > 0)
|
||||
while (--i >= 0)
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(u32) * ioa_cfg->hrrq[i].size,
|
||||
ioa_cfg->hrrq[i].host_rrq,
|
||||
@ -10045,7 +10045,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
|
||||
ioa_cfg->vectors_info[i].desc,
|
||||
&ioa_cfg->hrrq[i]);
|
||||
if (rc) {
|
||||
while (--i >= 0)
|
||||
while (--i > 0)
|
||||
free_irq(pci_irq_vector(pdev, i),
|
||||
&ioa_cfg->hrrq[i]);
|
||||
return rc;
|
||||
|
@ -4249,6 +4249,9 @@ struct wqe_common {
|
||||
#define wqe_sup_SHIFT 6
|
||||
#define wqe_sup_MASK 0x00000001
|
||||
#define wqe_sup_WORD word11
|
||||
#define wqe_ffrq_SHIFT 6
|
||||
#define wqe_ffrq_MASK 0x00000001
|
||||
#define wqe_ffrq_WORD word11
|
||||
#define wqe_wqec_SHIFT 7
|
||||
#define wqe_wqec_MASK 0x00000001
|
||||
#define wqe_wqec_WORD word11
|
||||
|
@ -842,7 +842,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
else
|
||||
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
|
||||
if (ndlp->nlp_DID == Fabric_DID) {
|
||||
if (vport->port_state <= LPFC_FDISC)
|
||||
if (vport->port_state <= LPFC_FDISC ||
|
||||
vport->fc_flag & FC_PT2PT)
|
||||
goto out;
|
||||
lpfc_linkdown_port(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
|
@ -1202,7 +1202,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
|
||||
struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
|
||||
struct nvme_common_command *sqe;
|
||||
struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
|
||||
union lpfc_wqe128 *wqe = &pwqeq->wqe;
|
||||
uint32_t req_len;
|
||||
|
||||
@ -1258,8 +1259,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
cstat->control_requests++;
|
||||
}
|
||||
|
||||
if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
|
||||
if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
|
||||
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
|
||||
sqe = &((struct nvme_fc_cmd_iu *)
|
||||
nCmd->cmdaddr)->sqe.common;
|
||||
if (sqe->opcode == nvme_admin_async_event)
|
||||
bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Finish initializing those WQE fields that are independent
|
||||
* of the nvme_cmnd request_buffer
|
||||
|
@ -4532,7 +4532,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
|
||||
return 0;
|
||||
|
||||
out_unwind:
|
||||
while (--i > 0)
|
||||
while (--i >= 0)
|
||||
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
|
||||
pci_free_irq_vectors(pdev);
|
||||
return rc;
|
||||
|
@ -333,8 +333,8 @@ struct PVSCSIRingReqDesc {
|
||||
u8 tag;
|
||||
u8 bus;
|
||||
u8 target;
|
||||
u8 vcpuHint;
|
||||
u8 unused[59];
|
||||
u16 vcpuHint;
|
||||
u8 unused[58];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
@ -685,7 +685,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
|
||||
if (!devpriv->usb_rx_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
|
||||
size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
|
||||
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
|
||||
if (!devpriv->usb_tx_buf)
|
||||
return -ENOMEM;
|
||||
|
@ -428,7 +428,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
|
||||
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
|
||||
iounmap(qtty->base);
|
||||
qtty->base = NULL;
|
||||
free_irq(qtty->irq, pdev);
|
||||
free_irq(qtty->irq, qtty);
|
||||
tty_port_destroy(&qtty->port);
|
||||
goldfish_tty_current_line_count--;
|
||||
if (goldfish_tty_current_line_count == 0)
|
||||
|
@ -1476,6 +1476,8 @@ static inline void __stop_tx(struct uart_8250_port *p)
|
||||
|
||||
if (em485) {
|
||||
unsigned char lsr = serial_in(p, UART_LSR);
|
||||
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
|
||||
|
||||
/*
|
||||
* To provide required timeing and allow FIFO transfer,
|
||||
* __stop_tx_rs485() must be called only when both FIFO and
|
||||
|
@ -5076,7 +5076,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
retval = -EINVAL;
|
||||
goto error1;
|
||||
goto error2;
|
||||
}
|
||||
hcd->rsrc_start = res->start;
|
||||
hcd->rsrc_len = resource_size(res);
|
||||
|
@ -3027,6 +3027,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
|
||||
of_node_put(isp1301_node);
|
||||
if (!udc->isp1301_i2c_client) {
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
@ -168,6 +168,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
|
||||
{ }
|
||||
};
|
||||
|
||||
@ -206,6 +207,7 @@ static const struct usb_device_id id_table_combined[] = {
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
|
||||
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -212,6 +212,7 @@
|
||||
//
|
||||
// Definitions for other product IDs
|
||||
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
|
||||
#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)
|
||||
|
||||
|
||||
#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
|
||||
|
@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define CINTERION_PRODUCT_CLS8 0x00b0
|
||||
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
|
||||
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
|
||||
#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
|
||||
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
|
||||
#define CINTERION_PRODUCT_MV32_WA 0x00f1
|
||||
#define CINTERION_PRODUCT_MV32_WB 0x00f2
|
||||
|
||||
@ -1979,6 +1981,10 @@ static const struct usb_device_id option_ids[] = {
|
||||
.driver_info = RSVD(3)},
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
|
||||
.driver_info = RSVD(0)},
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
|
||||
.driver_info = RSVD(3)},
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
|
||||
.driver_info = RSVD(0)},
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
|
||||
.driver_info = RSVD(3)},
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
|
||||
|
@ -669,6 +669,7 @@ static int vm_cmdline_set(const char *device,
|
||||
if (!vm_cmdline_parent_registered) {
|
||||
err = device_register(&vm_cmdline_parent);
|
||||
if (err) {
|
||||
put_device(&vm_cmdline_parent);
|
||||
pr_err("Failed to register parent device!\n");
|
||||
return err;
|
||||
}
|
||||
|
@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)
|
||||
|
||||
if (vp_dev->msix_affinity_masks) {
|
||||
for (i = 0; i < vp_dev->msix_vectors; i++)
|
||||
if (vp_dev->msix_affinity_masks[i])
|
||||
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
|
||||
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
|
||||
}
|
||||
|
||||
if (vp_dev->msix_enabled) {
|
||||
|
@ -641,14 +641,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
|
||||
if (stat->st_result_mask & P9_STATS_NLINK)
|
||||
set_nlink(inode, stat->st_nlink);
|
||||
if (stat->st_result_mask & P9_STATS_MODE) {
|
||||
inode->i_mode = stat->st_mode;
|
||||
if ((S_ISBLK(inode->i_mode)) ||
|
||||
(S_ISCHR(inode->i_mode)))
|
||||
init_special_inode(inode, inode->i_mode,
|
||||
inode->i_rdev);
|
||||
mode = stat->st_mode & S_IALLUGO;
|
||||
mode |= inode->i_mode & ~S_IALLUGO;
|
||||
inode->i_mode = mode;
|
||||
}
|
||||
if (stat->st_result_mask & P9_STATS_RDEV)
|
||||
inode->i_rdev = new_decode_dev(stat->st_rdev);
|
||||
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
|
||||
stat->st_result_mask & P9_STATS_SIZE)
|
||||
v9fs_i_size_write(inode, stat->st_size);
|
||||
|
@ -808,13 +808,6 @@ COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
|
||||
COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
|
||||
COMPATIBLE_IOCTL(WDIOC_SETPRETIMEOUT)
|
||||
COMPATIBLE_IOCTL(WDIOC_GETPRETIMEOUT)
|
||||
/* Big R */
|
||||
COMPATIBLE_IOCTL(RNDGETENTCNT)
|
||||
COMPATIBLE_IOCTL(RNDADDTOENTCNT)
|
||||
COMPATIBLE_IOCTL(RNDGETPOOL)
|
||||
COMPATIBLE_IOCTL(RNDADDENTROPY)
|
||||
COMPATIBLE_IOCTL(RNDZAPENTCNT)
|
||||
COMPATIBLE_IOCTL(RNDCLEARPOOL)
|
||||
/* Bluetooth */
|
||||
COMPATIBLE_IOCTL(HCIDEVUP)
|
||||
COMPATIBLE_IOCTL(HCIDEVDOWN)
|
||||
|
@ -3172,6 +3172,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
||||
size = size >> bsbits;
|
||||
start = start_off >> bsbits;
|
||||
|
||||
/*
|
||||
* For tiny groups (smaller than 8MB) the chosen allocation
|
||||
* alignment may be larger than group size. Make sure the
|
||||
* alignment does not move allocation to a different group which
|
||||
* makes mballoc fail assertions later.
|
||||
*/
|
||||
start = max(start, rounddown(ac->ac_o_ex.fe_logical,
|
||||
(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
|
||||
|
||||
/* don't cover already allocated blocks in selected range */
|
||||
if (ar->pleft && start <= ar->lleft) {
|
||||
size -= ar->lleft + 1 - start;
|
||||
|
@ -1916,7 +1916,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
|
||||
struct dx_hash_info *hinfo, ext4_lblk_t *newblock)
|
||||
{
|
||||
unsigned blocksize = dir->i_sb->s_blocksize;
|
||||
unsigned count, continued;
|
||||
unsigned continued;
|
||||
int count;
|
||||
struct buffer_head *bh2;
|
||||
u32 hash2;
|
||||
struct dx_map_entry *map;
|
||||
|
@ -52,6 +52,16 @@ int ext4_resize_begin(struct super_block *sb)
|
||||
if (!capable(CAP_SYS_RESOURCE))
|
||||
return -EPERM;
|
||||
|
||||
/*
|
||||
* If the reserved GDT blocks is non-zero, the resize_inode feature
|
||||
* should always be set.
|
||||
*/
|
||||
if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
|
||||
!ext4_has_feature_resize_inode(sb)) {
|
||||
ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we are not using the primary superblock/GDT copy don't resize,
|
||||
* because the user tools have no way of handling this. Probably a
|
||||
|
@ -2111,6 +2111,12 @@ pnfs_update_layout(struct inode *ino,
|
||||
case -ERECALLCONFLICT:
|
||||
case -EAGAIN:
|
||||
break;
|
||||
case -ENODATA:
|
||||
/* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
|
||||
pnfs_layout_set_fail_bit(
|
||||
lo, pnfs_iomode_to_fail_bit(iomode));
|
||||
lseg = NULL;
|
||||
goto out_put_layout_hdr;
|
||||
default:
|
||||
if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
|
||||
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
|
||||
|
@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
|
||||
blake2s_final(&state, out);
|
||||
}
|
||||
|
||||
void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
|
||||
const size_t keylen);
|
||||
|
||||
#endif /* _CRYPTO_BLAKE2S_H */
|
||||
|
@ -51,4 +51,19 @@ int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
int crypto_chacha_crypt(struct skcipher_request *req);
|
||||
int crypto_xchacha_crypt(struct skcipher_request *req);
|
||||
|
||||
enum chacha_constants { /* expand 32-byte k */
|
||||
CHACHA_CONSTANT_EXPA = 0x61707865U,
|
||||
CHACHA_CONSTANT_ND_3 = 0x3320646eU,
|
||||
CHACHA_CONSTANT_2_BY = 0x79622d32U,
|
||||
CHACHA_CONSTANT_TE_K = 0x6b206574U
|
||||
};
|
||||
|
||||
static inline void chacha_init_consts(u32 *state)
|
||||
{
|
||||
state[0] = CHACHA_CONSTANT_EXPA;
|
||||
state[1] = CHACHA_CONSTANT_ND_3;
|
||||
state[2] = CHACHA_CONSTANT_2_BY;
|
||||
state[3] = CHACHA_CONSTANT_TE_K;
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_CHACHA_H */
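The four enum values above are simply the ASCII string "expand 32-byte k" read as little-endian 32-bit words, as the comment notes. A small self-contained check in plain userspace C, independent of the kernel headers, makes the correspondence explicit:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const char sigma[16] = "expand 32-byte k";
		uint32_t w[4];

		/* Assemble each word little-endian, as chacha_init_consts() expects. */
		for (int i = 0; i < 4; i++)
			w[i] = (uint32_t)(unsigned char)sigma[4 * i] |
			       (uint32_t)(unsigned char)sigma[4 * i + 1] << 8 |
			       (uint32_t)(unsigned char)sigma[4 * i + 2] << 16 |
			       (uint32_t)(unsigned char)sigma[4 * i + 3] << 24;

		assert(w[0] == 0x61707865u);	/* CHACHA_CONSTANT_EXPA */
		assert(w[1] == 0x3320646eu);	/* CHACHA_CONSTANT_ND_3 */
		assert(w[2] == 0x79622d32u);	/* CHACHA_CONSTANT_2_BY */
		assert(w[3] == 0x6b206574u);	/* CHACHA_CONSTANT_TE_K */
		return 0;
	}

chacha_init_consts() just stores these four words into state[0..3], so the ChaCha block state always starts from the well-known constant regardless of host endianness.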
|
||||
|
@ -105,6 +105,12 @@ struct drbg_test_data {
|
||||
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
|
||||
};
|
||||
|
||||
enum drbg_seed_state {
|
||||
DRBG_SEED_STATE_UNSEEDED,
|
||||
DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
|
||||
DRBG_SEED_STATE_FULL,
|
||||
};
|
||||
|
||||
struct drbg_state {
|
||||
struct mutex drbg_mutex; /* lock around DRBG */
|
||||
unsigned char *V; /* internal state 10.1.1.1 1a) */
|
||||
@ -127,16 +133,14 @@ struct drbg_state {
|
||||
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
|
||||
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
|
||||
|
||||
bool seeded; /* DRBG fully seeded? */
|
||||
enum drbg_seed_state seeded; /* DRBG fully seeded? */
|
||||
bool pr; /* Prediction resistance enabled? */
|
||||
bool fips_primed; /* Continuous test primed? */
|
||||
unsigned char *prev; /* FIPS 140-2 continuous test value */
|
||||
struct work_struct seed_work; /* asynchronous seeding support */
|
||||
struct crypto_rng *jent;
|
||||
const struct drbg_state_ops *d_ops;
|
||||
const struct drbg_core *core;
|
||||
struct drbg_string test_data;
|
||||
struct random_ready_callback random_ready;
|
||||
};
|
||||
|
||||
static inline __u8 drbg_statelen(struct drbg_state *drbg)
|
||||
@ -184,11 +188,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg)
|
||||
static inline size_t drbg_max_requests(struct drbg_state *drbg)
|
||||
{
|
||||
/* SP800-90A requires 2**48 maximum requests before reseeding */
|
||||
#if (__BITS_PER_LONG == 32)
|
||||
return SIZE_MAX;
|
||||
#else
|
||||
return (1UL<<48);
|
||||
#endif
|
||||
return (1<<20);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -11,11 +11,11 @@
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
|
||||
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
|
||||
void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
void blake2s_compress(struct blake2s_state *state, const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
|
||||
bool blake2s_selftest(void);
|
||||
|
||||
@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
|
||||
state->f[0] = -1;
|
||||
}
|
||||
|
||||
typedef void (*blake2s_compress_t)(struct blake2s_state *state,
|
||||
const u8 *block, size_t nblocks, u32 inc);
|
||||
|
||||
/* Helper functions for BLAKE2s shared by the library and shash APIs */
|
||||
|
||||
static inline void __blake2s_update(struct blake2s_state *state,
|
||||
const u8 *in, size_t inlen,
|
||||
blake2s_compress_t compress)
|
||||
static __always_inline void
|
||||
__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
|
||||
bool force_generic)
|
||||
{
|
||||
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
|
||||
|
||||
@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
|
||||
return;
|
||||
if (inlen > fill) {
|
||||
memcpy(state->buf + state->buflen, in, fill);
|
||||
(*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, state->buf, 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
else
|
||||
blake2s_compress(state, state->buf, 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
state->buflen = 0;
|
||||
in += fill;
|
||||
inlen -= fill;
|
||||
@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
|
||||
if (inlen > BLAKE2S_BLOCK_SIZE) {
|
||||
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
|
||||
/* Hash one less (full) block than strictly possible */
|
||||
(*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, in, nblocks - 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
else
|
||||
blake2s_compress(state, in, nblocks - 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
}
|
||||
@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state,
|
||||
state->buflen += inlen;
|
||||
}
|
||||
|
||||
static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
|
||||
blake2s_compress_t compress)
|
||||
static __always_inline void
|
||||
__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
|
||||
{
|
||||
blake2s_set_lastblock(state);
|
||||
memset(state->buf + state->buflen, 0,
|
||||
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
|
||||
(*compress)(state, state->buf, 1, state->buflen);
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, state->buf, 1, state->buflen);
|
||||
else
|
||||
blake2s_compress(state, state->buf, 1, state->buflen);
|
||||
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
|
||||
memcpy(out, state->h, state->outlen);
|
||||
}
|
||||
@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)
|
||||
|
||||
static inline int crypto_blake2s_update(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen,
|
||||
blake2s_compress_t compress)
|
||||
bool force_generic)
|
||||
{
|
||||
struct blake2s_state *state = shash_desc_ctx(desc);
|
||||
|
||||
__blake2s_update(state, in, inlen, compress);
|
||||
__blake2s_update(state, in, inlen, force_generic);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
|
||||
blake2s_compress_t compress)
|
||||
bool force_generic)
|
||||
{
|
||||
struct blake2s_state *state = shash_desc_ctx(desc);
|
||||
|
||||
__blake2s_final(state, out, compress);
|
||||
__blake2s_final(state, out, force_generic);
|
||||
return 0;
|
||||
}
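The bool force_generic parameter replaces the earlier blake2s_compress_t function pointer, presumably so that each __always_inline caller is specialized for a constant true/false argument and ends up with a direct call rather than an indirect one. A stripped-down, self-contained illustration of that dispatch pattern, using trivial stand-in "compress" functions rather than the real BLAKE2s ones, looks like this:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for blake2s_compress_generic() and blake2s_compress(). */
	static void compress_generic(const char *blk) { printf("generic: %s\n", blk); }
	static void compress_arch(const char *blk)    { printf("arch:    %s\n", blk); }

	/* Mirrors __blake2s_update(): a constant bool selects the implementation,
	 * so after inlining each caller contains one direct call, no pointer. */
	static inline void do_update(const char *blk, bool force_generic)
	{
		if (force_generic)
			compress_generic(blk);
		else
			compress_arch(blk);
	}

	static void shash_update(const char *blk) { do_update(blk, true);  } /* generic tfm */
	static void lib_update(const char *blk)   { do_update(blk, false); } /* library API */

	int main(void)
	{
		shash_update("block");
		lib_update("block");
		return 0;
	}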
|
||||
|
||||
|
@ -60,6 +60,7 @@ enum cpuhp_state {
|
||||
CPUHP_LUSTRE_CFS_DEAD,
|
||||
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
|
||||
CPUHP_PADATA_DEAD,
|
||||
CPUHP_RANDOM_PREPARE,
|
||||
CPUHP_WORKQUEUE_PREP,
|
||||
CPUHP_POWER_NUMA_PREPARE,
|
||||
CPUHP_HRTIMERS_PREPARE,
|
||||
@ -176,6 +177,7 @@ enum cpuhp_state {
|
||||
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
|
||||
CPUHP_AP_WATCHDOG_ONLINE,
|
||||
CPUHP_AP_WORKQUEUE_ONLINE,
|
||||
CPUHP_AP_RANDOM_ONLINE,
|
||||
CPUHP_AP_RCUTREE_ONLINE,
|
||||
CPUHP_AP_BASE_CACHEINFO_ONLINE,
|
||||
CPUHP_AP_ONLINE_DYN,
|
||||
|
@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
|
||||
/** Unregister a Hardware Random Number Generator driver. */
|
||||
extern void hwrng_unregister(struct hwrng *rng);
|
||||
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
|
||||
/** Feed random bits into the pool. */
|
||||
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
|
||||
|
||||
#endif /* LINUX_HWRANDOM_H_ */
|
||||
|
@ -2382,6 +2382,7 @@ extern int install_special_mapping(struct mm_struct *mm,
|
||||
unsigned long flags, struct page **pages);
|
||||
|
||||
unsigned long randomize_stack_top(unsigned long stack_top);
|
||||
unsigned long randomize_page(unsigned long start, unsigned long range);
|
||||
|
||||
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
|
||||
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/siphash.h>
|
||||
|
||||
u32 prandom_u32(void);
|
||||
void prandom_bytes(void *buf, size_t nbytes);
|
||||
@ -21,15 +22,10 @@ void prandom_reseed_late(void);
|
||||
* The core SipHash round function. Each line can be executed in
|
||||
* parallel given enough CPU resources.
|
||||
*/
|
||||
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
|
||||
v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
|
||||
v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
|
||||
v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
|
||||
v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
|
||||
)
|
||||
#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
|
||||
|
||||
#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
|
||||
#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
|
||||
#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
|
||||
#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
|
||||
|
||||
#elif BITS_PER_LONG == 32
|
||||
/*
|
||||
@ -37,14 +33,9 @@ void prandom_reseed_late(void);
|
||||
* This is weaker, but 32-bit machines are not used for high-traffic
|
||||
* applications, so there is less output for an attacker to analyze.
|
||||
*/
|
||||
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
|
||||
v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
|
||||
v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
|
||||
v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
|
||||
v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
|
||||
)
|
||||
#define PRND_K0 0x6c796765
|
||||
#define PRND_K1 0x74656462
|
||||
#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
|
||||
#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
|
||||
#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
|
||||
|
||||
#else
|
||||
#error Unsupported BITS_PER_LONG
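The refactor swaps the open-coded key constants for XORs of the shared SIPHASH_CONST_*/HSIPHASH_CONST_* definitions. Assuming siphash.h defines those constants as the standard SipHash initialization words (the same 64-bit literals visible in the removed lines above), the resulting PRND_K0/PRND_K1 values are unchanged. The little check below writes the assumed constants out locally for illustration and demonstrates the 64-bit case:

	#include <assert.h>
	#include <stdint.h>

	/* Values assumed for SIPHASH_CONST_0..3; they match the literals that the
	 * old PRND_K0/PRND_K1 definitions XORed together. */
	#define SIPHASH_CONST_0 0x736f6d6570736575ULL
	#define SIPHASH_CONST_1 0x646f72616e646f6dULL
	#define SIPHASH_CONST_2 0x6c7967656e657261ULL
	#define SIPHASH_CONST_3 0x7465646279746573ULL

	int main(void)
	{
		/* Old definitions, copied from the removed lines of the diff. */
		const uint64_t old_k0 = 0x736f6d6570736575ULL ^ 0x6c7967656e657261ULL;
		const uint64_t old_k1 = 0x646f72616e646f6dULL ^ 0x7465646279746573ULL;

		assert((SIPHASH_CONST_0 ^ SIPHASH_CONST_2) == old_k0);
		assert((SIPHASH_CONST_1 ^ SIPHASH_CONST_3) == old_k1);
		return 0;
	}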
|
||||
|
@ -1,52 +1,35 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* include/linux/random.h
|
||||
*
|
||||
* Include file for the random number generator.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_RANDOM_H
|
||||
#define _LINUX_RANDOM_H
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/once.h>
|
||||
|
||||
#include <uapi/linux/random.h>
|
||||
|
||||
struct random_ready_callback {
|
||||
struct list_head list;
|
||||
void (*func)(struct random_ready_callback *rdy);
|
||||
struct module *owner;
|
||||
};
|
||||
struct notifier_block;
|
||||
|
||||
extern void add_device_randomness(const void *, unsigned int);
|
||||
extern void add_bootloader_randomness(const void *, unsigned int);
|
||||
void add_device_randomness(const void *buf, size_t len);
|
||||
void __init add_bootloader_randomness(const void *buf, size_t len);
|
||||
void add_input_randomness(unsigned int type, unsigned int code,
|
||||
unsigned int value) __latent_entropy;
|
||||
void add_interrupt_randomness(int irq) __latent_entropy;
|
||||
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
|
||||
|
||||
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
|
||||
static inline void add_latent_entropy(void)
|
||||
{
|
||||
add_device_randomness((const void *)&latent_entropy,
|
||||
sizeof(latent_entropy));
|
||||
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
|
||||
}
|
||||
#else
|
||||
static inline void add_latent_entropy(void) {}
|
||||
#endif
|
||||
|
||||
extern void add_input_randomness(unsigned int type, unsigned int code,
|
||||
unsigned int value) __latent_entropy;
|
||||
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
|
||||
|
||||
extern void get_random_bytes(void *buf, int nbytes);
|
||||
extern int wait_for_random_bytes(void);
|
||||
extern int __init rand_initialize(void);
|
||||
extern bool rng_is_initialized(void);
|
||||
extern int add_random_ready_callback(struct random_ready_callback *rdy);
|
||||
extern void del_random_ready_callback(struct random_ready_callback *rdy);
|
||||
extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
|
||||
|
||||
#ifndef MODULE
|
||||
extern const struct file_operations random_fops, urandom_fops;
|
||||
static inline void add_latent_entropy(void) { }
|
||||
#endif
|
||||
|
||||
void get_random_bytes(void *buf, size_t len);
|
||||
size_t __must_check get_random_bytes_arch(void *buf, size_t len);
|
||||
u32 get_random_u32(void);
|
||||
u64 get_random_u64(void);
|
||||
static inline unsigned int get_random_int(void)
|
||||
@ -78,36 +61,38 @@ static inline unsigned long get_random_long(void)
|
||||
|
||||
static inline unsigned long get_random_canary(void)
|
||||
{
|
||||
unsigned long val = get_random_long();
|
||||
|
||||
return val & CANARY_MASK;
|
||||
return get_random_long() & CANARY_MASK;
|
||||
}
|
||||
|
||||
int __init random_init(const char *command_line);
|
||||
bool rng_is_initialized(void);
|
||||
int wait_for_random_bytes(void);
|
||||
int register_random_ready_notifier(struct notifier_block *nb);
|
||||
int unregister_random_ready_notifier(struct notifier_block *nb);
|
||||
|
||||
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
|
||||
* Returns the result of the call to wait_for_random_bytes. */
|
||||
static inline int get_random_bytes_wait(void *buf, int nbytes)
|
||||
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
|
||||
{
|
||||
int ret = wait_for_random_bytes();
|
||||
get_random_bytes(buf, nbytes);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define declare_get_random_var_wait(var) \
|
||||
static inline int get_random_ ## var ## _wait(var *out) { \
|
||||
#define declare_get_random_var_wait(name, ret_type) \
|
||||
static inline int get_random_ ## name ## _wait(ret_type *out) { \
|
||||
int ret = wait_for_random_bytes(); \
|
||||
if (unlikely(ret)) \
|
||||
return ret; \
|
||||
*out = get_random_ ## var(); \
|
||||
*out = get_random_ ## name(); \
|
||||
return 0; \
|
||||
}
|
||||
declare_get_random_var_wait(u32)
|
||||
declare_get_random_var_wait(u64)
|
||||
declare_get_random_var_wait(int)
|
||||
declare_get_random_var_wait(long)
|
||||
declare_get_random_var_wait(u32, u32)
|
||||
declare_get_random_var_wait(u64, u32)
|
||||
declare_get_random_var_wait(int, unsigned int)
|
||||
declare_get_random_var_wait(long, unsigned long)
|
||||
#undef declare_get_random_var
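The reworked macro now takes the return type separately from the helper name, so each generated get_random_*_wait() gets the correct pointer type. A self-contained demonstration, with stubbed wait_for_random_bytes()/get_random_u32() standing in for the kernel functions declared earlier in this header and unlikely() dropped for brevity, shows the expansion in use:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	/* Stand-ins for the kernel's wait_for_random_bytes() and get_random_u32(). */
	static int wait_for_random_bytes(void) { return 0; }
	static u32 get_random_u32(void)        { return 0x12345678u; }

	/* Macro copied from the diff above (minus unlikely()). */
	#define declare_get_random_var_wait(name, ret_type)                  \
		static inline int get_random_ ## name ## _wait(ret_type *out) {  \
			int ret = wait_for_random_bytes();                            \
			if (ret)                                                      \
				return ret;                                               \
			*out = get_random_ ## name();                                 \
			return 0;                                                     \
		}

	declare_get_random_var_wait(u32, u32)

	int main(void)
	{
		u32 v;

		if (get_random_u32_wait(&v) == 0)
			printf("got 0x%08x\n", v);
		return 0;
	}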
|
||||
|
||||
unsigned long randomize_page(unsigned long start, unsigned long range);
|
||||
|
||||
/*
|
||||
* This is designed to be standalone for just prandom
|
||||
* users, but for now we include it from <linux/random.h>
|
||||
@ -118,22 +103,39 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
# include <asm/archrandom.h>
|
||||
#else
|
||||
static inline bool __must_check arch_get_random_long(unsigned long *v)
|
||||
static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
|
||||
static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
|
||||
static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
|
||||
static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Called from the boot CPU during startup; not valid to call once
|
||||
* secondary CPUs are up and preemption is possible.
|
||||
*/
|
||||
#ifndef arch_get_random_seed_long_early
|
||||
static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool __must_check arch_get_random_int(unsigned int *v)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
|
||||
{
|
||||
return false;
|
||||
WARN_ON(system_state != SYSTEM_BOOTING);
|
||||
return arch_get_random_seed_long(v);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef arch_get_random_long_early
|
||||
static inline bool __init arch_get_random_long_early(unsigned long *v)
|
||||
{
|
||||
WARN_ON(system_state != SYSTEM_BOOTING);
|
||||
return arch_get_random_long(v);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int random_prepare_cpu(unsigned int cpu);
|
||||
int random_online_cpu(unsigned int cpu);
|
||||
#endif
|
||||
|
||||
#ifndef MODULE
|
||||
extern const struct file_operations random_fops, urandom_fops;
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_RANDOM_H */
Some files were not shown because too many files have changed in this diff.