Merge "Merge android-mainline (6abe79c
) into msm-waipio"
This commit is contained in:
commit
42115c8592
4
.mailmap
4
.mailmap
@@ -169,6 +169,10 @@ Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.

  pgmajfault
    Number of major page faults incurred

  workingset_refault
    Number of refaults of previously evicted pages

  workingset_refault_anon
    Number of refaults of previously evicted anonymous pages.

  workingset_activate
    Number of refaulted pages that were immediately activated

  workingset_refault_file
    Number of refaults of previously evicted file pages.

  workingset_restore
    Number of restored pages which have been detected as an active
    workingset before they got reclaimed.

  workingset_activate_anon
    Number of refaulted anonymous pages that were immediately
    activated.

  workingset_activate_file
    Number of refaulted file pages that were immediately activated.

  workingset_restore_anon
    Number of restored anonymous pages which have been detected as
    an active workingset before they got reclaimed.

  workingset_restore_file
    Number of restored file pages which have been detected as an
    active workingset before they got reclaimed.

  workingset_nodereclaim
    Number of times a shadow node has been reclaimed
@@ -67,7 +67,7 @@ Parameters::
    the value passed in <key_size>.

<key_type>
    Either 'logon' or 'user' kernel key type.
    Either 'logon', 'user' or 'encrypted' kernel key type.

<key_description>
    The kernel keyring key description crypt target should look for
@@ -121,6 +121,14 @@ submit_from_crypt_cpus
    thread because it benefits CFQ to have writes submitted using the
    same context.

no_read_workqueue
    Bypass dm-crypt internal workqueue and process read requests synchronously.

no_write_workqueue
    Bypass dm-crypt internal workqueue and process write requests synchronously.
    This option is automatically enabled for host-managed zoned block devices
    (e.g. host-managed SMR hard-disks).

integrity:<bytes>:<type>
    The device requires additional <bytes> metadata per-sector stored
    in per-bio integrity structure. This metadata must be provided
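For orientation, the options above slot into an ordinary dm-crypt table line. A minimal sketch, assuming a 64-byte key held as a 'logon' keyring key; the device path, sector count and key name are invented for the example::

    dmsetup create crypt0 --table "0 2097152 crypt aes-xts-plain64 \
        :64:logon:cryptkey0 0 /dev/sda1 0 1 no_write_workqueue"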
@@ -182,9 +182,6 @@ in the order of reservations, but only after all previous records where
already committed. It is thus possible for slow producers to temporarily hold
off submitted records, that were reserved later.

Reservation/commit/consumer protocol is verified by litmus tests in
Documentation/litmus_tests/bpf-rb/_.

One interesting implementation bit, that significantly simplifies (and thus
speeds up as well) implementation of both producers and consumers is how data
area is mapped twice contiguously back-to-back in the virtual memory. This

@@ -200,7 +197,7 @@ a self-pacing notifications of new data being availability.
being available after commit only if consumer has already caught up right up to
the record being committed. If not, consumer still has to catch up and thus
will see new data anyways without needing an extra poll notification.
Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbuf.c_) show that
Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbufs.c) show that
this allows to achieve a very high throughput without having to resort to
tricks like "notify only every Nth sample", which are necessary with perf
buffer. For extreme cases, when BPF program wants more manual control of
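To make the reserve/commit protocol above concrete, a hedged BPF-side sketch; the map name, buffer size and event layout are invented for illustration::

    /* Sketch of the reservation/commit protocol described above. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 256 * 1024);  /* size must be a power of 2 */
    } rb SEC(".maps");

    struct event {
        int pid;
    };

    SEC("tracepoint/sched/sched_switch")
    int handle(void *ctx)
    {
        struct event *e;

        /* Reserve; records become visible in reservation order. */
        e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
        if (!e)
            return 0;  /* buffer full: drop (or count the drop) */

        e->pid = bpf_get_current_pid_tgid() >> 32;

        /* Commit; consumer notification is self-pacing, as described. */
        bpf_ringbuf_submit(e, 0);
        return 0;
    }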
@@ -22,7 +22,7 @@
    | nios2:    | TODO |
    | openrisc: | TODO |
    | parisc:   | TODO |
    | powerpc:  | ok   |
    | powerpc:  | TODO |
    | riscv:    | ok   |
    | s390:     | ok   |
    | sh:       | TODO |
@@ -39,10 +39,10 @@ which can help simplify cross compiling. ::

    ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang

``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead
``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For
``CROSS_COMPILE`` is used to set a command line flag: ``--target=<triple>``. For
example: ::

    clang --target aarch64-linux-gnu foo.c
    clang --target=aarch64-linux-gnu foo.c

LLVM Utilities
--------------
@@ -206,6 +206,7 @@ Userspace to kernel:
  ``ETHTOOL_MSG_TSINFO_GET``            get timestamping info
  ``ETHTOOL_MSG_CABLE_TEST_ACT``        action start cable test
  ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``    action start raw TDR cable test
  ``ETHTOOL_MSG_TUNNEL_INFO_GET``       get tunnel offload info
  ===================================== ================================

Kernel to userspace:

@@ -239,6 +240,7 @@ Kernel to userspace:
  ``ETHTOOL_MSG_TSINFO_GET_REPLY``      timestamping info
  ``ETHTOOL_MSG_CABLE_TEST_NTF``        Cable test results
  ``ETHTOOL_MSG_CABLE_TEST_TDR_NTF``    Cable test TDR results
  ``ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY`` tunnel offload info
  ===================================== =================================

``GET`` requests are sent by userspace applications to retrieve device

@@ -1363,4 +1365,5 @@ are netlink only.
  ``ETHTOOL_SFECPARAM``               n/a
  n/a                                 ``ETHTOOL_MSG_CABLE_TEST_ACT``
  n/a                                 ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
  n/a                                 ``ETHTOOL_MSG_TUNNEL_INFO_GET``
  =================================== =====================================
@@ -701,23 +701,6 @@ Memory Consistency Flags
    :stub-columns: 0
    :widths:       3 1 4

    * .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`:

      - ``V4L2_FLAG_MEMORY_NON_CONSISTENT``
      - 0x00000001
      - A buffer is allocated either in consistent (it will be automatically
        coherent between the CPU and the bus) or non-consistent memory. The
        latter can provide performance gains, for instance the CPU cache
        sync/flush operations can be avoided if the buffer is accessed by the
        corresponding device only and the CPU does not read/write to/from that
        buffer. However, this requires extra care from the driver -- it must
        guarantee memory consistency by issuing a cache flush/sync when
        consistency is needed. If this flag is set V4L2 will attempt to
        allocate the buffer in non-consistent memory. The flag takes effect
        only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
        queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
        <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.

.. c:type:: v4l2_memory

enum v4l2_memory
@@ -581,6 +581,8 @@ enum v4l2_mpeg_video_bitrate_mode -
      - Variable bitrate
    * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR``
      - Constant bitrate
    * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CQ``
      - Constant quality

@@ -592,6 +594,48 @@ enum v4l2_mpeg_video_bitrate_mode -
    the average video bitrate. It is ignored if the video bitrate mode
    is set to constant bitrate.

``V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY (integer)``
    Constant quality level control. This control is applicable when
    ``V4L2_CID_MPEG_VIDEO_BITRATE_MODE`` value is
    ``V4L2_MPEG_VIDEO_BITRATE_MODE_CQ``. Valid range is 1 to 100
    where 1 indicates lowest quality and 100 indicates highest quality.
    Encoder will decide the appropriate quantization parameter and
    bitrate to produce requested frame quality.

``V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE (enum)``

enum v4l2_mpeg_video_frame_skip_mode -
    Indicates in what conditions the encoder should skip frames. If
    encoding a frame would cause the encoded stream to be larger than a
    chosen data limit then the frame will be skipped. Possible values
    are:

.. tabularcolumns:: |p{9.2cm}|p{8.3cm}|

.. raw:: latex

    \small

.. flat-table::
    :header-rows:  0
    :stub-columns: 0

    * - ``V4L2_MPEG_FRAME_SKIP_MODE_DISABLED``
      - Frame skip mode is disabled.
    * - ``V4L2_MPEG_FRAME_SKIP_MODE_LEVEL_LIMIT``
      - Frame skip mode enabled and buffer limit is set by the chosen
        level and is defined by the standard.
    * - ``V4L2_MPEG_FRAME_SKIP_MODE_BUF_LIMIT``
      - Frame skip mode enabled and buffer limit is set by the
        :ref:`VBV (MPEG1/2/4) <v4l2-mpeg-video-vbv-size>` or
        :ref:`CPB (H264) buffer size <v4l2-mpeg-video-h264-cpb-size>` control.

.. raw:: latex

    \normalsize

``V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (integer)``
    For every captured frame, skip this many subsequent frames (default
    0).

@@ -1163,6 +1207,8 @@ enum v4l2_mpeg_video_h264_entropy_mode -
    Quantization parameter for a B frame for MPEG4. Valid range: from 1
    to 31.

.. _v4l2-mpeg-video-vbv-size:

``V4L2_CID_MPEG_VIDEO_VBV_SIZE (integer)``
    The Video Buffer Verifier size in kilobytes, it is used as a
    limitation of frame skip. The VBV is defined in the standard as a

@@ -1200,6 +1246,8 @@ enum v4l2_mpeg_video_h264_entropy_mode -
    Force a key frame for the next queued buffer. Applicable to
    encoders. This is a general, codec-agnostic keyframe control.

.. _v4l2-mpeg-video-h264-cpb-size:

``V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (integer)``
    The Coded Picture Buffer size in kilobytes, it is used as a
    limitation of frame skip. The CPB is defined in the H264 standard as

@@ -3316,6 +3364,49 @@ enum v4l2_mpeg_video_vp9_profile -
    * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3``
      - Profile 3

.. _v4l2-mpeg-video-vp9-level:

``V4L2_CID_MPEG_VIDEO_VP9_LEVEL (enum)``

enum v4l2_mpeg_video_vp9_level -
    This control allows selecting the level for VP9 encoder.
    This is also used to enumerate supported levels by VP9 encoder or decoder.
    More information can be found at
    `webmproject <https://www.webmproject.org/vp9/levels/>`__. Possible values are:

.. flat-table::
    :header-rows:  0
    :stub-columns: 0

    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_1_0``
      - Level 1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_1_1``
      - Level 1.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_2_0``
      - Level 2
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_2_1``
      - Level 2.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_3_0``
      - Level 3
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_3_1``
      - Level 3.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_4_0``
      - Level 4
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_4_1``
      - Level 4.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_5_0``
      - Level 5
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_5_1``
      - Level 5.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_5_2``
      - Level 5.2
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_6_0``
      - Level 6
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_6_1``
      - Level 6.1
    * - ``V4L2_MPEG_VIDEO_VP9_LEVEL_6_2``
      - Level 6.2

High Efficiency Video Coding (HEVC/H.265) Control Reference
===========================================================
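A hedged user-space sketch of the constant-quality controls documented above; 'fd' is an open encoder video node and the quality value 70 is arbitrary::

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void set_constant_quality(int fd)
    {
        struct v4l2_control ctrl;

        /* Select the constant-quality rate-control mode. */
        ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
        ctrl.value = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ;
        ioctl(fd, VIDIOC_S_CTRL, &ctrl);

        /* Only honoured while the bitrate mode is CQ; range 1..100. */
        ctrl.id = V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY;
        ctrl.value = 70;
        ioctl(fd, VIDIOC_S_CTRL, &ctrl);
    }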
@@ -120,13 +120,9 @@ than the number requested.
       If you want to just query the capabilities without making any
       other changes, then set ``count`` to 0, ``memory`` to
       ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type.
    * - __u32
      - ``flags``
      - Specifies additional buffer management attributes.
        See :ref:`memory-flags`.

    * - __u32
      - ``reserved``\ [6]
      - ``reserved``\ [7]
      - A place holder for future extensions. Drivers and applications
        must set the array to zero.
@@ -112,17 +112,10 @@ aborting or finishing any DMA in progress, an implicit
       ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will
       free any previously allocated buffers, so this is typically something
       that will be done at the start of the application.
    * - union {
      - (anonymous)
    * - __u32
      - ``flags``
      - Specifies additional buffer management attributes.
        See :ref:`memory-flags`.
    * - __u32
      - ``reserved``\ [1]
      - Kept for backwards compatibility. Use ``flags`` instead.
    * - }
      -
      - A place holder for future extensions. Drivers and applications
        must set the array to zero.

.. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}|

@@ -169,7 +162,6 @@ aborting or finishing any DMA in progress, an implicit
      - This capability is set by the driver to indicate that the queue supports
        cache and memory management hints. However, it's only valid when the
        queue is used for :ref:`memory mapping <mmap>` streaming I/O. See
        :ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT <V4L2-FLAG-MEMORY-NON-CONSISTENT>`,
        :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>` and
        :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>`.
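The query-without-allocating idiom mentioned above, as a hedged sketch (error handling elided; a capture buffer type is assumed)::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static __u32 query_queue_caps(int fd)
    {
        struct v4l2_requestbuffers req;

        memset(&req, 0, sizeof(req));
        req.count = 0;                   /* allocate nothing ... */
        req.memory = V4L2_MEMORY_MMAP;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        ioctl(fd, VIDIOC_REQBUFS, &req); /* ... but fill in capabilities */
        return req.capabilities;
    }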
@@ -6173,3 +6173,23 @@ specific interfaces must be consistent, i.e. if one says the feature
is supported, then the other should as well and vice versa. For arm64
see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".

8.25 KVM_CAP_S390_DIAG318
-------------------------

:Architectures: s390

This capability enables a guest to set information about its control program
(i.e. guest kernel type and version). The information is helpful during
system/firmware service events, providing additional data about the guest
environments running on the machine.

The information is associated with the DIAGNOSE 0x318 instruction, which sets
an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and
a 7-byte Control Program Version Code (CPVC). The CPNC determines what
environment the control program is running in (e.g. Linux, z/VM...), and the
CPVC is used for information specific to OS (e.g. Linux version, Linux
distribution...)

If this capability is available, then the CPNC and CPVC can be synchronized
between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318).
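A hedged sketch of how the 8-byte value described above decomposes; the helper name is invented for illustration::

    #include <stdint.h>

    /* One CPNC byte in the top byte, seven CPVC bytes below it. */
    static inline uint64_t diag318_value(uint8_t cpnc, uint64_t cpvc)
    {
        return ((uint64_t)cpnc << 56) | (cpvc & 0x00ffffffffffffffULL);
    }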
MAINTAINERS (21 changes)
@@ -4408,12 +4408,6 @@ T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
F: include/linux/configfs.h

CONNECTOR
M: Evgeniy Polyakov <zbr@ioremap.net>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/connector/

CONSOLE SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Supported

@@ -6180,7 +6174,7 @@ F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
F: drivers/edac/aspeed_edac.c

EDAC-BLUEFIELD
M: Shravan Kumar Ramani <sramani@nvidia.com>
M: Shravan Kumar Ramani <shravankr@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c

@@ -8329,8 +8323,9 @@ S: Supported
F: drivers/pci/hotplug/rpaphp*

IBM Power SRIOV Virtual NIC Device Driver
M: Thomas Falcon <tlfalcon@linux.ibm.com>
M: John Allen <jallen@linux.ibm.com>
M: Dany Madden <drt@linux.ibm.com>
M: Lijun Pan <ljp@linux.ibm.com>
M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*

@@ -8344,7 +8339,7 @@ F: arch/powerpc/platforms/powernv/copy-paste.h
F: arch/powerpc/platforms/powernv/vas*

IBM Power Virtual Ethernet Device Driver
M: Thomas Falcon <tlfalcon@linux.ibm.com>
M: Cristobal Forno <cforno12@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*

@@ -9258,7 +9253,7 @@ F: drivers/firmware/iscsi_ibft*

ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg <sagi@grimberg.me>
M: Max Gurtovoy <maxg@nvidia.com>
M: Max Gurtovoy <mgurtovoy@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org

@@ -11049,6 +11044,7 @@ F: drivers/char/hw_random/mtk-rng.c

MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
M: Landen Chao <Landen.Chao@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mt7530.*

@@ -12062,6 +12058,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
F: Documentation/devicetree/bindings/net/
F: drivers/connector/
F: drivers/net/
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h

@@ -16170,7 +16167,7 @@ M: Leon Luo <leonl@leopardimaging.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/imx274.txt
F: Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
F: drivers/media/i2c/imx274.c

SONY IMX290 SENSOR DRIVER
Makefile (3 changes)
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*

@@ -435,6 +435,7 @@ OBJDUMP = llvm-objdump
READELF = llvm-readelf
OBJSIZE = llvm-size
STRIP = llvm-strip
KBUILD_HOSTLDFLAGS += -fuse-ld=lld --rtlib=compiler-rt
else
CC = $(CROSS_COMPILE)gcc
LD = $(CROSS_COMPILE)ld
abi_gki_aarch64.xml (208514 changes)
File diff suppressed because it is too large
@@ -49,6 +49,7 @@ config ARM
    select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
    select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
    select GENERIC_CLOCKEVENTS_BROADCAST if SMP
    select GENERIC_IRQ_IPI if SMP
    select GENERIC_CPU_AUTOPROBE
    select GENERIC_EARLY_IOREMAP
    select GENERIC_IDLE_POLL_SETUP
@@ -116,7 +116,6 @@ spi2: spi@400 {
    switch0: ksz8563@0 {
        compatible = "microchip,ksz8563";
        reg = <0>;
        phy-mode = "mii";
        reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;

        spi-max-frequency = <500000>;

@@ -140,6 +139,7 @@ port@2 {
    reg = <2>;
    label = "cpu";
    ethernet = <&macb0>;
    phy-mode = "mii";

    fixed-link {
        speed = <100>;
        full-duplex;
@@ -6,29 +6,12 @@
#include <linux/threads.h>
#include <asm/irq.h>

/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
#define NR_IPI 7

typedef struct {
    unsigned int __softirq_pending;
#ifdef CONFIG_SMP
    unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */

#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)

#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#else
#define smp_irq_stat_cpu(cpu) 0
#endif

#define arch_irq_stat_cpu smp_irq_stat_cpu

#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1

#endif /* __ASM_HARDIRQ_H */
@@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
 */
extern void smp_init_cpus(void);

/*
 * Provide a function to raise an IPI cross call on CPUs in callmap.
 * Register IPI interrupts with the arch SMP code
 */
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
extern void set_smp_ipi_range(int ipi_base, int nr_ipi);

/*
 * Called from platform specific assembly code, this is the
@@ -18,7 +18,6 @@
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>

@@ -65,18 +66,26 @@ enum ipi_msg_type {
    IPI_CPU_STOP,
    IPI_IRQ_WORK,
    IPI_COMPLETION,
    NR_IPI,
    /*
     * CPU_BACKTRACE is special and not included in NR_IPI
     * or tracable with trace_ipi_*
     */
    IPI_CPU_BACKTRACE,
    IPI_CPU_BACKTRACE = NR_IPI,
    /*
     * SGI8-15 can be reserved by secure firmware, and thus may
     * not be usable by the kernel. Please keep the above limited
     * to at most 8 entries.
     */
    MAX_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

@@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
    return cpu != 0;
}

static void ipi_teardown(int cpu)
{
    int i;

    if (WARN_ON_ONCE(!ipi_irq_base))
        return;

    for (i = 0; i < nr_ipi; i++)
        disable_percpu_irq(ipi_irq_base + i);
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */

@@ -247,6 +267,7 @@ int __cpu_disable(void)
     * and we must not schedule until we're ready to give up the cpu.
     */
    set_cpu_online(cpu, false);
    ipi_teardown(cpu);

    /*
     * OK - migrate IRQs away from this CPU

@@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void)

    notify_cpu_starting(cpu);

    ipi_setup(cpu);

    calibrate_delay();

    smp_store_cpu_info(cpu);

@@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
    }
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
    if (!__smp_cross_call)
        __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
    S(IPI_WAKEUP, "CPU wakeup interrupts"),

@@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
    S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
    trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
    __smp_cross_call(target, ipinr);
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
{
    unsigned int cpu, i;

    for (i = 0; i < NR_IPI; i++) {
        unsigned int irq;

        if (!ipi_desc[i])
            continue;

        irq = irq_desc_get_irq(ipi_desc[i]);
        seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

        for_each_online_cpu(cpu)
            seq_printf(p, "%10u ",
                __get_irq_stat(cpu, ipi_irqs[i]));
            seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));

        seq_printf(p, " %s\n", ipi_types[i]);
    }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
    u64 sum = 0;
    int i;

    for (i = 0; i < NR_IPI; i++)
        sum += __get_irq_stat(cpu, ipi_irqs[i]);

    return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
    smp_cross_call(mask, IPI_CALL_FUNC);

@@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
    handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
static void do_handle_IPI(int ipinr)
{
    unsigned int cpu = smp_processor_id();
    struct pt_regs *old_regs = set_irq_regs(regs);

    if ((unsigned)ipinr < NR_IPI) {
    if ((unsigned)ipinr < NR_IPI)
        trace_ipi_entry_rcuidle(ipi_types[ipinr]);
        __inc_irq_stat(cpu, ipi_irqs[ipinr]);
    }

    switch (ipinr) {
    case IPI_WAKEUP:

@@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
    case IPI_TIMER:
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
        break;
#endif

@@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        break;

    case IPI_CALL_FUNC:
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
        break;

    case IPI_CPU_STOP:
        irq_enter();
        ipi_cpu_stop(cpu);
        irq_exit();
        break;

#ifdef CONFIG_IRQ_WORK
    case IPI_IRQ_WORK:
        irq_enter();
        irq_work_run();
        irq_exit();
        break;
#endif

    case IPI_COMPLETION:
        irq_enter();
        ipi_complete(cpu);
        irq_exit();
        break;

    case IPI_CPU_BACKTRACE:
        printk_nmi_enter();
        irq_enter();
        nmi_cpu_backtrace(regs);
        irq_exit();
        nmi_cpu_backtrace(get_irq_regs());
        printk_nmi_exit();
        break;

@@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

    if ((unsigned)ipinr < NR_IPI)
        trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);

    irq_enter();
    do_handle_IPI(ipinr);
    irq_exit();

    set_irq_regs(old_regs);
}

static irqreturn_t ipi_handler(int irq, void *data)
{
    do_handle_IPI(irq - ipi_irq_base);
    return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
    trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
    __ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
    int i;

    if (WARN_ON_ONCE(!ipi_irq_base))
        return;

    for (i = 0; i < nr_ipi; i++)
        enable_percpu_irq(ipi_irq_base + i, 0);
}

void __init set_smp_ipi_range(int ipi_base, int n)
{
    int i;

    WARN_ON(n < MAX_IPI);
    nr_ipi = min(n, MAX_IPI);

    for (i = 0; i < nr_ipi; i++) {
        int err;

        err = request_percpu_irq(ipi_base + i, ipi_handler,
                     "IPI", &irq_stat);
        WARN_ON(err);

        ipi_desc[i] = irq_to_desc(ipi_base + i);
        irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
    }

    ipi_irq_base = ipi_base;

    /* Setup the boot CPU immediately */
    ipi_setup(smp_processor_id());
}

void smp_send_reschedule(int cpu)
{
    smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);

@@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier);

static void raise_nmi(cpumask_t *mask)
{
    __smp_cross_call(mask, IPI_CPU_BACKTRACE);
    __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
@@ -106,6 +106,7 @@ config ARM64
    select GENERIC_CPU_VULNERABILITIES
    select GENERIC_EARLY_IOREMAP
    select GENERIC_IDLE_POLL_SETUP
    select GENERIC_IRQ_IPI
    select GENERIC_IRQ_MULTI_HANDLER
    select GENERIC_IRQ_PROBE
    select GENERIC_IRQ_SHOW
@@ -19,8 +19,6 @@ CONFIG_MFD_SPMI_PMIC=m
CONFIG_SPMI_MSM_PMIC_ARB=m
CONFIG_REGULATOR_QCOM_RPMH=m
CONFIG_REGULATOR_QCOM_SPMI=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_QCA=y
CONFIG_DRM_MSM=m
# CONFIG_DRM_MSM_DSI_28NM_PHY is not set
# CONFIG_DRM_MSM_DSI_20NM_PHY is not set

@@ -57,6 +55,7 @@ CONFIG_PHY_QCOM_QUSB2=m
CONFIG_PHY_QCOM_USB_HS=m
CONFIG_QCOM_QFPROM=m
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
CONFIG_INTERCONNECT_QCOM_SDM845=m
CONFIG_INCREMENTAL_FS=m
CONFIG_QCOM_RPMH=m

@@ -87,6 +86,7 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=m
CONFIG_QCOM_PDC=m
CONFIG_QCOM_SCM=m
CONFIG_ARM_SMMU=m
CONFIG_ARM_QCOM_CPUFREQ_HW=m
# XXX Audio bits start here
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_MUX=m
@@ -82,6 +82,7 @@ CONFIG_KVM=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_SHADOW_CALL_STACK=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y

@@ -207,6 +208,13 @@ CONFIG_NET_CLS_ACT=y
CONFIG_VSOCKETS=y
CONFIG_BPF_JIT=y
CONFIG_BT=y
CONFIG_BT_RFCOMM=y
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_HIDP=y
CONFIG_BT_HCIBTSDIO=y
CONFIG_BT_HCIUART=y
CONFIG_BT_HCIUART_LL=y
CONFIG_BT_HCIUART_QCA=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
# CONFIG_CFG80211_CRDA_SUPPORT is not set

@@ -376,6 +384,7 @@ CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NINTENDO=y
CONFIG_HID_PLANTRONICS=y
CONFIG_HID_SONY=y
CONFIG_SONY_FF=y
CONFIG_HID_STEAM=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_OTG=y

@@ -399,6 +408,7 @@ CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
CONFIG_TYPEC_TCPCI=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set

@@ -409,6 +419,7 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_CLASS_FLASH=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y

@@ -442,6 +453,7 @@ CONFIG_IIO_BUFFER=y
CONFIG_IIO_TRIGGER=y
CONFIG_PWM=y
CONFIG_GENERIC_PHY=y
CONFIG_RAS=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDERFS=y
@@ -13,21 +13,12 @@
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

#define NR_IPI 7

typedef struct {
    unsigned int __softirq_pending;
    unsigned int ipi_irqs[NR_IPI];
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */

#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)

u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu smp_irq_stat_cpu

#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1

struct nmi_ctx {
@@ -2,11 +2,9 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H

#include <asm/smp.h>

static inline bool arch_irq_work_has_interrupt(void)
{
    return !!__smp_cross_call;
    return true;
}

#endif /* __ASM_IRQ_WORK_H */
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
    return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
    return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
    return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
        kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
    return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)

@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
    return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
    return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
    return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;

@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
    if (kvm_vcpu_abt_iss1tw(vcpu))
        return true;

    if (kvm_vcpu_trap_is_iabt(vcpu))
        return false;
@@ -55,16 +55,6 @@ static inline void set_cpu_logical_map(int cpu, u64 hwid)

struct seq_file;

/*
 * generate IPI list text
 */
extern void show_ipi_list(struct seq_file *p, int prec);

/*
 * Called from C code, this handles an IPI.
 */
extern void handle_IPI(int ipinr, struct pt_regs *regs);

/*
 * Discover the set of possible CPUs and determine their
 * SMP operations.

@@ -72,11 +62,9 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
extern void smp_init_cpus(void);

/*
 * Provide a function to raise an IPI cross call on CPUs in callmap.
 * Register IPI interrupts with the arch SMP code
 */
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));

extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
extern void set_smp_ipi_range(int ipi_base, int nr_ipi);

/*
 * Called from the secondary holding pen, this is the secondary CPU entry point.
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        .desc = "ARM erratum 1418040",
        .capability = ARM64_WORKAROUND_1418040,
        ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
             ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
        /*
         * We need to allow affected CPUs to come in late, but
         * also need the non-affected CPUs to be able to come
         * in at any point in time. Wonderful.
         */
        .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
    },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
@@ -284,21 +284,25 @@ void register_user_break_hook(struct break_hook *hook)
{
    register_debug_hook(&hook->node, &user_break_hook);
}
EXPORT_SYMBOL_GPL(register_user_break_hook);

void unregister_user_break_hook(struct break_hook *hook)
{
    unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_user_break_hook);

void register_kernel_break_hook(struct break_hook *hook)
{
    register_debug_hook(&hook->node, &kernel_break_hook);
}
EXPORT_SYMBOL_GPL(register_kernel_break_hook);

void unregister_kernel_break_hook(struct break_hook *hook)
{
    unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_kernel_break_hook);

static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
@@ -10,10 +10,10 @@
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/memory.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>

@@ -22,20 +22,11 @@
#include <asm/daifflags.h>
#include <asm/vmap_stack.h>

unsigned long irq_err_count;

/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);

DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

int arch_show_interrupts(struct seq_file *p, int prec)
{
    show_ipi_list(p, prec);
    seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
    return 0;
}

#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
    struct pv_time_stolen_time_region *reg;

    reg = per_cpu_ptr(&stolen_time_region, cpu);
    if (!reg->kaddr) {
        pr_warn_once("stolen time enabled but not configured for cpu %d\n",
                 cpu);

    /*
     * paravirt_steal_clock() may be called before the CPU
     * online notification callback runs. Until the callback
     * has run we just return zero.
     */
    if (!reg->kaddr)
        return 0;
    }

    return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
}

static int stolen_time_dying_cpu(unsigned int cpu)
static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
    struct pv_time_stolen_time_region *reg;

@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
    return 0;
}

static int init_stolen_time_cpu(unsigned int cpu)
static int stolen_time_cpu_online(unsigned int cpu)
{
    struct pv_time_stolen_time_region *reg;
    struct arm_smccc_res res;

@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
    return 0;
}

static int pv_time_init_stolen_time(void)
static int __init pv_time_init_stolen_time(void)
{
    int ret;

    ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
                "hypervisor/arm/pvtime:starting",
                init_stolen_time_cpu, stolen_time_dying_cpu);
    ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                "hypervisor/arm/pvtime:online",
                stolen_time_cpu_online,
                stolen_time_cpu_down_prepare);
    if (ret < 0)
        return ret;
    return 0;
}

static bool has_pv_steal_clock(void)
static bool __init has_pv_steal_clock(void)
{
    struct arm_smccc_res res;
@@ -30,6 +30,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

@@ -72,10 +73,18 @@ enum ipi_msg_type {
    IPI_CPU_CRASH_STOP,
    IPI_TIMER,
    IPI_IRQ_WORK,
    IPI_WAKEUP
    IPI_WAKEUP,
    NR_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;

static void ipi_setup(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)

@@ -237,6 +246,8 @@ asmlinkage notrace void secondary_start_kernel(void)
     */
    notify_cpu_starting(cpu);

    ipi_setup(cpu);

    store_cpu_topology(cpu);
    numa_add_cpu(cpu);

@@ -302,6 +313,7 @@ int __cpu_disable(void)
     * and we must not schedule until we're ready to give up the cpu.
     */
    set_cpu_online(cpu, false);
    ipi_teardown(cpu);

    /*
     * OK - migrate IRQs away from this CPU

@@ -772,13 +784,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
    }
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
    __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
    S(IPI_RESCHEDULE, "Rescheduling interrupts"),

@@ -790,35 +795,25 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
    S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
    trace_ipi_raise(target, ipi_types[ipinr]);
    __smp_cross_call(target, ipinr);
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
unsigned long irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
    unsigned int cpu, i;

    for (i = 0; i < NR_IPI; i++) {
        unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
        seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
               prec >= 4 ? " " : "");
        for_each_online_cpu(cpu)
            seq_printf(p, "%10u ",
                __get_irq_stat(cpu, ipi_irqs[i]));
            seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
        seq_printf(p, " %s\n", ipi_types[i]);
    }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
    u64 sum = 0;
    int i;

    for (i = 0; i < NR_IPI; i++)
        sum += __get_irq_stat(cpu, ipi_irqs[i]);

    return sum;
    seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
    return 0;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)

@@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
    if (__smp_cross_call)
        smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
    smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

@@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
static void do_handle_IPI(int ipinr)
{
    unsigned int cpu = smp_processor_id();
    struct pt_regs *old_regs = set_irq_regs(regs);

    if ((unsigned)ipinr < NR_IPI) {
    if ((unsigned)ipinr < NR_IPI)
        trace_ipi_entry_rcuidle(ipi_types[ipinr]);
        __inc_irq_stat(cpu, ipi_irqs[ipinr]);
    }

    switch (ipinr) {
    case IPI_RESCHEDULE:

@@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        break;

    case IPI_CALL_FUNC:
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
        break;

    case IPI_CPU_STOP:
        irq_enter();
        local_cpu_stop();
        irq_exit();
        break;

    case IPI_CPU_CRASH_STOP:
        if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
            irq_enter();
            ipi_cpu_crash_stop(cpu, regs);
            ipi_cpu_crash_stop(cpu, get_irq_regs());

            unreachable();
        }

@@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
    case IPI_TIMER:
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
        break;
#endif

#ifdef CONFIG_IRQ_WORK
    case IPI_IRQ_WORK:
        irq_enter();
        irq_work_run();
        irq_exit();
        break;
#endif

@@ -957,7 +939,66 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

    if ((unsigned)ipinr < NR_IPI)
        trace_ipi_exit_rcuidle(ipi_types[ipinr]);
    set_irq_regs(old_regs);
}

static irqreturn_t ipi_handler(int irq, void *data)
{
    do_handle_IPI(irq - ipi_irq_base);
    return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
    trace_ipi_raise(target, ipi_types[ipinr]);
    __ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
    int i;

    if (WARN_ON_ONCE(!ipi_irq_base))
        return;

    for (i = 0; i < nr_ipi; i++)
        enable_percpu_irq(ipi_irq_base + i, 0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
    int i;

    if (WARN_ON_ONCE(!ipi_irq_base))
        return;

    for (i = 0; i < nr_ipi; i++)
        disable_percpu_irq(ipi_irq_base + i);
}
#endif

void __init set_smp_ipi_range(int ipi_base, int n)
{
    int i;

    WARN_ON(n < NR_IPI);
    nr_ipi = min(n, NR_IPI);

    for (i = 0; i < nr_ipi; i++) {
        int err;

        err = request_percpu_irq(ipi_base + i, ipi_handler,
                     "IPI", &cpu_number);
        WARN_ON(err);

        ipi_desc[i] = irq_to_desc(ipi_base + i);
        irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
    }

    ipi_irq_base = ipi_base;

    /* Setup the boot CPU immediately */
    ipi_setup(smp_processor_id());
}

void smp_send_reschedule(int cpu)
@@ -136,6 +136,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,

    put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(dump_backtrace);

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
@@ -449,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
        kvm_vcpu_dabt_isvalid(vcpu) &&
        !kvm_vcpu_abt_issea(vcpu) &&
        !kvm_vcpu_dabt_iss1tw(vcpu);
        !kvm_vcpu_abt_iss1tw(vcpu);

    if (valid) {
        int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -1849,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
    struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;

    write_fault = kvm_is_write_fault(vcpu);
    exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
    exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
    VM_BUG_ON(write_fault && exec_fault);

    if (fault_status == FSC_PERM && !write_fault && !exec_fault) {

@@ -2131,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        goto out;
    }

    if (kvm_vcpu_dabt_iss1tw(vcpu)) {
    if (kvm_vcpu_abt_iss1tw(vcpu)) {
        kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
        ret = 1;
        goto out_unlock;
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
    }
}

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
static inline int bpf2a64_offset(int bpf_insn, int off,
                 const struct jit_ctx *ctx)
{
    int to = ctx->offset[bpf_to];
    /* -1 to account for the Branch instruction */
    int from = ctx->offset[bpf_from] - 1;

    return to - from;
    /* BPF JMP offset is relative to the next instruction */
    bpf_insn++;
    /*
     * Whereas arm64 branch instructions encode the offset
     * from the branch itself, so we must subtract 1 from the
     * instruction offset.
     */
    return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

static void jit_fill_hole(void *area, unsigned int size)

@@ -642,7 +645,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,

    /* JUMP off */
    case BPF_JMP | BPF_JA:
        jmp_offset = bpf2a64_offset(i + off, i, ctx);
        jmp_offset = bpf2a64_offset(i, off, ctx);
        check_imm26(jmp_offset);
        emit(A64_B(jmp_offset), ctx);
        break;

@@ -669,7 +672,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
    case BPF_JMP32 | BPF_JSLE | BPF_X:
        emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
        jmp_offset = bpf2a64_offset(i + off, i, ctx);
        jmp_offset = bpf2a64_offset(i, off, ctx);
        check_imm19(jmp_offset);
        switch (BPF_OP(code)) {
        case BPF_JEQ:

@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
    const struct bpf_prog *prog = ctx->prog;
    int i;

    /*
     * - offset[0] offset of the end of prologue,
     *   start of the 1st instruction.
     * - offset[1] - offset of the end of 1st instruction,
     *   start of the 2nd instruction
     * [....]
     * - offset[3] - offset of the end of 3rd instruction,
     *   start of 4th instruction
     */
    for (i = 0; i < prog->len; i++) {
        const struct bpf_insn *insn = &prog->insnsi[i];
        int ret;

        if (ctx->image == NULL)
            ctx->offset[i] = ctx->idx;
        ret = build_insn(insn, ctx, extra_pass);
        if (ret > 0) {
            i++;

@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
            ctx->offset[i] = ctx->idx;
            continue;
        }
        if (ctx->image == NULL)
            ctx->offset[i] = ctx->idx;
        if (ret)
            return ret;
    }
    /*
     * offset is allocated with prog->len + 1 so fill in
     * the last element with the offset after the last
     * instruction (end of program)
     */
    if (ctx->image == NULL)
        ctx->offset[i] = ctx->idx;

    return 0;
}

@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
    memset(&ctx, 0, sizeof(ctx));
    ctx.prog = prog;

    ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
    ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
    if (ctx.offset == NULL) {
        prog = orig_prog;
        goto out_off;

@@ -1089,7 +1108,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
    prog->jited_len = prog_size;

    if (!prog->is_func || extra_pass) {
        bpf_prog_fill_jited_linfo(prog, ctx.offset);
        bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
        kfree(ctx.offset);
        kfree(jit_data);
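A worked illustration of the new offset math, with invented numbers:

    /* Suppose ctx->offset[] = {2, 5, 7, 9} after the renumbering above
     * (offset[i] = end of the prologue or of instruction i-1). For a
     * BPF jump at instruction 0 with off = 2:
     *
     *     bpf2a64_offset(0, 2, ctx)
     *         = offset[0 + 1 + 2] - (offset[0 + 1] - 1)
     *         = 9 - (5 - 1) = 5
     *
     * i.e. the A64 branch, which encodes offsets relative to itself,
     * jumps 5 instructions forward, matching BPF's next-instruction-
     * relative semantics.
     */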
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
    buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
    if (map_start < map_end)
        memmap_init_zone((unsigned long)(map_end - map_start),
                 args->nid, args->zone, page_to_pfn(map_start),
                 MEMMAP_EARLY, NULL);
                 MEMINIT_EARLY, NULL);
    return 0;
}

@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
         unsigned long start_pfn)
{
    if (!vmem_map) {
        memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
                 NULL);
        memmap_init_zone(size, nid, zone, start_pfn,
                 MEMINIT_EARLY, NULL);
    } else {
        struct page *start;
        struct memmap_init_callback_data args;
@@ -877,6 +877,7 @@ config SNI_RM
    select I8253
    select I8259
    select ISA
    select MIPS_L1_CACHE_SHIFT_6
    select SWAP_IO_SPACE if CPU_BIG_ENDIAN
    select SYS_HAS_CPU_R4X00
    select SYS_HAS_CPU_R5000
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
{
    struct cpuinfo_mips *c = &current_cpu_data;

    if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
    if (c->cputype == CPU_74K) {
        pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
        bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
    case CPU_34K:
    case CPU_1004K:
    case CPU_74K:
    case CPU_1074K:
    case CPU_M14KC:
    case CPU_M14KEC:
    case CPU_INTERAPTIV:
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
endif

# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-y += $(call cc-option,-mno-loongson-mmi)

#
# Loongson Machines' Support
#
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
        if (res)
            goto fault;

        set_fpr64(current->thread.fpu.fpr,
              insn.loongson3_lswc2_format.rt, value);
        set_fpr64(current->thread.fpu.fpr,
              insn.loongson3_lswc2_format.rq, value_next);
        set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
        set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
        compute_return_epc(regs);
        own_fpu(1);
    }

@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
            goto sigbus;

        lose_fpu(1);
        value_next = get_fpr64(current->thread.fpu.fpr,
                       insn.loongson3_lswc2_format.rq);
        value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);

        StoreDW(addr + 8, value_next, res);
        if (res)
            goto fault;

        value = get_fpr64(current->thread.fpu.fpr,
                  insn.loongson3_lswc2_format.rt);
        value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);

        StoreDW(addr, value, res);
        if (res)

@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
        if (res)
            goto fault;

        set_fpr64(current->thread.fpu.fpr,
              insn.loongson3_lsdc2_format.rt, value);
        set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
        compute_return_epc(regs);
        own_fpu(1);

@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
        if (res)
            goto fault;

        set_fpr64(current->thread.fpu.fpr,
              insn.loongson3_lsdc2_format.rt, value);
        set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
        compute_return_epc(regs);
        own_fpu(1);
        break;

@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
            goto sigbus;

        lose_fpu(1);
        value = get_fpr64(current->thread.fpu.fpr,
                  insn.loongson3_lsdc2_format.rt);
        value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

        StoreW(addr, value, res);
        if (res)

@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
            goto sigbus;

        lose_fpu(1);
        value = get_fpr64(current->thread.fpu.fpr,
                  insn.loongson3_lsdc2_format.rt);
        value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

        StoreDW(addr, value, res);
        if (res)
|
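The set_fpr64()/get_fpr64() changes above all follow one pattern: index the FPR array before the call instead of passing the register number as the accessor's slice argument. A self-contained sketch of why that matters; the union layout is simplified from arch/mips/include/asm/processor.h and the values are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Simplified model of the MIPS accessors: with MSA each FP register is
 * 128 bits wide, so a 64-bit access names a register AND a slice of it. */
union fpureg {
	uint64_t val64[2];
};

static void set_fpr64(union fpureg *fpr, unsigned int idx, uint64_t val)
{
	fpr->val64[idx] = val;	/* idx selects a 64-bit slice of ONE register */
}

int main(void)
{
	static union fpureg fpr[32];	/* zero-initialized register file */
	unsigned int regno = 5;

	/* The old call style, set_fpr64(fpr, regno, v), would write slice
	 * 'regno' of fpr[0]: the wrong register, and out of bounds for
	 * regno > 1. The fixed style indexes the array first. */
	set_fpr64(&fpr[regno], 0, 0xdeadbeefULL);

	printf("fpr[%u] slice 0 = %#llx\n", regno,
	       (unsigned long long)fpr[regno].val64[0]);
	return 0;
}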
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
 	},
 };
 
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
 {
 	u32 status = read_c0_status();
 
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
 	int irq;
 
 	clear_c0_status(IE_IRQ0);
-	status = a20r_ack_hwint();
+	status = a20r_update_cause_ip();
 	cause = read_c0_cause();
 
 	irq = ffs(((cause & status) >> 8) & 0xf8);
 	if (likely(irq > 0))
 		do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
 
+	a20r_update_cause_ip();
+
 	set_c0_status(IE_IRQ0);
 }
 
@@ -116,7 +116,6 @@ config PPC
 	#
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
-	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
 CONFIG_FB_NVIDIA_I2C=y
 CONFIG_FB_RADEON=y
 # CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
 CONFIG_FB_SM501=m
 CONFIG_FB_IBM_GXT4500=y
 CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
 
 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 					     phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-					     phys_addr_t first_memblock_size);
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 					      phys_addr_t first_memblock_size)
 {
-	if (early_radix_enabled())
-		return radix__setup_initial_memory_limit(first_memblock_base,
-							 first_memblock_size);
+	/*
+	 * Hash has more strict restrictions. At this point we don't
+	 * know which translations we will pick. Hence go with hash
+	 * restrictions.
+	 */
 	return hash__setup_initial_memory_limit(first_memblock_base,
 						first_memblock_size);
 }
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;
 
-	mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+			tbl->it_page_shift - 1);
 	mask += mask - 1;
 
 	return mask;
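Two things are wrong in the removed line: "1ULL <" is a comparison rather than a shift, so the computed mask collapses to 0 or 1, and the table size was never scaled from IOMMU pages to bytes. A standalone sketch with made-up table dimensions shows the difference; fls_long() is reimplemented here for illustration and assumes a 64-bit long:

#include <stdio.h>

/* Index of the most significant set bit, 1-based, as in the kernel. */
static unsigned int fls_long(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;
}

int main(void)
{
	/* Hypothetical table: 512K IOMMU pages of 4K each, a 2GB window. */
	unsigned long it_offset = 0, it_size = 1UL << 19;
	unsigned int it_page_shift = 12;

	/* Old line: "1ULL <" compares, so this is 0 or 1, never a mask. */
	unsigned long long broken = 1ULL < (fls_long(it_offset + it_size) - 1);

	/* Fixed line: shift by the window size in bytes, then fill in the
	 * low bits to form a proper address mask. */
	unsigned long long mask = 1ULL << (fls_long(it_offset + it_size) +
					   it_page_shift - 1);
	mask += mask - 1;

	printf("broken = %#llx, fixed = %#llx\n", broken, mask);
	return 0;
}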
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
 
@@ -111,7 +111,6 @@ SECTIONS
 		*(.note.GNU-stack)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
 	}
 }
 
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
@@ -30,7 +30,7 @@ SECTIONS
 	. = ALIGN(16);
 	.text : {
 		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
-		*(.sfpr)
+		*(.sfpr .glink)
 	} :text
 	PROVIDE(__etext = .);
 	PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
 		*(.branch_lt)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
 	}
 }
 
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
 	}
 }
 
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-				phys_addr_t first_memblock_size)
-{
-	/*
-	 * We don't currently support the first MEMBLOCK not mapping 0
-	 * physical on those processors
-	 */
-	BUG_ON(first_memblock_base != 0);
-
-	/*
-	 * Radix mode is not limited by RMA / VRMA addressing.
-	 */
-	ppc64_rma_size = ULONG_MAX;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
 	if (!(mfmsr() & MSR_HV))
 		early_check_vec5();
 
-	if (early_radix_enabled())
+	if (early_radix_enabled()) {
 		radix__early_init_devtree();
-	else
+		/*
+		 * We have finalized the translation we are going to use by now.
+		 * Radix mode is not limited by RMA / VRMA addressing.
+		 * Hence don't limit memblock allocations.
+		 */
+		ppc64_rma_size = ULONG_MAX;
+		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+	} else
 		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -822,7 +822,7 @@ static ssize_t perf_stats_show(struct device *dev,
 	kfree(stats);
 	return rc ? rc : seq_buf_used(&s);
 }
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
 
 static ssize_t flags_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
@@ -32,6 +32,7 @@ config RISCV
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
 	select CLONE_BACKWARDS
+	select CLINT_TIMER if !MMU
 	select COMMON_CLK
 	select EDAC_SUPPORT
 	select GENERIC_ARCH_TOPOLOGY if SMP
@@ -95,10 +95,12 @@ sysctl: sysctl@50440000 {
 			#clock-cells = <1>;
 		};
 
-		clint0: interrupt-controller@2000000 {
+		clint0: clint@2000000 {
+			#interrupt-cells = <1>;
 			compatible = "riscv,clint0";
 			reg = <0x2000000 0xC000>;
-			interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+			interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+					       &cpu1_intc 3 &cpu1_intc 7>;
 			clocks = <&sysctl K210_CLK_ACLK>;
 		};
 
arch/riscv/include/asm/clint.h (new file, 26 lines)

@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
@@ -66,6 +66,13 @@ do { \
 * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
 */
 #define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
 #endif
 
 #endif /* _ASM_RISCV_FTRACE_H */
@@ -10,6 +10,31 @@
 
 typedef unsigned long cycles_t;
 
+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+	return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+	return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+	return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+#else /* CONFIG_RISCV_M_MODE */
+
 static inline cycles_t get_cycles(void)
 {
 	return csr_read(CSR_TIME);
@@ -41,6 +66,8 @@ static inline u64 get_cycles64(void)
 }
 #endif /* CONFIG_64BIT */
 
+#endif /* !CONFIG_RISCV_M_MODE */
+
 #define ARCH_HAS_READ_CURRENT_TIMER
 static inline int read_current_timer(unsigned long *timer_val)
 {
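On 32-bit, the 64-bit mtime register cannot be read atomically, which is what get_cycles_hi() above exists for. A user-space sketch of the conventional hi/lo/hi retry loop callers build on top of such split halves; the MMIO pointer here is a stand-in, not the kernel symbol:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the memory-mapped mtime register. */
static volatile uint32_t mtime32[2];

static uint32_t get_cycles(void)    { return mtime32[0]; }
static uint32_t get_cycles_hi(void) { return mtime32[1]; }

/* If the high half changed while we read the low half, the low half may
 * have wrapped mid-read, so retry until the high half is stable. */
static uint64_t get_cycles64(void)
{
	uint32_t hi, lo;

	do {
		hi = get_cycles_hi();
		lo = get_cycles();
	} while (hi != get_cycles_hi());

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	mtime32[0] = 0xfffffff0u;
	mtime32[1] = 0x2u;
	printf("cycles = %#llx\n", (unsigned long long)get_cycles64());
	return 0;
}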
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 	return __ftrace_modify_call(rec->ip, addr, false);
 }
 
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+	int out;
+
+	ftrace_arch_code_modify_prepare();
+	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	ftrace_arch_code_modify_post_process();
+
+	return out;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 
 	ptep = &fixmap_pte[pte_index(addr)];
 
-	if (pgprot_val(prot)) {
+	if (pgprot_val(prot))
 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
-	} else {
+	else
 		pte_clear(&init_mm, addr, ptep);
-		local_flush_tlb_page(addr);
-	}
+	local_flush_tlb_page(addr);
 }
 
 static pte_t *__init get_pte_virt(phys_addr_t pa)
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-	return (p4d_t *) pgd;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+	return p4d_offset_lockless(pgdp, *pgdp, address);
 }
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-	return (pud_t *) p4d;
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
 }
 #define pud_offset pud_offset
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-	return (pmd_t *) pud;
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+	return pmd_offset_lockless(pudp, *pudp, address);
 }
 #define pmd_offset pmd_offset
 
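The _lockless helpers take both the entry pointer and a previously loaded entry value so that a lockless walker such as gup_fast can derive the next level from a single snapshot instead of dereferencing the pointer twice; with s390's dynamic page-table folding the "next level" may be the entry itself. A toy user-space analogue of that pattern follows; none of these names are kernel API:

#include <stdio.h>
#include <stdint.h>

/* Toy entry: low bit set means "folded", i.e. this entry already acts as
 * the next level; otherwise it holds a pointer to a lower table. */
struct entry {
	uintptr_t val;
};

/* The rule the hunk encodes: decide from the snapshot 'val', never by
 * rereading *ep, which a concurrent updater could change between reads. */
static struct entry *next_level_lockless(struct entry *ep, uintptr_t val)
{
	if (val & 1)
		return ep;			/* folded: stay at this level */
	return (struct entry *)val;		/* walk down to the lower table */
}

int main(void)
{
	struct entry lower = { .val = 1 };
	struct entry top = { .val = (uintptr_t)&lower };

	uintptr_t snap = top.val;		/* READ_ONCE() analogue */
	struct entry *next = next_level_lockless(&top, snap);

	printf("reached folded entry: %s\n", (next->val & 1) ? "yes" : "no");
	return 0;
}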
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
 void do_secure_storage_access(struct pt_regs *regs);
 void do_non_secure_storage_access(struct pt_regs *regs);
+void do_secure_storage_violation(struct pt_regs *regs);
 
 void addressing_exception(struct pt_regs *regs);
 void data_exception(struct pt_regs *regs);
@@ -39,14 +39,13 @@ void enabled_wait(void)
 	local_irq_restore(flags);
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
-	/* XXX seqcount has tracepoints that require RCU */
-	write_seqcount_begin(&idle->seqcount);
+	raw_write_seqcount_begin(&idle->seqcount);
 	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
 	idle->idle_time += idle_time;
 	idle->idle_count++;
 	account_idle_time(cputime_to_nsecs(idle_time));
-	write_seqcount_end(&idle->seqcount);
+	raw_write_seqcount_end(&idle->seqcount);
 }
 NOKPROBE_SYMBOL(enabled_wait);
 
@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
 PGM_CHECK_DEFAULT /* 3c */
 PGM_CHECK(do_secure_storage_access) /* 3d */
 PGM_CHECK(do_non_secure_storage_access) /* 3e */
-PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK(do_secure_storage_violation) /* 3f */
 PGM_CHECK(monitor_event_exception) /* 40 */
 PGM_CHECK_DEFAULT /* 41 */
 PGM_CHECK_DEFAULT /* 42 */
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
 /*
 * Make sure that the area behind memory_end is protected
 */
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
 {
 	if (memory_end_set)
 		memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
 /*
 * Make sure that oldmem, where the dump is stored, is protected
 */
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (OLDMEM_BASE)
@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
 /*
 * Make sure that oldmem, where the dump is stored, is protected
 */
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (OLDMEM_BASE)
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_non_secure_storage_access);
 
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+	/*
+	 * Either KVM messed up the secure guest mapping or the same
+	 * page is mapped into multiple secure guests.
+	 *
+	 * This exception is only triggered when a guest 2 is running
+	 * and can therefore never occur in kernel context.
+	 */
+	printk_ratelimited(KERN_WARNING
+			   "Secure storage violation in task: %s, pid %d\n",
+			   current->comm, current->pid);
+	send_sig(SIGSEGV, current, 0);
+}
+
 #else
 void do_secure_storage_access(struct pt_regs *regs)
 {
@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
 {
 	default_trap_handler(regs);
 }
+
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
 #endif
@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
 int zpci_disable_device(struct zpci_dev *zdev)
 {
 	zpci_dma_exit_device(zdev);
+	/*
+	 * The zPCI function may already be disabled by the platform, this is
+	 * detected in clp_disable_fh() which becomes a no-op.
+	 */
 	return clp_disable_fh(zdev);
 }
 EXPORT_SYMBOL_GPL(zpci_disable_device);
@@ -143,6 +143,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 			zpci_remove_device(zdev);
 		}
 
+		zdev->fh = ccdf->fh;
+		zpci_disable_device(zdev);
 		zdev->state = ZPCI_FN_STATE_STANDBY;
 		if (!clp_get_state(ccdf->fid, &state) &&
 		    state == ZPCI_FN_STATE_RESERVED) {
@@ -8,7 +8,6 @@
 
 #ifdef CONFIG_SMP
 
-#include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <asm/current.h>
 #include <asm/percpu.h>
@@ -370,7 +370,6 @@ syscall_trace_entry:
 	 nop
 	cmp/eq	#-1, r0
 	bt	syscall_exit
-	mov.l	r0, @(OFF_R0,r15)	! Save return value
 	!			Reload R0-R4 from kernel stack, where the
 	!			parent may have modified them using
 	!			ptrace(POKEUSR). (Note that R0-R2 are
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	long ret = 0;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
-		/*
-		 * Tracing decided this syscall should not happen.
-		 * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->regs[0].
-		 */
-		ret = -1L;
+	    tracehook_report_syscall_entry(regs)) {
+		regs->regs[0] = -ENOSYS;
+		return -1;
+	}
 
 	if (secure_computing() == -1)
 		return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
 			    regs->regs[6], regs->regs[7]);
 
-	return ret ?: regs->regs[0];
+	return 0;
 }
 
 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
@@ -44,6 +44,8 @@ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 KBUILD_CFLAGS += -include $(srctree)/$(src)/hidden.h
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
@@ -62,6 +62,7 @@ CONFIG_KVM=y
 CONFIG_KVM_INTEL=y
 CONFIG_KVM_AMD=y
 CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
@@ -184,6 +185,14 @@ CONFIG_NET_EMATCH_U32=y
 CONFIG_NET_CLS_ACT=y
 CONFIG_VSOCKETS=y
 CONFIG_BPF_JIT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_QCA=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_DEFAULT_PS is not set
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -330,6 +339,7 @@ CONFIG_HID_MULTITOUCH=y
 CONFIG_HID_NINTENDO=y
 CONFIG_HID_PLANTRONICS=y
 CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
 CONFIG_HID_STEAM=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB_EHCI_HCD=y
@@ -350,6 +360,9 @@ CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
 CONFIG_MMC=y
 # CONFIG_PWRSEQ_EMMC is not set
 # CONFIG_PWRSEQ_SIMPLE is not set
@@ -360,6 +373,7 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_CLASS_FLASH=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 CONFIG_DMABUF_HEAPS=y
 CONFIG_DMABUF_HEAPS_SYSTEM=y
@@ -376,6 +390,7 @@ CONFIG_PM_DEVFREQ_EVENT=y
 CONFIG_IIO=y
 CONFIG_IIO_BUFFER=y
 CONFIG_IIO_TRIGGER=y
+CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_BINDERFS=y
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+# CONFIG_64BIT is not set
 CONFIG_SMP=y
 CONFIG_X86_GENERIC=y
 CONFIG_HPET_TIMER=y
@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 	instrumentation_end();
 
 	set_irq_regs(old_regs);
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
 * rdx: Function argument (can be NULL if none)
 */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 	/*
 	 * Save the frame pointer unconditionally. This allows the ORC
 	 * unwinder to handle the stack switch.
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
-#define acpi_unlazy_tlb(x)	leave_mm(x)
-
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
@@ -60,12 +60,26 @@
 #define FRAME_END "pop %" _ASM_BP "\n"
 
 #ifdef CONFIG_X86_64
+
 #define ENCODE_FRAME_POINTER			\
 	"lea 1(%rsp), %rbp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return (unsigned long)regs + 1;
+}
+
 #else /* !CONFIG_X86_64 */
+
 #define ENCODE_FRAME_POINTER			\
 	"movl %esp, %ebp\n\t"			\
 	"andl $0x7fffffff, %ebp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return (unsigned long)regs & 0x7fffffff;
+}
+
 #endif /* CONFIG_X86_64 */
 
 #endif /* __ASSEMBLY__ */
@@ -83,6 +97,11 @@
 
 #define ENCODE_FRAME_POINTER
 
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return 0;
+}
+
 #endif
 
 #define FRAME_BEGIN
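The encoded value is deliberately not a valid frame pointer: on 64-bit, real frames are even-aligned, so regs + 1 is odd; on 32-bit, clearing bit 31 yields a non-kernel address. A standalone demo of the 64-bit encode/decode round trip; decode_frame_pointer() is written here purely for illustration, the real decoding lives in the unwinder:

#include <stdint.h>
#include <stdio.h>

struct pt_regs { long dummy; };

static unsigned long encode_frame_pointer(struct pt_regs *regs)
{
	/* Real frames are 8-byte aligned, so an odd value can never be a
	 * genuine frame pointer; bit 0 tags "this is really pt_regs". */
	return (unsigned long)regs + 1;
}

static struct pt_regs *decode_frame_pointer(unsigned long bp)
{
	if (!(bp & 1))
		return NULL;		/* ordinary frame pointer */
	return (struct pt_regs *)(bp - 1);
}

int main(void)
{
	struct pt_regs regs;
	unsigned long bp = encode_frame_pointer(&regs);

	printf("decoded pt_regs: %p (expected %p)\n",
	       (void *)decode_frame_pointer(bp), (void *)&regs);
	return 0;
}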
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
-	run_on_irqstack_cond(__##func, regs, regs);			\
+	run_sysvec_on_irqstack_cond(__##func, regs);			\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
 	return __this_cpu_read(irq_count) != -1;
 }
 
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+			      struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+			   struct irq_desc *desc);
 
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
 	__this_cpu_add(irq_count, 1);
-	asm_call_on_stack(tos - 8, func, arg);
+	asm_call_on_stack(tos - 8, func, NULL);
 	__this_cpu_sub(irq_count, 1);
 }
 
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+			 struct pt_regs *regs)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_sysvec_on_stack(tos - 8, func, regs);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+		      struct irq_desc *desc)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_irq_on_stack(tos - 8, func, desc);
+	__this_cpu_sub(irq_count, 1);
+}
+
 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+					    struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+					 struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
 	return !user_mode(regs) && !irqstack_active();
 }
 
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
 						 struct pt_regs *regs)
 {
-	void (*__func)(void *arg) = func;
-
 	lockdep_assert_irqs_disabled();
 
 	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(__func, arg);
+		__run_on_irqstack(func);
 	else
-		__func(arg);
+		func();
 }
 
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+			    struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_sysvec_on_irqstack(func, regs);
+	else
+		func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+			 struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_irq_on_irqstack(func, desc);
+	else
+		func(desc);
+}
+
 #endif
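The point of replacing the void * callback plumbing with typed function pointers is that signature mismatches stop compiling instead of misbehaving at run time. A user-space illustration of the hazard the old style permits; every name here is invented, and the mismatched indirect call is exactly the kind of strictly undefined behavior the typed helpers rule out:

#include <stdio.h>

struct desc { int irq; };

static void handle_timer(void)			{ puts("timer"); }
static void handle_desc(struct desc *d)	{ printf("irq %d\n", d->irq); }

/* Old style: one generic entry point; a cast at the call site hides
 * any mismatch between the callback and its argument. */
static void run_untyped(void (*func)(void *), void *arg)
{
	func(arg);
}

/* New style: one helper per callback signature. */
static void run_nullary(void (*func)(void))
{
	func();
}

static void run_on_desc(void (*func)(struct desc *), struct desc *d)
{
	func(d);
}

int main(void)
{
	struct desc d = { .irq = 9 };

	/* Compiles without a peep, even though the signature is wrong;
	 * calling through a mismatched pointer type is undefined. */
	run_untyped((void (*)(void *))handle_timer, NULL);

	run_nullary(handle_timer);
	run_on_desc(handle_desc, &d);
	/* run_nullary(handle_desc);  -- now a compile-time error */
	return 0;
}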
@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
 		legacy_pic->init(0);
 		legacy_pic->make_irq(0);
 		apic_write(APIC_LVT0, APIC_DM_EXTINT);
+		legacy_pic->unmask(0);
 
 		unlock_ExtINT_logic();
 
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 				       struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_X86_64))
-		run_on_irqstack_cond(desc->handle_irq, desc, regs);
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
 	else
 		__handle_irq(desc, regs);
 }
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL, NULL);
+	run_on_irqstack_cond(__do_softirq, NULL);
 }
@@ -652,6 +652,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (pv_tlb_flush_supported()) {
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 		pr_info("KVM setup pv remote TLB flush\n");
 	}
@@ -764,14 +765,6 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static void kvm_free_pv_cpu_mask(void)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
-}
-
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
@@ -790,20 +783,11 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			if (!zalloc_cpumask_var_node(
-				per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu))) {
-				goto zalloc_cpumask_fail;
-			}
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+						GFP_KERNEL, cpu_to_node(cpu));
 		}
 
 	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 	return 0;
-
-zalloc_cpumask_fail:
-	kvm_free_pv_cpu_mask();
-	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);
 
@@ -42,6 +42,7 @@
 #include <asm/spec-ctrl.h>
 #include <asm/io_bitmap.h>
 #include <asm/proto.h>
+#include <asm/frame.h>
 
 #include "process.h"
 
@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
 
-	frame->bp = 0;
+	frame->bp = encode_frame_pointer(childregs);
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
 	p->thread.io_bitmap = NULL;
@@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int invd_interception(struct vcpu_svm *svm)
+{
+	/* Treat an INVD instruction as a NOP and just skip it. */
+	return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
 static int invlpg_interception(struct vcpu_svm *svm)
 {
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_RDPMC]			= rdpmc_interception,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]				= iret_interception,
-	[SVM_EXIT_INVD]				= emulate_on_interception,
+	[SVM_EXIT_INVD]				= invd_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #endif
 
+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
 #define KVM_VM_CR0_ALWAYS_ON				\
@@ -4803,6 +4806,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 			 * EPT will cause page fault only if we need to
 			 * detect illegal GPAs.
 			 */
+			WARN_ON_ONCE(!allow_smaller_maxphyaddr);
 			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
 			return 1;
 		} else
@@ -5331,7 +5335,7 @@
 	 * would also use advanced VM-exit information for EPT violations to
 	 * reconstruct the page fault error code.
 	 */
-	if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+	if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
 		return kvm_emulate_instruction(vcpu, 0);
 
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -8305,11 +8309,12 @@
 	vmx_check_vmcs12_offsets();
 
 	/*
-	 * Intel processors don't have problems with
-	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
-	 * it for VMX by default
+	 * Shadow paging doesn't have a (further) performance penalty
+	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+	 * by default
 	 */
-	allow_smaller_maxphyaddr = true;
+	if (!enable_ept)
+		allow_smaller_maxphyaddr = true;
 
 	return 0;
 }
@@ -552,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 
 static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 {
-	return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+	if (!enable_ept)
+		return true;
+
+	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
 }
 
 void dump_vmcs(void);
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
 u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);
 
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 static u64 __read_mostly host_xss;
@@ -976,6 +976,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP;
+	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
@@ -1003,7 +1004,7 @@
 	if (kvm_x86_ops.set_cr4(vcpu, cr4))
 		return 1;
 
-	if (((cr4 ^ old_cr4) & pdptr_bits) ||
+	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
@@ -3223,9 +3224,22 @@
 	case MSR_IA32_POWER_CTL:
 		msr_info->data = vcpu->arch.msr_ia32_power_ctl;
 		break;
-	case MSR_IA32_TSC:
-		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
+	case MSR_IA32_TSC: {
+		/*
+		 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
+		 * even when not intercepted. AMD manual doesn't explicitly
+		 * state this but appears to behave the same.
+		 *
+		 * On userspace reads and writes, however, we unconditionally
+		 * operate L1's TSC value to ensure backwards-compatible
+		 * behavior for migration.
+		 */
+		u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
+							    vcpu->arch.tsc_offset;
+
+		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
 		break;
+	}
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
@@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+/**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk:	the gendisk of the queue to configure
+ * @model:	the zoned model to set
+ *
+ * Set the zoned model of the request queue of @disk according to @model.
+ * When @model is BLK_ZONED_HM (host managed), this should be called only
+ * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+ * If @model specifies BLK_ZONED_HA (host aware), the effective model used
+ * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+ * on the disk.
+ */
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+	switch (model) {
+	case BLK_ZONED_HM:
+		/*
+		 * Host managed devices are supported only if
+		 * CONFIG_BLK_DEV_ZONED is enabled.
+		 */
+		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+		break;
+	case BLK_ZONED_HA:
+		/*
+		 * Host aware devices can be treated either as regular block
+		 * devices (similar to drive managed devices) or as zoned block
+		 * devices to take advantage of the zone command set, similarly
+		 * to host managed devices. We try the latter if there are no
+		 * partitions and zoned block device support is enabled, else
+		 * we do nothing special as far as the block layer is concerned.
+		 */
+		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+		    disk_has_partitions(disk))
+			model = BLK_ZONED_NONE;
+		break;
+	case BLK_ZONED_NONE:
+	default:
+		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+			model = BLK_ZONED_NONE;
+		break;
+	}
+
+	disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
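A driver is expected to call the new helper once while setting up its gendisk; the BLK_ZONED_HA fallback above means callers need not check CONFIG_BLK_DEV_ZONED or partition state themselves. A minimal illustrative caller follows; the surrounding probe context is invented:

#include <linux/blkdev.h>

/* Hypothetical driver-side disk setup using the helper added above. */
static void example_init_disk(struct gendisk *disk, bool host_managed)
{
	/*
	 * BLK_ZONED_HA quietly degrades to BLK_ZONED_NONE when zoned
	 * support is compiled out or the disk already has partitions,
	 * so no extra checks are needed here.
	 */
	blk_queue_set_zoned(disk, host_managed ? BLK_ZONED_HM : BLK_ZONED_HA);
}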
@@ -2,7 +2,7 @@ ARCH=arm64
 
 CLANG_TRIPLE=aarch64-linux-gnu-
 CROSS_COMPILE=aarch64-linux-androidkernel-
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
 
 MAKE_GOALS="
 Image
@@ -2,7 +2,7 @@ ARCH=arm
 
 CLANG_TRIPLE=arm-linux-gnueabi-
 CROSS_COMPILE=arm-linux-androidkernel-
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
 
 MAKE_GOALS="
 Image
@@ -1,10 +1,8 @@
 BRANCH=android-mainline
 KMI_GENERATION=0
 
-CC=clang
-LD=ld.lld
-NM=llvm-nm
-OBJCOPY=llvm-objcopy
+LLVM=1
+LLVM_IAS=1
 DEPMOD=depmod
 DTC=dtc
 CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r383902/bin
@@ -2,7 +2,7 @@ ARCH=x86_64
 
 CLANG_TRIPLE=x86_64-linux-gnu-
 CROSS_COMPILE=x86_64-linux-androidkernel-
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
 
 MAKE_GOALS="
 bzImage
Some files were not shown because too many files have changed in this diff.