commit b19651bfcc
Merge c84e1efae0 ("Merge tag 'asm-generic-fixes-5.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic") into android-mainline

Steps on the way to 5.10-rc5

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I644783003a83186a34cdbb753aa492f4350f49ee

.mailmap | 1
@@ -290,6 +290,7 @@ Santosh Shilimkar <ssantosh@kernel.org>
 Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
@@ -76,6 +76,12 @@ properties:
   resets:
     maxItems: 1

+  wifi-2.4ghz-coexistence:
+    type: boolean
+    description: >
+      Should the pixel frequencies in the WiFi frequencies range be
+      avoided?
+
 required:
   - compatible
   - reg
@@ -149,11 +149,11 @@ vidtv_psi.[ch]
 Because the generator is implemented in a separate file, it can be
 reused elsewhere in the media subsystem.

-Currently vidtv supports working with 3 PSI tables: PAT, PMT and
-SDT.
+Currently vidtv supports working with 5 PSI tables: PAT, PMT,
+SDT, NIT and EIT.

 The specification for PAT and PMT can be found in *ISO 13818-1:
-Systems*, while the specification for the SDT can be found in *ETSI
+Systems*, while the specification for the SDT, NIT, EIT can be found in *ETSI
 EN 300 468: Specification for Service Information (SI) in DVB
 systems*.

@@ -197,6 +197,8 @@ vidtv_channel.[ch]

 #. Their programs will be concatenated to populate the PAT

+#. Their events will be concatenated to populate the EIT
+
 #. For each program in the PAT, a PMT section will be created

 #. The PMT section for a channel will be assigned its streams.

@@ -256,6 +258,42 @@ Using dvb-fe-tool
 The first step to check whether the demod loaded successfully is to run::

 	$ dvb-fe-tool
+	Device Dummy demod for DVB-T/T2/C/S/S2 (/dev/dvb/adapter0/frontend0) capabilities:
+	    CAN_FEC_1_2
+	    CAN_FEC_2_3
+	    CAN_FEC_3_4
+	    CAN_FEC_4_5
+	    CAN_FEC_5_6
+	    CAN_FEC_6_7
+	    CAN_FEC_7_8
+	    CAN_FEC_8_9
+	    CAN_FEC_AUTO
+	    CAN_GUARD_INTERVAL_AUTO
+	    CAN_HIERARCHY_AUTO
+	    CAN_INVERSION_AUTO
+	    CAN_QAM_16
+	    CAN_QAM_32
+	    CAN_QAM_64
+	    CAN_QAM_128
+	    CAN_QAM_256
+	    CAN_QAM_AUTO
+	    CAN_QPSK
+	    CAN_TRANSMISSION_MODE_AUTO
+	DVB API Version 5.11, Current v5 delivery system: DVBC/ANNEX_A
+	Supported delivery systems:
+	    DVBT
+	    DVBT2
+	    [DVBC/ANNEX_A]
+	    DVBS
+	    DVBS2
+	Frequency range for the current standard:
+	From:       51.0 MHz
+	To:         2.15 GHz
+	Step:       62.5 kHz
+	Tolerance:  29.5 MHz
+	Symbol rate ranges for the current standard:
+	From:       1.00 MBauds
+	To:         45.0 MBauds

 This should return what is currently set up at the demod struct, i.e.::

@@ -314,7 +352,7 @@ For this, one should provide a configuration file known as a 'scan file',
 here's an example::

 	[Channel]
-	FREQUENCY = 330000000
+	FREQUENCY = 474000000
 	MODULATION = QAM/AUTO
 	SYMBOL_RATE = 6940000
 	INNER_FEC = AUTO

@@ -335,6 +373,14 @@ You can browse scan tables online here: `dvb-scan-tables
 Assuming this channel is named 'channel.conf', you can then run::

 	$ dvbv5-scan channel.conf
+	dvbv5-scan ~/vidtv.conf
+	ERROR    command BANDWIDTH_HZ (5) not found during retrieve
+	Cannot calc frequency shift. Either bandwidth/symbol-rate is unavailable (yet).
+	Scanning frequency #1 330000000
+	    (0x00) Signal= -68.00dBm
+	Scanning frequency #2 474000000
+	Lock   (0x1f) Signal= -34.45dBm C/N= 33.74dB UCB= 0
+	Service Beethoven, provider LinuxTV.org: digital television

 For more information on dvb-scan, check its documentation online here:
 `dvb-scan Documentation <https://www.linuxtv.org/wiki/index.php/Dvbscan>`_.

@@ -344,23 +390,38 @@ Using dvb-zap

 dvbv5-zap is a command line tool that can be used to record MPEG-TS to disk. The
 typical use is to tune into a channel and put it into record mode. The example
-below - which is taken from the documentation - illustrates that::
+below - which is taken from the documentation - illustrates that\ [1]_::

-	$ dvbv5-zap -c dvb_channel.conf "trilhas sonoras" -r
-	using demux '/dev/dvb/adapter0/demux0'
+	$ dvbv5-zap -c dvb_channel.conf "beethoven" -o music.ts -P -t 10
+	using demux 'dvb0.demux0'
 	reading channels from file 'dvb_channel.conf'
-	service has pid type 05: 204
-	tuning to 573000000 Hz
-	audio pid 104
-	dvb_set_pesfilter 104
-	Lock   (0x1f) Quality= Good Signal= 100.00% C/N= -13.80dB UCB= 70 postBER= 3.14x10^-3 PER= 0
-	DVR interface '/dev/dvb/adapter0/dvr0' can now be opened
+	tuning to 474000000 Hz
+	pass all PID's to TS
+	dvb_set_pesfilter 8192
+	dvb_dev_set_bufsize: buffer set to 6160384
+	Lock   (0x1f) Quality= Good Signal= -34.66dBm C/N= 33.41dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+	Lock   (0x1f) Quality= Good Signal= -34.57dBm C/N= 33.46dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+	Record to file 'music.ts' started
+	received 24587768 bytes (2401 Kbytes/sec)
+	Lock   (0x1f) Quality= Good Signal= -34.42dBm C/N= 33.89dB UCB= 0 postBER= 0 preBER= 2.44x10^-3 PER= 0

-The channel can be watched by playing the contents of the DVR interface, with
-some player that recognizes the MPEG-TS format, such as *mplayer* or *vlc*.
+.. [1] In this example, it records 10 seconds with all program ID's stored
+       at the music.ts file.

+The channel can be watched by playing the contents of the stream with some
+player that recognizes the MPEG-TS format, such as ``mplayer`` or ``vlc``.

 By playing the contents of the stream one can visually inspect the workings of
-vidtv, e.g.::
+vidtv, e.g., to play a recorded TS file with::

 	$ mplayer music.ts

+or, alternatively, running this command on one terminal::
+
+	$ dvbv5-zap -c dvb_channel.conf "beethoven" -P -r &
+
+And, on a second terminal, playing the contents from DVR interface with::
+
+	$ mplayer /dev/dvb/adapter0/dvr0

@@ -423,3 +484,30 @@ A nice addition is to simulate some noise when the signal quality is bad by:

 - Updating the error statistics accordingly (e.g. BER, etc).

 - Simulating some noise in the encoded data.
+
+Functions and structs used within vidtv
+---------------------------------------
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_bridge.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_channel.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_demod.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_encoder.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_mux.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_pes.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_psi.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_s302m.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_ts.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_common.c
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -254,6 +254,32 @@ you will have done run-time testing specific to your change, but at a
 minimum, your changes should survive an ``allyesconfig`` and an
 ``allmodconfig`` build without new warnings or failures.

+Q: How do I post corresponding changes to user space components?
+----------------------------------------------------------------
+A: User space code exercising kernel features should be posted
+alongside kernel patches. This gives reviewers a chance to see
+how any new interface is used and how well it works.
+
+When user space tools reside in the kernel repo itself all changes
+should generally come as one series. If series becomes too large
+or the user space project is not reviewed on netdev include a link
+to a public repo where user space patches can be seen.
+
+In case user space tooling lives in a separate repository but is
+reviewed on netdev (e.g. patches to `iproute2` tools) kernel and
+user space patches should form separate series (threads) when posted
+to the mailing list, e.g.::
+
+  [PATCH net-next 0/3] net: some feature cover letter
+   └─ [PATCH net-next 1/3] net: some feature prep
+   └─ [PATCH net-next 2/3] net: some feature do it
+   └─ [PATCH net-next 3/3] selftest: net: some feature
+
+  [PATCH iproute2-next] ip: add support for some feature
+
+Posting as one thread is discouraged because it confuses patchwork
+(as of patchwork 2.2.2).
+
 Q: Any other tips to help ensure my net/net-next patch gets OK'd?
 -----------------------------------------------------------------
 A: Attention to detail. Re-read your own work as if you were the
MAINTAINERS | 18
@@ -1995,7 +1995,6 @@ N: lpc18xx

 ARM/LPC32XX SOC SUPPORT
 M: Vladimir Zapolskiy <vz@mleia.com>
-M: Sylvain Lemieux <slemieux.tyco@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 T: git git://github.com/vzapolskiy/linux-lpc32xx.git

@@ -3528,11 +3527,12 @@ BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M: Arend van Spriel <arend.vanspriel@broadcom.com>
 M: Franky Lin <franky.lin@broadcom.com>
 M: Hante Meuleman <hante.meuleman@broadcom.com>
-M: Chi-Hsien Lin <chi-hsien.lin@cypress.com>
-M: Wright Feng <wright.feng@cypress.com>
+M: Chi-hsien Lin <chi-hsien.lin@infineon.com>
+M: Wright Feng <wright.feng@infineon.com>
+M: Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
 L: linux-wireless@vger.kernel.org
 L: brcm80211-dev-list.pdl@broadcom.com
-L: brcm80211-dev-list@cypress.com
+L: SHA-cyfmac-dev-list@infineon.com
 S: Supported
 F: drivers/net/wireless/broadcom/brcm80211/

@@ -9653,6 +9653,7 @@ F: Documentation/virt/kvm/s390*
 F: arch/s390/include/asm/gmap.h
 F: arch/s390/include/asm/kvm*
 F: arch/s390/include/uapi/asm/kvm*
+F: arch/s390/kernel/uv.c
 F: arch/s390/kvm/
 F: arch/s390/mm/gmap.c
 F: tools/testing/selftests/kvm/*/s390x/

@@ -13174,7 +13175,9 @@ M: Jesper Dangaard Brouer <hawk@kernel.org>
 M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
 L: netdev@vger.kernel.org
 S: Supported
+F: Documentation/networking/page_pool.rst
 F: include/net/page_pool.h
+F: include/trace/events/page_pool.h
 F: net/core/page_pool.c

 PANASONIC LAPTOP ACPI EXTRAS DRIVER

@@ -14816,7 +14819,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
 F: drivers/net/wireless/realtek/rtlwifi/

 REALTEK WIRELESS DRIVER (rtw88)
-M: Yan-Hsuan Chuang <yhchuang@realtek.com>
+M: Yan-Hsuan Chuang <tony0620emma@gmail.com>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 F: drivers/net/wireless/realtek/rtw88/

@@ -15789,9 +15792,8 @@ F: drivers/slimbus/
 F: include/linux/slimbus.h

 SFC NETWORK DRIVER
-M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Edward Cree <ecree@solarflare.com>
-M: Martin Habets <mhabets@solarflare.com>
+M: Edward Cree <ecree.xilinx@gmail.com>
+M: Martin Habets <habetsm.xilinx@gmail.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/sfc/
@@ -134,8 +134,10 @@

 #ifdef CONFIG_ARC_HAS_PAE40
 #define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
 #else
 #define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
 #endif

 /**************************************************************************
@@ -521,7 +521,7 @@ target-module@100000 { /* 0x4a100000, ap 3 04.0 */
 ranges = <0x0 0x100000 0x8000>;

 mac_sw: switch@0 {
-compatible = "ti,am4372-cpsw","ti,cpsw-switch";
+compatible = "ti,am4372-cpsw-switch", "ti,cpsw-switch";
 reg = <0x0 0x4000>;
 ranges = <0 0 0x4000>;
 clocks = <&cpsw_125mhz_gclk>, <&dpll_clksel_mac_clk>;
@@ -32,8 +32,8 @@ m_can0: mcan@1a00 {
 interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-names = "int0", "int1";
-clocks = <&mcan_clk>, <&l3_iclk_div>;
-clock-names = "cclk", "hclk";
+clocks = <&l3_iclk_div>, <&mcan_clk>;
+clock-names = "hclk", "cclk";
 bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 };
 };
@@ -75,6 +75,8 @@
 #define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
 #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))

+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map

@@ -25,6 +25,8 @@
 #define PTE_HWTABLE_OFF (0)
 #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))

+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
 /*
  * PGDIR_SHIFT determines the size a top-level page table entry can map.
  */
@@ -7,7 +7,6 @@ config ARCH_OMAP2
 depends on ARCH_MULTI_V6
 select ARCH_OMAP2PLUS
 select CPU_V6
-select PM_GENERIC_DOMAINS if PM
 select SOC_HAS_OMAP2_SDRC

 config ARCH_OMAP3

@@ -106,6 +105,8 @@ config ARCH_OMAP2PLUS
 select OMAP_DM_TIMER
 select OMAP_GPMC
 select PINCTRL
+select PM_GENERIC_DOMAINS if PM
+select PM_GENERIC_DOMAINS_OF if PM
 select RESET_CONTROLLER
 select SOC_BUS
 select TI_SYSC
@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 if (mpuss_can_lose_context) {
 error = cpu_cluster_pm_enter();
 if (error) {
-omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
-goto cpu_cluster_pm_out;
+index = 0;
+cx = state_ptr + index;
+pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+mpuss_can_lose_context = 0;
 }
 }
 }

@@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 omap4_enter_lowpower(dev->cpu, cx->cpu_state);
 cpu_done[dev->cpu] = true;

-cpu_cluster_pm_out:
 /* Wakeup CPU1 only if it is not offlined */
 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -5,20 +5,20 @@
 usb {
 compatible = "simple-bus";
 dma-ranges;
-#address-cells = <1>;
-#size-cells = <1>;
-ranges = <0x0 0x0 0x68500000 0x00400000>;
+#address-cells = <2>;
+#size-cells = <2>;
+ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;

 usbphy0: usb-phy@0 {
 compatible = "brcm,sr-usb-combo-phy";
-reg = <0x00000000 0x100>;
+reg = <0x0 0x00000000 0x0 0x100>;
 #phy-cells = <1>;
 status = "disabled";
 };

 xhci0: usb@1000 {
 compatible = "generic-xhci";
-reg = <0x00001000 0x1000>;
+reg = <0x0 0x00001000 0x0 0x1000>;
 interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&usbphy0 1>, <&usbphy0 0>;
 phy-names = "phy0", "phy1";

@@ -28,7 +28,7 @@ xhci0: usb@1000 {

 bdc0: usb@2000 {
 compatible = "brcm,bdc-v0.16";
-reg = <0x00002000 0x1000>;
+reg = <0x0 0x00002000 0x0 0x1000>;
 interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&usbphy0 0>, <&usbphy0 1>;
 phy-names = "phy0", "phy1";

@@ -38,21 +38,21 @@ bdc0: usb@2000 {

 usbphy1: usb-phy@10000 {
 compatible = "brcm,sr-usb-combo-phy";
-reg = <0x00010000 0x100>;
+reg = <0x0 0x00010000 0x0 0x100>;
 #phy-cells = <1>;
 status = "disabled";
 };

 usbphy2: usb-phy@20000 {
 compatible = "brcm,sr-usb-hs-phy";
-reg = <0x00020000 0x100>;
+reg = <0x0 0x00020000 0x0 0x100>;
 #phy-cells = <0>;
 status = "disabled";
 };

 xhci1: usb@11000 {
 compatible = "generic-xhci";
-reg = <0x00011000 0x1000>;
+reg = <0x0 0x00011000 0x0 0x1000>;
 interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
 phy-names = "phy0", "phy1", "phy2";

@@ -62,7 +62,7 @@ xhci1: usb@11000 {

 bdc1: usb@21000 {
 compatible = "brcm,bdc-v0.16";
-reg = <0x00021000 0x1000>;
+reg = <0x0 0x00021000 0x0 0x1000>;
 interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&usbphy2>;
 phy-names = "phy0";
@@ -10,18 +10,6 @@ / {
 model = "NVIDIA Jetson TX2 Developer Kit";
 compatible = "nvidia,p2771-0000", "nvidia,tegra186";

-aconnect {
-status = "okay";
-
-dma-controller@2930000 {
-status = "okay";
-};
-
-interrupt-controller@2a40000 {
-status = "okay";
-};
-};
-
 i2c@3160000 {
 power-monitor@42 {
 compatible = "ti,ina3221";

@@ -54,7 +54,7 @@ memory-controller@2c00000 {
 status = "okay";
 };

-serial@c280000 {
+serial@3100000 {
 status = "okay";
 };
@@ -1161,7 +1161,7 @@ p2u_hsio_11: phy@3f40000 {

 hsp_aon: hsp@c150000 {
 compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
-reg = <0x0c150000 0xa0000>;
+reg = <0x0c150000 0x90000>;
 interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
@@ -1663,16 +1663,6 @@ vdd_usb_vbus: regulator@9 {
 vin-supply = <&vdd_5v0_sys>;
 };

-vdd_usb_vbus_otg: regulator@11 {
-compatible = "regulator-fixed";
-regulator-name = "USB_VBUS_EN0";
-regulator-min-microvolt = <5000000>;
-regulator-max-microvolt = <5000000>;
-gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
-enable-active-high;
-vin-supply = <&vdd_5v0_sys>;
-};
-
 vdd_hdmi: regulator@10 {
 compatible = "regulator-fixed";
 regulator-name = "VDD_HDMI_5V0";

@@ -1712,4 +1702,14 @@ vdd_cam_1v8: regulator@13 {
 enable-active-high;
 vin-supply = <&vdd_3v3_sys>;
 };
+
+vdd_usb_vbus_otg: regulator@14 {
+compatible = "regulator-fixed";
+regulator-name = "USB_VBUS_EN0";
+regulator-min-microvolt = <5000000>;
+regulator-max-microvolt = <5000000>;
+gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+enable-active-high;
+vin-supply = <&vdd_5v0_sys>;
+};
 };
@@ -8,7 +8,7 @@ / {
 compatible = "nvidia,tegra234-vdk", "nvidia,tegra234";

 aliases {
-sdhci3 = "/cbb@0/sdhci@3460000";
+mmc3 = "/bus@0/mmc@3460000";
 serial0 = &uarta;
 };

@@ -17,12 +17,12 @@ chosen {
 stdout-path = "serial0:115200n8";
 };

-cbb@0 {
+bus@0 {
 serial@3100000 {
 status = "okay";
 };

-sdhci@3460000 {
+mmc@3460000 {
 status = "okay";
 bus-width = <8>;
 non-removable;
@@ -179,22 +179,22 @@ smem {
 };

 soc: soc {
-#address-cells = <1>;
-#size-cells = <1>;
-ranges = <0 0 0 0xffffffff>;
+#address-cells = <2>;
+#size-cells = <2>;
+ranges = <0 0 0 0 0x0 0xffffffff>;
 dma-ranges;
 compatible = "simple-bus";

 prng: qrng@e1000 {
 compatible = "qcom,prng-ee";
-reg = <0xe3000 0x1000>;
+reg = <0x0 0xe3000 0x0 0x1000>;
 clocks = <&gcc GCC_PRNG_AHB_CLK>;
 clock-names = "core";
 };

 cryptobam: dma@704000 {
 compatible = "qcom,bam-v1.7.0";
-reg = <0x00704000 0x20000>;
+reg = <0x0 0x00704000 0x0 0x20000>;
 interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
 clock-names = "bam_clk";

@@ -206,7 +206,7 @@ cryptobam: dma@704000 {

 crypto: crypto@73a000 {
 compatible = "qcom,crypto-v5.1";
-reg = <0x0073a000 0x6000>;
+reg = <0x0 0x0073a000 0x0 0x6000>;
 clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
 <&gcc GCC_CRYPTO_AXI_CLK>,
 <&gcc GCC_CRYPTO_CLK>;

@@ -217,7 +217,7 @@ crypto: crypto@73a000 {

 tlmm: pinctrl@1000000 {
 compatible = "qcom,ipq6018-pinctrl";
-reg = <0x01000000 0x300000>;
+reg = <0x0 0x01000000 0x0 0x300000>;
 interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
 gpio-controller;
 #gpio-cells = <2>;

@@ -235,7 +235,7 @@ serial_3_pins: serial3-pinmux {

 gcc: gcc@1800000 {
 compatible = "qcom,gcc-ipq6018";
-reg = <0x01800000 0x80000>;
+reg = <0x0 0x01800000 0x0 0x80000>;
 clocks = <&xo>, <&sleep_clk>;
 clock-names = "xo", "sleep_clk";
 #clock-cells = <1>;

@@ -244,17 +244,17 @@ gcc: gcc@1800000 {

 tcsr_mutex_regs: syscon@1905000 {
 compatible = "syscon";
-reg = <0x01905000 0x8000>;
+reg = <0x0 0x01905000 0x0 0x8000>;
 };

 tcsr_q6: syscon@1945000 {
 compatible = "syscon";
-reg = <0x01945000 0xe000>;
+reg = <0x0 0x01945000 0x0 0xe000>;
 };

 blsp_dma: dma@7884000 {
 compatible = "qcom,bam-v1.7.0";
-reg = <0x07884000 0x2b000>;
+reg = <0x0 0x07884000 0x0 0x2b000>;
 interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&gcc GCC_BLSP1_AHB_CLK>;
 clock-names = "bam_clk";

@@ -264,7 +264,7 @@ blsp_dma: dma@7884000 {

 blsp1_uart3: serial@78b1000 {
 compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
-reg = <0x078b1000 0x200>;
+reg = <0x0 0x078b1000 0x0 0x200>;
 interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
 <&gcc GCC_BLSP1_AHB_CLK>;

@@ -276,7 +276,7 @@ spi_0: spi@78b5000 {
 compatible = "qcom,spi-qup-v2.2.1";
 #address-cells = <1>;
 #size-cells = <0>;
-reg = <0x078b5000 0x600>;
+reg = <0x0 0x078b5000 0x0 0x600>;
 interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
 spi-max-frequency = <50000000>;
 clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,

@@ -291,7 +291,7 @@ spi_1: spi@78b6000 {
 compatible = "qcom,spi-qup-v2.2.1";
 #address-cells = <1>;
 #size-cells = <0>;
-reg = <0x078b6000 0x600>;
+reg = <0x0 0x078b6000 0x0 0x600>;
 interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 spi-max-frequency = <50000000>;
 clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,

@@ -306,7 +306,7 @@ i2c_0: i2c@78b6000 {
 compatible = "qcom,i2c-qup-v2.2.1";
 #address-cells = <1>;
 #size-cells = <0>;
-reg = <0x078b6000 0x600>;
+reg = <0x0 0x078b6000 0x0 0x600>;
 interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&gcc GCC_BLSP1_AHB_CLK>,
 <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;

@@ -321,7 +321,7 @@ i2c_1: i2c@78b7000 { /* BLSP1 QUP2 */
 compatible = "qcom,i2c-qup-v2.2.1";
 #address-cells = <1>;
 #size-cells = <0>;
-reg = <0x078b7000 0x600>;
+reg = <0x0 0x078b7000 0x0 0x600>;
 interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 clocks = <&gcc GCC_BLSP1_AHB_CLK>,
 <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;

@@ -336,24 +336,24 @@ intc: interrupt-controller@b000000 {
 compatible = "qcom,msm-qgic2";
 interrupt-controller;
 #interrupt-cells = <0x3>;
-reg = <0x0b000000 0x1000>, /*GICD*/
-<0x0b002000 0x1000>, /*GICC*/
-<0x0b001000 0x1000>, /*GICH*/
-<0x0b004000 0x1000>; /*GICV*/
+reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/
+<0x0 0x0b002000 0x0 0x1000>, /*GICC*/
+<0x0 0x0b001000 0x0 0x1000>, /*GICH*/
+<0x0 0x0b004000 0x0 0x1000>; /*GICV*/
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 };

 watchdog@b017000 {
 compatible = "qcom,kpss-wdt";
 interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
-reg = <0x0b017000 0x40>;
+reg = <0x0 0x0b017000 0x0 0x40>;
 clocks = <&sleep_clk>;
 timeout-sec = <10>;
 };

 apcs_glb: mailbox@b111000 {
 compatible = "qcom,ipq6018-apcs-apps-global";
-reg = <0x0b111000 0x1000>;
+reg = <0x0 0x0b111000 0x0 0x1000>;
 #clock-cells = <1>;
 clocks = <&a53pll>, <&xo>;
 clock-names = "pll", "xo";

@@ -362,7 +362,7 @@ apcs_glb: mailbox@b111000 {

 a53pll: clock@b116000 {
 compatible = "qcom,ipq6018-a53pll";
-reg = <0x0b116000 0x40>;
+reg = <0x0 0x0b116000 0x0 0x40>;
 #clock-cells = <0>;
 clocks = <&xo>;
 clock-names = "xo";

@@ -377,68 +377,68 @@ timer {
 };

 timer@b120000 {
-#address-cells = <1>;
-#size-cells = <1>;
+#address-cells = <2>;
+#size-cells = <2>;
 ranges;
 compatible = "arm,armv7-timer-mem";
-reg = <0x0b120000 0x1000>;
+reg = <0x0 0x0b120000 0x0 0x1000>;
 clock-frequency = <19200000>;

 frame@b120000 {
 frame-number = <0>;
 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b121000 0x1000>,
-<0x0b122000 0x1000>;
+reg = <0x0 0x0b121000 0x0 0x1000>,
+<0x0 0x0b122000 0x0 0x1000>;
 };

 frame@b123000 {
 frame-number = <1>;
 interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0xb123000 0x1000>;
+reg = <0x0 0xb123000 0x0 0x1000>;
 status = "disabled";
 };

 frame@b124000 {
 frame-number = <2>;
 interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b124000 0x1000>;
+reg = <0x0 0x0b124000 0x0 0x1000>;
 status = "disabled";
 };

 frame@b125000 {
 frame-number = <3>;
 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b125000 0x1000>;
+reg = <0x0 0x0b125000 0x0 0x1000>;
 status = "disabled";
 };

 frame@b126000 {
 frame-number = <4>;
 interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b126000 0x1000>;
+reg = <0x0 0x0b126000 0x0 0x1000>;
 status = "disabled";
 };

 frame@b127000 {
 frame-number = <5>;
 interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b127000 0x1000>;
+reg = <0x0 0x0b127000 0x0 0x1000>;
 status = "disabled";
 };

 frame@b128000 {
 frame-number = <6>;
 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
-reg = <0x0b128000 0x1000>;
+reg = <0x0 0x0b128000 0x0 0x1000>;
 status = "disabled";
 };
 };

 q6v5_wcss: remoteproc@cd00000 {
 compatible = "qcom,ipq8074-wcss-pil";
-reg = <0x0cd00000 0x4040>,
-<0x004ab000 0x20>;
+reg = <0x0 0x0cd00000 0x0 0x4040>,
+<0x0 0x004ab000 0x0 0x20>;
 reg-names = "qdsp6",
 "rmb";
 interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
@@ -243,7 +243,6 @@ rk817: pmic@20 {
 interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&pmic_int>;
-rockchip,system-power-controller;
 wakeup-source;
 #clock-cells = <1>;
 clock-output-names = "rk808-clkout1", "xin32k";
@@ -20,7 +20,7 @@ chosen {
 gmac_clk: gmac-clock {
 compatible = "fixed-clock";
 clock-frequency = <125000000>;
-clock-output-names = "gmac_clk";
+clock-output-names = "gmac_clkin";
 #clock-cells = <0>;
 };
@@ -74,14 +74,14 @@ diy_led: led-1 {
 label = "red:diy";
 gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
 default-state = "off";
-linux,default-trigger = "mmc1";
+linux,default-trigger = "mmc2";
 };

 yellow_led: led-2 {
 label = "yellow:yellow-led";
 gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
 default-state = "off";
-linux,default-trigger = "mmc0";
+linux,default-trigger = "mmc1";
 };
 };
@@ -29,6 +29,9 @@ aliases {
 i2c6 = &i2c6;
 i2c7 = &i2c7;
 i2c8 = &i2c8;
+mmc0 = &sdio0;
+mmc1 = &sdmmc;
+mmc2 = &sdhci;
 serial0 = &uart0;
 serial1 = &uart1;
 serial2 = &uart2;
@@ -115,8 +115,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_not_user(pte) \
 ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
-((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
 ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

@@ -124,9 +122,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
+* Note that we can't make any assumptions based on the state of the access
+* flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+* TLB.
 */
 #define pte_accessible(mm, pte) \
-(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

 /*
 * p??_access_permitted() is true for valid user mappings (subject to the

@@ -164,13 +165,6 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
 return pmd;
 }

-static inline pte_t pte_wrprotect(pte_t pte)
-{
-pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
-pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
-return pte;
-}
-
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 pte = set_pte_bit(pte, __pgprot(PTE_WRITE));

@@ -196,6 +190,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
 return pte;
 }

+static inline pte_t pte_wrprotect(pte_t pte)
+{
+/*
+ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+ * clear), set the PTE_DIRTY bit.
+ */
+if (pte_hw_dirty(pte))
+pte = pte_mkdirty(pte);
+
+pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+return pte;
+}
+
 static inline pte_t pte_mkold(pte_t pte)
 {
 return clear_pte_bit(pte, __pgprot(PTE_AF));

@@ -845,12 +853,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 pte = READ_ONCE(*ptep);
 do {
 old_pte = pte;
-/*
- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
- * clear), set the PTE_DIRTY bit.
- */
-if (pte_hw_dirty(pte))
-pte = pte_mkdirty(pte);
 pte = pte_wrprotect(pte);
 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
 pte_val(old_pte), pte_val(pte));
@@ -7,6 +7,8 @@
 #ifndef _ARM_PROBES_H
 #define _ARM_PROBES_H

+#include <asm/insn.h>
+
 typedef u32 probe_opcode_t;
 typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
@@ -13,6 +13,11 @@

 SECTIONS {
 HYP_SECTION(.text)
+/*
+ * .hyp..data..percpu needs to be page aligned to maintain the same
+ * alignment for when linking into vmlinux.
+ */
+. = ALIGN(PAGE_SIZE);
 HYP_SECTION_NAME(.data..percpu) : {
 PERCPU_INPUT(L1_CACHE_BYTES)
 }
@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
 return extract_bytes(value, addr & 7, len);
 }

+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+gpa_t addr, unsigned int len)
+{
+unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+int target_vcpu_id = vcpu->vcpu_id;
+u64 value;
+
+value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+value |= ((target_vcpu_id & 0xffff) << 8);
+
+if (vgic_has_its(vcpu->kvm))
+value |= GICR_TYPER_PLPIS;
+
+/* reporting of the Last bit is not supported for userspace */
+return extract_bytes(value, addr & 7, len);
+}
+
 static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
 gpa_t addr, unsigned int len)
 {

@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
 vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
 VGIC_ACCESS_32bit),
-REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
-vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
 REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
 vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
@@ -154,6 +154,7 @@ static inline void pmd_clear(pmd_t *pmdp)

 #if defined(CONFIG_XPA)

+#define MAX_POSSIBLE_PHYSMEM_BITS 40
 #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)

@@ -169,6 +170,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)

 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

+#define MAX_POSSIBLE_PHYSMEM_BITS 36
 #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))

 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)

@@ -183,6 +185,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)

 #else

+#define MAX_POSSIBLE_PHYSMEM_BITS 32
 #ifdef CONFIG_CPU_VR41XX
 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
@@ -248,7 +248,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string)
 cpu-as-$(CONFIG_40x) += -Wa,-m405
 cpu-as-$(CONFIG_44x) += -Wa,-m440
 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
 cpu-as-$(CONFIG_E500) += -Wa,-me500

 # When using '-many -mpower4' gas will first try and find a matching power4
@@ -36,8 +36,10 @@ static inline bool pte_user(pte_t pte)
 */
 #ifdef CONFIG_PTE_64BIT
 #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
 #else
 #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
 #endif

 /*
@@ -63,6 +63,8 @@

 #else /* !__ASSEMBLY__ */

+#include <linux/jump_label.h>
+
 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

 #ifdef CONFIG_PPC_KUAP
@@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 */
 #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
 #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
 #else
 #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
 #endif

 /*
@@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
 * Vectors for the FWNMI option. Share common code.
 */
 TRAMP_REAL_BEGIN(system_reset_fwnmi)
-/* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
-__IKVM_REAL(system_reset)=0
 GEN_INT_ENTRY system_reset, virt=0

 #endif /* CONFIG_PPC_PSERIES */

@@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 * If none is found, do a Linux page fault. Linux page faults can happen in
 * kernel mode due to user copy operations of course.
 *
+* KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+* MMU context, which may cause a DSI in the host, which must go to the
+* KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+* always be used regardless of AIL setting.
+*
 * - Radix MMU
 * The hardware loads from the Linux page table directly, so a fault goes
 * immediately to Linux page fault.

@@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
 IVEC=0x300
 IDAR=1
 IDSISR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 IKVM_SKIP=1
 IKVM_REAL=1
-#endif
 INT_DEFINE_END(data_access)

 EXC_REAL_BEGIN(data_access, 0x300, 0x80)

@@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 * ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 * on user-handler data structures.
 *
+* KVM: Same as 0x300, DSLB must test for KVM guest.
+*
 * A dedicated save area EXSLB is used (XXX: but it actually need not be
 * these days, we could use EXGEN).
 */

@@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
 IAREA=PACA_EXSLB
 IRECONCILE=0
 IDAR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 IKVM_SKIP=1
 IKVM_REAL=1
-#endif
 INT_DEFINE_END(data_access_slb)

 EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
@@ -156,6 +156,7 @@ __after_mmu_off:
 bl initial_bats
 bl load_segment_registers
 BEGIN_MMU_FTR_SECTION
+bl reloc_offset
 bl early_hash_table
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
 #if defined(CONFIG_BOOTX_TEXT)

@@ -920,7 +921,7 @@ early_hash_table:
 ori r6, r6, 3 /* 256kB table */
 mtspr SPRN_SDR1, r6
 lis r6, early_hash@h
-lis r3, Hash@ha
+addis r3, r3, Hash@ha
 stw r6, Hash@l(r3)
 blr
@@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
 }

 state = &sb->irq_state[src];
+
+/* Some sanity checking */
+if (!state->valid) {
+pr_devel("%s: source %lx invalid !\n", __func__, irq);
+return VM_FAULT_SIGBUS;
+}
+
 kvmppc_xive_select_irq(state, &hw_num, &xd);

 arch_spin_lock(&sb->lock);
@@ -14,4 +14,6 @@
 #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE - 1))

+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
 #endif /* _ASM_RISCV_PGTABLE_32_H */
@@ -129,8 +129,15 @@ int uv_destroy_page(unsigned long paddr)
 .paddr = paddr
 };

-if (uv_call(0, (u64)&uvcb))
+if (uv_call(0, (u64)&uvcb)) {
+/*
+ * Older firmware uses 107/d as an indication of a non secure
+ * page. Let us emulate the newer variant (no-op).
+ */
+if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
+return 0;
 return -EINVAL;
+}
 return 0;
 }
@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
 struct kvm_s390_pv_unp unp = {};

 r = -EINVAL;
-if (!kvm_s390_pv_is_protected(kvm))
+if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
 break;

 r = -EFAULT;

@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 vcpu->arch.sie_block->pp = 0;
 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 vcpu->arch.sie_block->todpr = 0;
-vcpu->arch.sie_block->cpnc = 0;
 }
 }

@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)

 regs->etoken = 0;
 regs->etoken_extension = 0;
-regs->diag318 = 0;
 }

 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 return -EIO;
 }
 kvm->arch.gmap->guest_handle = uvcb.guest_handle;
-atomic_set(&kvm->mm->context.is_protected, 1);
 return 0;
 }

@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
 *rrc = uvcb.header.rrc;
 KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
 *rc, *rrc);
+if (!cc)
+atomic_set(&kvm->mm->context.is_protected, 1);
 return cc ? -EINVAL : 0;
 }
@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
 #include <linux/sched/mm.h>
 void s390_reset_acc(struct mm_struct *mm)
 {
+if (!mm_is_protected(mm))
+return;
 /*
 * we might be called during
 * reset: we walk the pages and clear
@@ -1656,6 +1656,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
@@ -514,13 +514,10 @@ int tboot_force_iommu(void)
 if (!tboot_enabled())
 return 0;

-if (no_iommu || swiotlb || dmar_disabled)
+if (no_iommu || dmar_disabled)
 pr_warn("Forcing Intel-IOMMU to enabled\n");

 dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
-swiotlb = 0;
-#endif
 no_iommu = 0;

 return 1;
@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
 * check if there is pending interrupt from
 * non-APIC source without intack.
 */
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
-u8 accept = kvm_apic_accept_pic_intr(v);
-
-if (accept) {
-if (irqchip_split(v->kvm))
-return pending_userspace_extint(v);
-else
-return v->kvm->arch.vpic->output;
-} else
-return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
 {
 /*
- * FIXME: interrupt.injected represents an interrupt that it's
+ * FIXME: interrupt.injected represents an interrupt whose
 * side-effects have already been applied (e.g. bit from IRR
 * already moved to ISR). Therefore, it is incorrect to rely
 * on interrupt.injected to know if there is a pending

@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 if (!lapic_in_kernel(v))
 return v->arch.interrupt.injected;

+if (!kvm_apic_accept_pic_intr(v))
+return 0;
+
+if (irqchip_split(v->kvm))
+return pending_userspace_extint(v);
+else
+return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is injectable interrupt:
+ * when virtual interrupt delivery enabled,
+ * interrupt from apic will handled by hardware,
+ * we don't need to check it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
 if (kvm_cpu_has_extint(v))
 return 1;

@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
 */
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-/*
- * FIXME: interrupt.injected represents an interrupt that it's
- * side-effects have already been applied (e.g. bit from IRR
- * already moved to ISR). Therefore, it is incorrect to rely
- * on interrupt.injected to know if there is a pending
- * interrupt in the user-mode LAPIC.
- * This leads to nVMX/nSVM not be able to distinguish
- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
- * pending interrupt or should re-inject an injected
- * interrupt.
- */
-if (!lapic_in_kernel(v))
-return v->arch.interrupt.injected;
-
 if (kvm_cpu_has_extint(v))
 return 1;

@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 */
 static int kvm_cpu_get_extint(struct kvm_vcpu *v)
 {
-if (kvm_cpu_has_extint(v)) {
-if (irqchip_split(v->kvm)) {
-int vector = v->arch.pending_external_vector;
-
-v->arch.pending_external_vector = -1;
-return vector;
-} else
-return kvm_pic_read_irq(v->kvm); /* PIC */
-} else
+if (!kvm_cpu_has_extint(v)) {
+WARN_ON(!lapic_in_kernel(v));
 return -1;
+}
+
+if (!lapic_in_kernel(v))
+return v->arch.interrupt.nr;
+
+if (irqchip_split(v->kvm)) {
+int vector = v->arch.pending_external_vector;
+
+v->arch.pending_external_vector = -1;
+return vector;
+} else
+return kvm_pic_read_irq(v->kvm); /* PIC */
 }

 /*

@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
 */
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 {
-int vector;
-
-if (!lapic_in_kernel(v))
-return v->arch.interrupt.nr;
-
-vector = kvm_cpu_get_extint(v);
-
+int vector = kvm_cpu_get_extint(v);
 if (vector != -1)
 return vector; /* PIC */
@@ -2465,7 +2465,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 struct kvm_lapic *apic = vcpu->arch.apic;
 u32 ppr;

-if (!kvm_apic_hw_enabled(apic))
+if (!kvm_apic_present(vcpu))
 return -1;

 __apic_update_ppr(apic, &ppr);
@@ -3517,7 +3517,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 u64 sptes[PT64_ROOT_MAX_LEVEL];
 struct rsvd_bits_validate *rsvd_check;
-int root = vcpu->arch.mmu->root_level;
+int root = vcpu->arch.mmu->shadow_root_level;
 int leaf;
 int level;
 bool reserved = false;
@@ -642,8 +642,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 * Its safe to read more than we are asked, caller should ensure that
 * destination has enough space.
 */
-src_paddr = round_down(src_paddr, 16);
 offset = src_paddr & 15;
+src_paddr = round_down(src_paddr, 16);
 sz = round_up(sz + offset, 16);

 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
@@ -1309,8 +1309,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 svm->avic_is_running = true;

 svm->msrpm = svm_vcpu_alloc_msrpm();
-if (!svm->msrpm)
+if (!svm->msrpm) {
+err = -ENOMEM;
 goto error_free_vmcb_page;
+}

 svm_vcpu_init_msrpm(vcpu, svm->msrpm);
@@ -4051,21 +4051,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,

 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 {
+/*
+ * We can accept userspace's request for interrupt injection
+ * as long as we have a place to store the interrupt number.
+ * The actual injection will happen when the CPU is able to
+ * deliver the interrupt.
+ */
+if (kvm_cpu_has_extint(vcpu))
+return false;
+
+/* Acknowledging ExtINT does not happen if LINT0 is masked. */
 return (!lapic_in_kernel(vcpu) ||
 kvm_apic_accept_pic_intr(vcpu));
 }

-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
 return kvm_arch_interrupt_allowed(vcpu) &&
-!kvm_cpu_has_interrupt(vcpu) &&
 !kvm_event_needs_reinjection(vcpu) &&
 kvm_cpu_accept_dm_intr(vcpu);
 }
@@ -108,6 +108,13 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots)
 spin_lock_init(&ksm->idle_slots_lock);

 slot_hashtable_size = roundup_pow_of_two(num_slots);
+/*
+ * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
+ * buckets. This only makes a difference when there is only 1 keyslot.
+ */
+if (slot_hashtable_size < 2)
+slot_hashtable_size = 2;
+
 ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
 ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
 sizeof(ksm->slot_hashtable[0]),
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(iort_fwnode_lock);
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 * iommu data in the iort_fwnode_list
 *
-* @node: IORT table node associated with the IOMMU
+* @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success

@@ -673,7 +673,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 id,
 /**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
-* @req_id: Requester ID for the device.
+* @id: Requester ID for the device.
+* @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */

@@ -1136,7 +1137,7 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
-* @size: DMA range size result pointer
+* @dma_size: DMA range size result pointer
 */
 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 {

@@ -1526,6 +1527,7 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
 /**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
+* @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
@@ -227,6 +227,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
 u32 sysc_mask, syss_done, rstval;
 int syss_offset, error = 0;

+if (ddata->cap->regbits->srst_shift < 0)
+return 0;
+
 syss_offset = ddata->offsets[SYSC_SYSSTATUS];
 sysc_mask = BIT(ddata->cap->regbits->srst_shift);

@@ -970,9 +973,15 @@ static int sysc_enable_module(struct device *dev)
 return error;
 }
 }
-error = sysc_wait_softreset(ddata);
-if (error)
-dev_warn(ddata->dev, "OCP softreset timed out\n");
+/*
+ * Some modules like i2c and hdq1w have unusable reset status unless
+ * the module reset quirk is enabled. Skip status check on enable.
+ */
+if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+error = sysc_wait_softreset(ddata);
+if (error)
+dev_warn(ddata->dev, "OCP softreset timed out\n");
+}
 if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
 sysc_disable_opt_clocks(ddata);

@@ -1373,17 +1382,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
 SYSC_QUIRK_OPT_CLKS_NEEDED),
 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
-SYSC_MODULE_QUIRK_HDQ1W),
+SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
-SYSC_MODULE_QUIRK_HDQ1W),
+SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
-SYSC_MODULE_QUIRK_I2C),
+SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
-SYSC_MODULE_QUIRK_I2C),
+SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
-SYSC_MODULE_QUIRK_I2C),
+SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
-SYSC_MODULE_QUIRK_I2C),
+SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
 SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
 SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
 SYSC_MODULE_QUIRK_SGX),

@@ -2880,7 +2889,7 @@ static int sysc_check_active_timer(struct sysc *ddata)

 if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
 (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
-return -EBUSY;
+return -ENXIO;

 return 0;
 }
@ -20,12 +20,28 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>

#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"

/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define PM_API_FEATURE_CHECK_MAX_ORDER 7

static bool feature_check_enabled;
static u32 zynqmp_pm_features[PM_API_MAX];
DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);

/**
* struct pm_api_feature_data - PM API Feature data
* @pm_api_id: PM API Id, used as key to index into hashmap
* @feature_status: status of PM API feature: valid, invalid
* @hentry: hlist_node that hooks this entry into hashtable
*/
struct pm_api_feature_data {
u32 pm_api_id;
int feature_status;
struct hlist_node hentry;
};

static const struct mfd_cell firmware_devs[] = {
{

@ -142,29 +158,37 @@ static int zynqmp_pm_feature(u32 api_id)
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
u64 smc_arg[2];
struct pm_api_feature_data *feature_data;

if (!feature_check_enabled)
return 0;

/* Return value if feature is already checked */
if (api_id > ARRAY_SIZE(zynqmp_pm_features))
return PM_FEATURE_INVALID;
/* Check for existing entry in hash table for given api */
hash_for_each_possible(pm_api_features_map, feature_data, hentry,
api_id) {
if (feature_data->pm_api_id == api_id)
return feature_data->feature_status;
}

if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
return zynqmp_pm_features[api_id];
/* Add new entry if not present */
feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
if (!feature_data)
return -ENOMEM;

feature_data->pm_api_id = api_id;
smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
smc_arg[1] = api_id;

ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
if (ret) {
zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
return PM_FEATURE_INVALID;
}
if (ret)
ret = -EOPNOTSUPP;
else
ret = ret_payload[1];

zynqmp_pm_features[api_id] = ret_payload[1];
feature_data->feature_status = ret;
hash_add(pm_api_features_map, &feature_data->hentry, api_id);

return zynqmp_pm_features[api_id];
return ret;
}

/**

@ -200,9 +224,12 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
* Make sure to stay in x0 register
*/
u64 smc_arg[4];
int ret;

if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
return -ENOTSUPP;
/* Check if feature is supported or not */
ret = zynqmp_pm_feature(pm_api_id);
if (ret < 0)
return ret;

smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;

@ -615,7 +642,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
type, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);

@ -1252,9 +1279,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)

static int zynqmp_firmware_remove(struct platform_device *pdev)
{
struct pm_api_feature_data *feature_data;
int i;

mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();

hash_for_each(pm_api_features_map, i, feature_data, hentry) {
hash_del(&feature_data->hentry);
kfree(feature_data);
}

return 0;
}
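The zynqmp hunks above replace the bounded `zynqmp_pm_features[]` array with a hashtable keyed by the PM API id, so the one-time firmware feature query is memoized without limiting the id range, and the cache is torn down again in `zynqmp_firmware_remove()`. A minimal userspace sketch of that memoization pattern (the bucket count mirrors `1 << PM_API_FEATURE_CHECK_MAX_ORDER`; the `query_firmware()` stub and all names are illustrative assumptions, not the driver's API):

```c
#include <stdio.h>
#include <stdlib.h>

#define FEATURE_BUCKETS 128            /* mirrors 1 << PM_API_FEATURE_CHECK_MAX_ORDER */

struct feature_entry {
	unsigned int api_id;           /* key */
	int status;                    /* cached result */
	struct feature_entry *next;    /* chain within a bucket */
};

static struct feature_entry *feature_map[FEATURE_BUCKETS];

/* Stand-in for the SMC/firmware call; always reports "supported" here. */
static int query_firmware(unsigned int api_id)
{
	return 1;
}

static int feature_check(unsigned int api_id)
{
	unsigned int bucket = api_id % FEATURE_BUCKETS;
	struct feature_entry *e;

	for (e = feature_map[bucket]; e; e = e->next)   /* cache hit? */
		if (e->api_id == api_id)
			return e->status;

	e = malloc(sizeof(*e));                         /* miss: ask once, remember */
	if (!e)
		return -1;
	e->api_id = api_id;
	e->status = query_firmware(api_id);
	e->next = feature_map[bucket];
	feature_map[bucket] = e;
	return e->status;
}

int main(void)
{
	printf("api 13 -> %d (queried)\n", feature_check(13));
	printf("api 13 -> %d (cached)\n", feature_check(13));
	return 0;
}
```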
@ -4852,7 +4852,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;

if (ras && ras->supported)
if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

return amdgpu_dpm_baco_enter(adev);

@ -4871,7 +4871,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
if (ret)
return ret;

if (ras && ras->supported)
if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

return 0;
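The amdgpu change above adds a NULL check before dereferencing `adev->nbio.funcs->enable_doorbell_interrupt`, presumably because not every NBIO implementation provides that hook. The same guard-the-optional-callback pattern in isolation (types and names here are illustrative only, not amdgpu's):

```c
#include <stdio.h>

struct nbio_funcs {
	void (*enable_doorbell_interrupt)(void *dev, int enable); /* optional hook */
};

static void toggle_doorbell(const struct nbio_funcs *funcs, void *dev, int enable)
{
	/* Only call the hook when the implementation actually provides it. */
	if (funcs && funcs->enable_doorbell_interrupt)
		funcs->enable_doorbell_interrupt(dev, enable);
}

int main(void)
{
	struct nbio_funcs none = { 0 };

	toggle_doorbell(&none, NULL, 0);   /* safe: hook absent, nothing happens */
	puts("no crash");
	return 0;
}
```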
@ -69,10 +69,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
uint64_t size)
uint64_t size_in_page)
{
return ttm_range_man_init(&adev->mman.bdev, type,
false, size >> PAGE_SHIFT);
false, size_in_page);
}

/**
@ -67,6 +67,7 @@ struct amdgpu_uvd {
unsigned harvest_config;
/* store image width to adjust nb memory state */
unsigned decode_image_width;
uint32_t keyselect;
};

int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
|
||||
|
@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
|
||||
*/
|
||||
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
|
||||
{
|
||||
void *ptr;
|
||||
uint32_t ucode_len, i;
|
||||
uint32_t keysel;
|
||||
|
||||
ptr = adev->uvd.inst[0].cpu_addr;
|
||||
ptr += 192 + 16;
|
||||
memcpy(&ucode_len, ptr, 4);
|
||||
ptr += ucode_len;
|
||||
memcpy(&keysel, ptr, 4);
|
||||
int i;
|
||||
uint32_t keysel = adev->uvd.keyselect;
|
||||
|
||||
WREG32(mmUVD_FW_START, keysel);
|
||||
|
||||
@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
|
||||
struct amdgpu_ring *ring;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
void *ptr;
|
||||
uint32_t ucode_len;
|
||||
|
||||
/* UVD TRAP */
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
|
||||
@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* Retrieval firmware validate key */
|
||||
ptr = adev->uvd.inst[0].cpu_addr;
|
||||
ptr += 192 + 16;
|
||||
memcpy(&ucode_len, ptr, 4);
|
||||
ptr += ucode_len;
|
||||
memcpy(&adev->uvd.keyselect, ptr, 4);
|
||||
|
||||
r = amdgpu_uvd_entity_init(adev);
|
||||
|
||||
return r;
|
||||
|
@ -1041,7 +1041,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) {
if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

if (!adev->dm.hdcp_workqueue)
@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
if (ast->tx_chip_type == AST_TX_DP501)
|
||||
ast_set_dp501_video_output(crtc->dev, 1);
|
||||
ast_crtc_load_lut(ast, crtc);
|
||||
break;
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
if (ast->tx_chip_type == AST_TX_DP501)
|
||||
@ -777,6 +776,21 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
|
||||
{
|
||||
struct ast_private *ast = to_ast_private(crtc->dev);
|
||||
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
|
||||
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
|
||||
|
||||
/*
|
||||
* The gamma LUT has to be reloaded after changing the primary
|
||||
* plane's color format.
|
||||
*/
|
||||
if (old_ast_crtc_state->format != ast_crtc_state->format)
|
||||
ast_crtc_load_lut(ast, crtc);
|
||||
}
|
||||
|
||||
static void
|
||||
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *old_crtc_state)
|
||||
@ -830,6 +844,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
|
||||
|
||||
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
|
||||
.atomic_check = ast_crtc_helper_atomic_check,
|
||||
.atomic_flush = ast_crtc_helper_atomic_flush,
|
||||
.atomic_enable = ast_crtc_helper_atomic_enable,
|
||||
.atomic_disable = ast_crtc_helper_atomic_disable,
|
||||
};
|
||||
|
@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on OF && DRM && COMMON_CLK
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
@ -30,18 +30,21 @@
|
||||
#include "i915_trace.h"
|
||||
#include "intel_breadcrumbs.h"
|
||||
#include "intel_context.h"
|
||||
#include "intel_engine_pm.h"
|
||||
#include "intel_gt_pm.h"
|
||||
#include "intel_gt_requests.h"
|
||||
|
||||
static void irq_enable(struct intel_engine_cs *engine)
|
||||
static bool irq_enable(struct intel_engine_cs *engine)
|
||||
{
|
||||
if (!engine->irq_enable)
|
||||
return;
|
||||
return false;
|
||||
|
||||
/* Caller disables interrupts */
|
||||
spin_lock(&engine->gt->irq_lock);
|
||||
engine->irq_enable(engine);
|
||||
spin_unlock(&engine->gt->irq_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void irq_disable(struct intel_engine_cs *engine)
|
||||
@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
|
||||
|
||||
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
|
||||
{
|
||||
lockdep_assert_held(&b->irq_lock);
|
||||
|
||||
if (!b->irq_engine || b->irq_armed)
|
||||
return;
|
||||
|
||||
if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
|
||||
/*
|
||||
* Since we are waiting on a request, the GPU should be busy
|
||||
* and should have its own rpm reference.
|
||||
*/
|
||||
if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
|
||||
*/
|
||||
WRITE_ONCE(b->irq_armed, true);
|
||||
|
||||
/*
|
||||
* Since we are waiting on a request, the GPU should be busy
|
||||
* and should have its own rpm reference. This is tracked
|
||||
* by i915->gt.awake, we can forgo holding our own wakref
|
||||
* for the interrupt as before i915->gt.awake is released (when
|
||||
* the driver is idle) we disarm the breadcrumbs.
|
||||
*/
|
||||
/* Requests may have completed before we could enable the interrupt. */
|
||||
if (!b->irq_enabled++ && irq_enable(b->irq_engine))
|
||||
irq_work_queue(&b->irq_work);
|
||||
}
|
||||
|
||||
if (!b->irq_enabled++)
|
||||
irq_enable(b->irq_engine);
|
||||
static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
|
||||
{
|
||||
if (!b->irq_engine)
|
||||
return;
|
||||
|
||||
spin_lock(&b->irq_lock);
|
||||
if (!b->irq_armed)
|
||||
__intel_breadcrumbs_arm_irq(b);
|
||||
spin_unlock(&b->irq_lock);
|
||||
}
|
||||
|
||||
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
|
||||
{
|
||||
lockdep_assert_held(&b->irq_lock);
|
||||
|
||||
if (!b->irq_engine || !b->irq_armed)
|
||||
return;
|
||||
|
||||
GEM_BUG_ON(!b->irq_enabled);
|
||||
if (!--b->irq_enabled)
|
||||
irq_disable(b->irq_engine);
|
||||
@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
|
||||
{
|
||||
intel_context_get(ce);
|
||||
list_add_tail(&ce->signal_link, &b->signalers);
|
||||
if (list_is_first(&ce->signal_link, &b->signalers))
|
||||
__intel_breadcrumbs_arm_irq(b);
|
||||
}
|
||||
|
||||
static void remove_signaling_context(struct intel_breadcrumbs *b,
|
||||
@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
|
||||
intel_engine_add_retire(b->irq_engine, tl);
|
||||
}
|
||||
|
||||
static bool __signal_request(struct i915_request *rq, struct list_head *signals)
|
||||
static bool __signal_request(struct i915_request *rq)
|
||||
{
|
||||
clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
|
||||
|
||||
if (!__dma_fence_signal(&rq->fence)) {
|
||||
i915_request_put(rq);
|
||||
return false;
|
||||
}
|
||||
|
||||
list_add_tail(&rq->signal_link, signals);
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct llist_node *
|
||||
slist_add(struct llist_node *node, struct llist_node *head)
|
||||
{
|
||||
node->next = head;
|
||||
return node;
|
||||
}
|
||||
|
||||
static void signal_irq_work(struct irq_work *work)
|
||||
{
|
||||
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
|
||||
const ktime_t timestamp = ktime_get();
|
||||
struct llist_node *signal, *sn;
|
||||
struct intel_context *ce, *cn;
|
||||
struct list_head *pos, *next;
|
||||
LIST_HEAD(signal);
|
||||
|
||||
signal = NULL;
|
||||
if (unlikely(!llist_empty(&b->signaled_requests)))
|
||||
signal = llist_del_all(&b->signaled_requests);
|
||||
|
||||
spin_lock(&b->irq_lock);
|
||||
|
||||
if (list_empty(&b->signalers))
|
||||
/*
|
||||
* Keep the irq armed until the interrupt after all listeners are gone.
|
||||
*
|
||||
* Enabling/disabling the interrupt is rather costly, roughly a couple
|
||||
* of hundred microseconds. If we are proactive and enable/disable
|
||||
* the interrupt around every request that wants a breadcrumb, we
|
||||
* quickly drown in the extra orders of magnitude of latency imposed
|
||||
* on request submission.
|
||||
*
|
||||
* So we try to be lazy, and keep the interrupts enabled until no
|
||||
* more listeners appear within a breadcrumb interrupt interval (that
|
||||
* is until a request completes that no one cares about). The
|
||||
* observation is that listeners come in batches, and will often
|
||||
* listen to a bunch of requests in succession. Though note on icl+,
|
||||
* interrupts are always enabled due to concerns with rc6 being
|
||||
* dysfunctional with per-engine interrupt masking.
|
||||
*
|
||||
* We also try to avoid raising too many interrupts, as they may
|
||||
* be generated by userspace batches and it is unfortunately rather
|
||||
* too easy to drown the CPU under a flood of GPU interrupts. Thus
|
||||
* whenever no one appears to be listening, we turn off the interrupts.
|
||||
* Fewer interrupts should conserve power -- at the very least, fewer
|
||||
* interrupt draw less ire from other users of the system and tools
|
||||
* like powertop.
|
||||
*/
|
||||
if (!signal && b->irq_armed && list_empty(&b->signalers))
|
||||
__intel_breadcrumbs_disarm_irq(b);
|
||||
|
||||
list_splice_init(&b->signaled_requests, &signal);
|
||||
|
||||
list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
|
||||
GEM_BUG_ON(list_empty(&ce->signals));
|
||||
|
||||
@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
|
||||
* spinlock as the callback chain may end up adding
|
||||
* more signalers to the same context or engine.
|
||||
*/
|
||||
__signal_request(rq, &signal);
|
||||
clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
|
||||
if (__signal_request(rq))
|
||||
/* We own signal_node now, xfer to local list */
|
||||
signal = slist_add(&rq->signal_node, signal);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
|
||||
|
||||
spin_unlock(&b->irq_lock);
|
||||
|
||||
list_for_each_safe(pos, next, &signal) {
|
||||
llist_for_each_safe(signal, sn, signal) {
|
||||
struct i915_request *rq =
|
||||
list_entry(pos, typeof(*rq), signal_link);
|
||||
llist_entry(signal, typeof(*rq), signal_node);
|
||||
struct list_head cb_list;
|
||||
|
||||
spin_lock(&rq->lock);
|
||||
@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)
|
||||
|
||||
i915_request_put(rq);
|
||||
}
|
||||
|
||||
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
|
||||
intel_breadcrumbs_arm_irq(b);
|
||||
}
|
||||
|
||||
struct intel_breadcrumbs *
|
||||
@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
|
||||
|
||||
spin_lock_init(&b->irq_lock);
|
||||
INIT_LIST_HEAD(&b->signalers);
|
||||
INIT_LIST_HEAD(&b->signaled_requests);
|
||||
init_llist_head(&b->signaled_requests);
|
||||
|
||||
init_irq_work(&b->irq_work, signal_irq_work);
|
||||
|
||||
@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
|
||||
|
||||
void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!READ_ONCE(b->irq_armed))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&b->irq_lock, flags);
|
||||
__intel_breadcrumbs_disarm_irq(b);
|
||||
spin_unlock_irqrestore(&b->irq_lock, flags);
|
||||
|
||||
if (!list_empty(&b->signalers))
|
||||
irq_work_queue(&b->irq_work);
|
||||
/* Kick the work once more to drain the signalers */
|
||||
irq_work_sync(&b->irq_work);
|
||||
while (unlikely(READ_ONCE(b->irq_armed))) {
|
||||
local_irq_disable();
|
||||
signal_irq_work(&b->irq_work);
|
||||
local_irq_enable();
|
||||
cond_resched();
|
||||
}
|
||||
GEM_BUG_ON(!list_empty(&b->signalers));
|
||||
}
|
||||
|
||||
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
|
||||
{
|
||||
irq_work_sync(&b->irq_work);
|
||||
GEM_BUG_ON(!list_empty(&b->signalers));
|
||||
GEM_BUG_ON(b->irq_armed);
|
||||
kfree(b);
|
||||
}
|
||||
|
||||
@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
|
||||
* its signal completion.
|
||||
*/
|
||||
if (__request_completed(rq)) {
|
||||
if (__signal_request(rq, &b->signaled_requests))
|
||||
if (__signal_request(rq) &&
|
||||
llist_add(&rq->signal_node, &b->signaled_requests))
|
||||
irq_work_queue(&b->irq_work);
|
||||
return;
|
||||
}
|
||||
@ -362,9 +400,12 @@ static void insert_breadcrumb(struct i915_request *rq,
|
||||
GEM_BUG_ON(!check_signal_order(ce, rq));
|
||||
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
|
||||
|
||||
/* Check after attaching to irq, interrupt may have already fired. */
|
||||
if (__request_completed(rq))
|
||||
irq_work_queue(&b->irq_work);
|
||||
/*
|
||||
* Defer enabling the interrupt to after HW submission and recheck
|
||||
* the request as it may have completed and raised the interrupt as
|
||||
* we were attaching it into the lists.
|
||||
*/
|
||||
irq_work_queue(&b->irq_work);
|
||||
}
|
||||
|
||||
bool i915_request_enable_breadcrumb(struct i915_request *rq)
|
||||
|
@ -35,7 +35,7 @@ struct intel_breadcrumbs {
|
||||
struct intel_engine_cs *irq_engine;
|
||||
|
||||
struct list_head signalers;
|
||||
struct list_head signaled_requests;
|
||||
struct llist_head signaled_requests;
|
||||
|
||||
struct irq_work irq_work; /* for use from inside irq_lock */
|
||||
|
||||
|
@ -182,6 +182,7 @@
|
||||
struct virtual_engine {
|
||||
struct intel_engine_cs base;
|
||||
struct intel_context context;
|
||||
struct rcu_work rcu;
|
||||
|
||||
/*
|
||||
* We allow only a single request through the virtual engine at a time
|
||||
@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
|
||||
return &ve->base.execlists.default_priolist.requests[0];
|
||||
}
|
||||
|
||||
static void virtual_context_destroy(struct kref *kref)
|
||||
static void rcu_virtual_context_destroy(struct work_struct *wrk)
|
||||
{
|
||||
struct virtual_engine *ve =
|
||||
container_of(kref, typeof(*ve), context.ref);
|
||||
container_of(wrk, typeof(*ve), rcu.work);
|
||||
unsigned int n;
|
||||
|
||||
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
|
||||
GEM_BUG_ON(ve->request);
|
||||
GEM_BUG_ON(ve->context.inflight);
|
||||
|
||||
/* Preempt-to-busy may leave a stale request behind. */
|
||||
if (unlikely(ve->request)) {
|
||||
struct i915_request *old;
|
||||
|
||||
spin_lock_irq(&ve->base.active.lock);
|
||||
|
||||
old = fetch_and_zero(&ve->request);
|
||||
if (old) {
|
||||
GEM_BUG_ON(!i915_request_completed(old));
|
||||
__i915_request_submit(old);
|
||||
i915_request_put(old);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&ve->base.active.lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the tasklet in case it is still running on another core.
|
||||
*
|
||||
* This needs to be done before we remove ourselves from the siblings'
|
||||
* rbtrees as in the case it is running in parallel, it may reinsert
|
||||
* the rb_node into a sibling.
|
||||
*/
|
||||
tasklet_kill(&ve->base.execlists.tasklet);
|
||||
|
||||
/* Decouple ourselves from the siblings, no more access allowed. */
|
||||
for (n = 0; n < ve->num_siblings; n++) {
|
||||
struct intel_engine_cs *sibling = ve->siblings[n];
|
||||
struct rb_node *node = &ve->nodes[sibling->id].rb;
|
||||
unsigned long flags;
|
||||
|
||||
if (RB_EMPTY_NODE(node))
|
||||
continue;
|
||||
|
||||
spin_lock_irqsave(&sibling->active.lock, flags);
|
||||
spin_lock_irq(&sibling->active.lock);
|
||||
|
||||
/* Detachment is lazily performed in the execlists tasklet */
|
||||
if (!RB_EMPTY_NODE(node))
|
||||
rb_erase_cached(node, &sibling->execlists.virtual);
|
||||
|
||||
spin_unlock_irqrestore(&sibling->active.lock, flags);
|
||||
spin_unlock_irq(&sibling->active.lock);
|
||||
}
|
||||
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
|
||||
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
|
||||
|
||||
if (ve->context.state)
|
||||
__execlists_context_fini(&ve->context);
|
||||
@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
|
||||
kfree(ve);
|
||||
}
|
||||
|
||||
static void virtual_context_destroy(struct kref *kref)
|
||||
{
|
||||
struct virtual_engine *ve =
|
||||
container_of(kref, typeof(*ve), context.ref);
|
||||
|
||||
GEM_BUG_ON(!list_empty(&ve->context.signals));
|
||||
|
||||
/*
|
||||
* When destroying the virtual engine, we have to be aware that
|
||||
* it may still be in use from an hardirq/softirq context causing
|
||||
* the resubmission of a completed request (background completion
|
||||
* due to preempt-to-busy). Before we can free the engine, we need
|
||||
* to flush the submission code and tasklets that are still potentially
|
||||
* accessing the engine. Flushing the tasklets requires process context,
|
||||
* and since we can guard the resubmit onto the engine with an RCU read
|
||||
* lock, we can delegate the free of the engine to an RCU worker.
|
||||
*/
|
||||
INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
|
||||
queue_rcu_work(system_wq, &ve->rcu);
|
||||
}
|
||||
|
||||
static void virtual_engine_initial_hint(struct virtual_engine *ve)
|
||||
{
|
||||
int swp;
|
||||
|
@ -255,7 +255,7 @@ struct intel_gvt_mmio {
|
||||
#define F_CMD_ACCESS (1 << 3)
|
||||
/* This reg has been accessed by a VM */
|
||||
#define F_ACCESSED (1 << 4)
|
||||
/* This reg has been accessed through GPU commands */
|
||||
/* This reg could be accessed by unaligned address */
|
||||
#define F_UNALIGN (1 << 6)
|
||||
/* This reg is in GVT's mmio save-restor list and in hardware
|
||||
* logical context image
|
||||
|
@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
|
||||
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
|
||||
if (ret)
|
||||
return ret;
|
||||
intel_uncore_write(uncore, oastatus_reg,
|
||||
oastatus & ~GEN8_OASTATUS_REPORT_LOST);
|
||||
|
||||
intel_uncore_rmw(uncore, oastatus_reg,
|
||||
GEN8_OASTATUS_COUNTER_OVERFLOW |
|
||||
GEN8_OASTATUS_REPORT_LOST,
|
||||
IS_GEN_RANGE(uncore->i915, 8, 10) ?
|
||||
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
|
||||
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
|
||||
}
|
||||
|
||||
return gen8_append_oa_reports(stream, buf, count, offset);
|
||||
|
@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
|
||||
|
||||
#define GEN8_OASTATUS _MMIO(0x2b08)
|
||||
#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17)
|
||||
#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16)
|
||||
#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
|
||||
#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
|
||||
#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
|
||||
|
@ -176,7 +176,11 @@ struct i915_request {
|
||||
struct intel_context *context;
|
||||
struct intel_ring *ring;
|
||||
struct intel_timeline __rcu *timeline;
|
||||
struct list_head signal_link;
|
||||
|
||||
union {
|
||||
struct list_head signal_link;
|
||||
struct llist_node signal_node;
|
||||
};
|
||||
|
||||
/*
|
||||
* The rcu epoch of when this request was allocated. Used to judiciously
|
||||
|
@ -522,15 +522,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
drm_encoder_cleanup(encoder);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
|
||||
.destroy = mtk_dpi_encoder_destroy,
|
||||
};
|
||||
|
||||
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
|
@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
|
||||
u32 horizontal_sync_active_byte;
|
||||
u32 horizontal_backporch_byte;
|
||||
u32 horizontal_frontporch_byte;
|
||||
u32 horizontal_front_back_byte;
|
||||
u32 data_phy_cycles_byte;
|
||||
u32 dsi_tmp_buf_bpp, data_phy_cycles;
|
||||
u32 delta;
|
||||
struct mtk_phy_timing *timing = &dsi->phy_timing;
|
||||
|
||||
struct videomode *vm = &dsi->vm;
|
||||
@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
|
||||
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
|
||||
|
||||
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
|
||||
horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
|
||||
horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
|
||||
else
|
||||
horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
|
||||
dsi_tmp_buf_bpp;
|
||||
dsi_tmp_buf_bpp - 10;
|
||||
|
||||
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
|
||||
timing->da_hs_zero + timing->da_hs_exit;
|
||||
timing->da_hs_zero + timing->da_hs_exit + 3;
|
||||
|
||||
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
|
||||
if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
|
||||
data_phy_cycles * dsi->lanes + 18) {
|
||||
horizontal_frontporch_byte =
|
||||
vm->hfront_porch * dsi_tmp_buf_bpp -
|
||||
(data_phy_cycles * dsi->lanes + 18) *
|
||||
vm->hfront_porch /
|
||||
(vm->hfront_porch + vm->hback_porch);
|
||||
delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
|
||||
|
||||
horizontal_backporch_byte =
|
||||
horizontal_backporch_byte -
|
||||
(data_phy_cycles * dsi->lanes + 18) *
|
||||
vm->hback_porch /
|
||||
(vm->hfront_porch + vm->hback_porch);
|
||||
} else {
|
||||
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
|
||||
horizontal_frontporch_byte = vm->hfront_porch *
|
||||
dsi_tmp_buf_bpp;
|
||||
}
|
||||
horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
|
||||
horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
|
||||
data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
|
||||
|
||||
if (horizontal_front_back_byte > data_phy_cycles_byte) {
|
||||
horizontal_frontporch_byte -= data_phy_cycles_byte *
|
||||
horizontal_frontporch_byte /
|
||||
horizontal_front_back_byte;
|
||||
|
||||
horizontal_backporch_byte -= data_phy_cycles_byte *
|
||||
horizontal_backporch_byte /
|
||||
horizontal_front_back_byte;
|
||||
} else {
|
||||
if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
|
||||
data_phy_cycles * dsi->lanes + 12) {
|
||||
horizontal_frontporch_byte =
|
||||
vm->hfront_porch * dsi_tmp_buf_bpp -
|
||||
(data_phy_cycles * dsi->lanes + 12) *
|
||||
vm->hfront_porch /
|
||||
(vm->hfront_porch + vm->hback_porch);
|
||||
horizontal_backporch_byte = horizontal_backporch_byte -
|
||||
(data_phy_cycles * dsi->lanes + 12) *
|
||||
vm->hback_porch /
|
||||
(vm->hfront_porch + vm->hback_porch);
|
||||
} else {
|
||||
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
|
||||
horizontal_frontporch_byte = vm->hfront_porch *
|
||||
dsi_tmp_buf_bpp;
|
||||
}
|
||||
DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
|
||||
}
|
||||
|
||||
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
|
||||
|
@ -558,8 +558,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
|
||||
NV_PRINTK(err, cli, "validating bo list\n");
|
||||
validate_fini(op, chan, NULL, NULL);
|
||||
return ret;
|
||||
} else if (ret > 0) {
|
||||
*apply_relocs = true;
|
||||
}
|
||||
*apply_relocs = ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -662,7 +664,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
|
||||
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
|
||||
}
|
||||
|
||||
u_free(reloc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -872,9 +873,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
|
||||
break;
|
||||
}
|
||||
}
|
||||
u_free(reloc);
|
||||
}
|
||||
out_prevalid:
|
||||
if (!IS_ERR(reloc))
|
||||
u_free(reloc);
|
||||
u_free(bo);
|
||||
u_free(push);
|
||||
|
||||
|
@ -219,6 +219,7 @@ struct vc4_dev {
|
||||
|
||||
struct drm_modeset_lock ctm_state_lock;
|
||||
struct drm_private_obj ctm_manager;
|
||||
struct drm_private_obj hvs_channels;
|
||||
struct drm_private_obj load_tracker;
|
||||
|
||||
/* List of vc4_debugfs_info_entry for adding to debugfs once
|
||||
@ -531,6 +532,9 @@ struct vc4_crtc_state {
|
||||
unsigned int top;
|
||||
unsigned int bottom;
|
||||
} margins;
|
||||
|
||||
/* Transitional state below, only valid during atomic commits */
|
||||
bool update_muxing;
|
||||
};
|
||||
|
||||
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
|
||||
|
@ -760,12 +760,54 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
{
}

#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL

static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long long pixel_rate = mode->clock * 1000;
unsigned long long tmds_rate;

if (vc4_hdmi->variant->unsupported_odd_h_timings &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return -EINVAL;

/*
* The 1440p@60 pixel rate is in the same range than the first
* WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
* bandwidth). Slightly lower the frequency to bring it out of
* the WiFi range.
*/
tmds_rate = pixel_rate * 10;
if (vc4_hdmi->disable_wifi_frequencies &&
(tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
mode->clock = 238560;
pixel_rate = mode->clock * 1000;
}

if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;

return 0;
}
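The check above only takes effect when `vc4_hdmi->disable_wifi_frequencies` was set from the `wifi-2.4ghz-coexistence` device-tree property: it compares the TMDS bit rate (ten times the pixel rate at 8 bpc) against the first 2.4 GHz Wi-Fi channel and, if it falls inside, lowers the dot clock to 238.56 MHz. A standalone sketch of that arithmetic (the 241.5 MHz example mode is a common 2560x1440@60 dot clock, used here only as a made-up input):

```c
#include <stdio.h>
#include <stdbool.h>

#define WIFI_CH1_MIN_HZ 2400000000ULL
#define WIFI_CH1_MAX_HZ 2422000000ULL

/* Returns the (possibly nudged) pixel clock in kHz for a 2.4 GHz coexistence setup. */
static unsigned int coexist_pixel_clock_khz(unsigned int clock_khz, bool avoid_wifi)
{
	unsigned long long pixel_rate = (unsigned long long)clock_khz * 1000;
	unsigned long long tmds_rate = pixel_rate * 10;   /* 8 bpc TMDS: 10 bits per pixel clock */

	if (avoid_wifi && tmds_rate >= WIFI_CH1_MIN_HZ && tmds_rate <= WIFI_CH1_MAX_HZ)
		return 238560;                             /* just below the Wi-Fi band */
	return clock_khz;
}

int main(void)
{
	printf("241500 kHz -> %u kHz\n", coexist_pixel_clock_khz(241500, true));
	printf("241500 kHz -> %u kHz (coexistence off)\n", coexist_pixel_clock_khz(241500, false));
	return 0;
}
```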
static enum drm_mode_status
|
||||
vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
|
||||
const struct drm_display_mode *mode)
|
||||
{
|
||||
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
|
||||
|
||||
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
|
||||
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
|
||||
(mode->hsync_end % 2) || (mode->htotal % 2)))
|
||||
return MODE_H_ILLEGAL;
|
||||
|
||||
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
@ -773,6 +815,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
|
||||
.atomic_check = vc4_hdmi_encoder_atomic_check,
|
||||
.mode_valid = vc4_hdmi_encoder_mode_valid,
|
||||
.disable = vc4_hdmi_encoder_disable,
|
||||
.enable = vc4_hdmi_encoder_enable,
|
||||
@ -1694,6 +1737,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
|
||||
vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
|
||||
}
|
||||
|
||||
vc4_hdmi->disable_wifi_frequencies =
|
||||
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
|
||||
@ -1817,6 +1863,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
|
||||
PHY_LANE_2,
|
||||
PHY_LANE_CK,
|
||||
},
|
||||
.unsupported_odd_h_timings = true,
|
||||
|
||||
.init_resources = vc5_hdmi_init_resources,
|
||||
.csc_setup = vc5_hdmi_csc_setup,
|
||||
@ -1842,6 +1889,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
|
||||
PHY_LANE_CK,
|
||||
PHY_LANE_2,
|
||||
},
|
||||
.unsupported_odd_h_timings = true,
|
||||
|
||||
.init_resources = vc5_hdmi_init_resources,
|
||||
.csc_setup = vc5_hdmi_csc_setup,
|
||||
|
@ -62,6 +62,9 @@ struct vc4_hdmi_variant {
|
||||
*/
|
||||
enum vc4_hdmi_phy_channel phy_lane_mapping[4];
|
||||
|
||||
/* The BCM2711 cannot deal with odd horizontal pixel timings */
|
||||
bool unsupported_odd_h_timings;
|
||||
|
||||
/* Callback to get the resources (memory region, interrupts,
|
||||
* clocks, etc) for that variant.
|
||||
*/
|
||||
@ -139,6 +142,14 @@ struct vc4_hdmi {
|
||||
int hpd_gpio;
|
||||
bool hpd_active_low;
|
||||
|
||||
/*
|
||||
* On some systems (like the RPi4), some modes are in the same
|
||||
* frequency range than the WiFi channels (1440p@60Hz for
|
||||
* example). Should we take evasive actions because that system
|
||||
* has a wifi adapter?
|
||||
*/
|
||||
bool disable_wifi_frequencies;
|
||||
|
||||
struct cec_adapter *cec_adap;
|
||||
struct cec_msg cec_rx_msg;
|
||||
bool cec_tx_ok;
|
||||
|
@ -24,6 +24,8 @@
|
||||
#include "vc4_drv.h"
|
||||
#include "vc4_regs.h"
|
||||
|
||||
#define HVS_NUM_CHANNELS 3
|
||||
|
||||
struct vc4_ctm_state {
|
||||
struct drm_private_state base;
|
||||
struct drm_color_ctm *ctm;
|
||||
@ -35,6 +37,17 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
|
||||
return container_of(priv, struct vc4_ctm_state, base);
|
||||
}
|
||||
|
||||
struct vc4_hvs_state {
|
||||
struct drm_private_state base;
|
||||
unsigned int unassigned_channels;
|
||||
};
|
||||
|
||||
static struct vc4_hvs_state *
|
||||
to_vc4_hvs_state(struct drm_private_state *priv)
|
||||
{
|
||||
return container_of(priv, struct vc4_hvs_state, base);
|
||||
}
|
||||
|
||||
struct vc4_load_tracker_state {
|
||||
struct drm_private_state base;
|
||||
u64 hvs_load;
|
||||
@ -113,7 +126,7 @@ static int vc4_ctm_obj_init(struct vc4_dev *vc4)
|
||||
drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
|
||||
&vc4_ctm_state_funcs);
|
||||
|
||||
return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
|
||||
return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
|
||||
}
|
||||
|
||||
/* Converts a DRM S31.32 value to the HW S0.9 format. */
|
||||
@ -169,6 +182,19 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
|
||||
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
|
||||
}
|
||||
|
||||
static struct vc4_hvs_state *
|
||||
vc4_hvs_get_global_state(struct drm_atomic_state *state)
|
||||
{
|
||||
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
|
||||
struct drm_private_state *priv_state;
|
||||
|
||||
priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
|
||||
if (IS_ERR(priv_state))
|
||||
return ERR_CAST(priv_state);
|
||||
|
||||
return to_vc4_hvs_state(priv_state);
|
||||
}
|
||||
|
||||
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
@ -213,10 +239,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
|
||||
{
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct drm_crtc *crtc;
|
||||
unsigned char dsp2_mux = 0;
|
||||
unsigned char dsp3_mux = 3;
|
||||
unsigned char dsp4_mux = 3;
|
||||
unsigned char dsp5_mux = 3;
|
||||
unsigned char mux;
|
||||
unsigned int i;
|
||||
u32 reg;
|
||||
|
||||
@ -224,50 +247,59 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
|
||||
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
|
||||
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
|
||||
|
||||
if (!crtc_state->active)
|
||||
if (!vc4_state->update_muxing)
|
||||
continue;
|
||||
|
||||
switch (vc4_crtc->data->hvs_output) {
|
||||
case 2:
|
||||
dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
|
||||
mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
|
||||
reg = HVS_READ(SCALER_DISPECTRL);
|
||||
HVS_WRITE(SCALER_DISPECTRL,
|
||||
(reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
|
||||
VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
|
||||
break;
|
||||
|
||||
case 3:
|
||||
dsp3_mux = vc4_state->assigned_channel;
|
||||
if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
|
||||
mux = 3;
|
||||
else
|
||||
mux = vc4_state->assigned_channel;
|
||||
|
||||
reg = HVS_READ(SCALER_DISPCTRL);
|
||||
HVS_WRITE(SCALER_DISPCTRL,
|
||||
(reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
|
||||
VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
|
||||
break;
|
||||
|
||||
case 4:
|
||||
dsp4_mux = vc4_state->assigned_channel;
|
||||
if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
|
||||
mux = 3;
|
||||
else
|
||||
mux = vc4_state->assigned_channel;
|
||||
|
||||
reg = HVS_READ(SCALER_DISPEOLN);
|
||||
HVS_WRITE(SCALER_DISPEOLN,
|
||||
(reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
|
||||
VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
|
||||
|
||||
break;
|
||||
|
||||
case 5:
|
||||
dsp5_mux = vc4_state->assigned_channel;
|
||||
if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
|
||||
mux = 3;
|
||||
else
|
||||
mux = vc4_state->assigned_channel;
|
||||
|
||||
reg = HVS_READ(SCALER_DISPDITHER);
|
||||
HVS_WRITE(SCALER_DISPDITHER,
|
||||
(reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
|
||||
VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
reg = HVS_READ(SCALER_DISPECTRL);
|
||||
HVS_WRITE(SCALER_DISPECTRL,
|
||||
(reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
|
||||
VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
|
||||
|
||||
reg = HVS_READ(SCALER_DISPCTRL);
|
||||
HVS_WRITE(SCALER_DISPCTRL,
|
||||
(reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
|
||||
VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
|
||||
|
||||
reg = HVS_READ(SCALER_DISPEOLN);
|
||||
HVS_WRITE(SCALER_DISPEOLN,
|
||||
(reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
|
||||
VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
|
||||
|
||||
reg = HVS_READ(SCALER_DISPDITHER);
|
||||
HVS_WRITE(SCALER_DISPDITHER,
|
||||
(reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
|
||||
VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
|
||||
}
|
||||
|
||||
static void
|
||||
@ -657,53 +689,123 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
|
||||
&load_state->base,
|
||||
&vc4_load_tracker_state_funcs);
|
||||
|
||||
return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
|
||||
return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
|
||||
}
|
||||
|
||||
#define NUM_OUTPUTS 6
|
||||
#define NUM_CHANNELS 3
|
||||
|
||||
static int
|
||||
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
|
||||
static struct drm_private_state *
|
||||
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
|
||||
{
|
||||
unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
|
||||
struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
|
||||
struct vc4_hvs_state *state;
|
||||
|
||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
if (!state)
|
||||
return NULL;
|
||||
|
||||
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
|
||||
|
||||
state->unassigned_channels = old_state->unassigned_channels;
|
||||
|
||||
return &state->base;
|
||||
}
|
||||
|
||||
static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
|
||||
struct drm_private_state *state)
|
||||
{
|
||||
struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
|
||||
|
||||
kfree(hvs_state);
|
||||
}
|
||||
|
||||
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
|
||||
.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
|
||||
.atomic_destroy_state = vc4_hvs_channels_destroy_state,
|
||||
};
|
||||
|
||||
static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
|
||||
{
|
||||
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
||||
|
||||
drm_atomic_private_obj_fini(&vc4->hvs_channels);
|
||||
}
|
||||
|
||||
static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
|
||||
{
|
||||
struct vc4_hvs_state *state;
|
||||
|
||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
if (!state)
|
||||
return -ENOMEM;
|
||||
|
||||
state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
|
||||
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
|
||||
&state->base,
|
||||
&vc4_hvs_state_funcs);
|
||||
|
||||
return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
|
||||
* the TXP (and therefore all the CRTCs found on that platform).
|
||||
*
|
||||
* The naive (and our initial) implementation would just iterate over
|
||||
* all the active CRTCs, try to find a suitable FIFO, and then remove it
|
||||
* from the pool of available FIFOs. However, there are a few corner
|
||||
* cases that need to be considered:
|
||||
*
|
||||
* - When running in a dual-display setup (so with two CRTCs involved),
|
||||
* we can update the state of a single CRTC (for example by changing
|
||||
* its mode using xrandr under X11) without affecting the other. In
|
||||
* this case, the other CRTC wouldn't be in the state at all, so we
|
||||
* need to consider all the running CRTCs in the DRM device to assign
|
||||
* a FIFO, not just the one in the state.
|
||||
*
|
||||
* - To fix the above, we can't use drm_atomic_get_crtc_state on all
|
||||
* enabled CRTCs to pull their CRTC state into the global state, since
|
||||
* a page flip would start considering their vblank to complete. Since
|
||||
* we don't have a guarantee that they are actually active, that
|
||||
* vblank might never happen, and shouldn't even be considered if we
|
||||
* want to do a page flip on a single CRTC. That can be tested by
|
||||
* doing a modetest -v first on HDMI1 and then on HDMI0.
|
||||
*
|
||||
* - Since we need the pixelvalve to be disabled and enabled back when
|
||||
* the FIFO is changed, we should keep the FIFO assigned for as long
|
||||
* as the CRTC is enabled, only considering it free again once that
|
||||
* CRTC has been disabled. This can be tested by booting X11 on a
|
||||
* single display, and changing the resolution down and then back up.
|
||||
*/
|
||||
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct vc4_hvs_state *hvs_new_state;
|
||||
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
||||
struct drm_crtc *crtc;
|
||||
int i, ret;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* Since the HVS FIFOs are shared across all the pixelvalves and
|
||||
* the TXP (and thus all the CRTCs), we need to pull the current
|
||||
* state of all the enabled CRTCs so that an update to a single
|
||||
* CRTC still keeps the previous FIFOs enabled and assigned to
|
||||
* the same CRTCs, instead of evaluating only the CRTC being
|
||||
* modified.
|
||||
*/
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct drm_crtc_state *crtc_state;
|
||||
|
||||
if (!crtc->state->enable)
|
||||
continue;
|
||||
|
||||
crtc_state = drm_atomic_get_crtc_state(state, crtc);
|
||||
if (IS_ERR(crtc_state))
|
||||
return PTR_ERR(crtc_state);
|
||||
}
|
||||
hvs_new_state = vc4_hvs_get_global_state(state);
|
||||
if (!hvs_new_state)
|
||||
return -EINVAL;
|
||||
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
struct vc4_crtc_state *old_vc4_crtc_state =
|
||||
to_vc4_crtc_state(old_crtc_state);
|
||||
struct vc4_crtc_state *new_vc4_crtc_state =
|
||||
to_vc4_crtc_state(new_crtc_state);
|
||||
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
|
||||
unsigned int matching_channels;
|
||||
|
||||
if (old_crtc_state->enable && !new_crtc_state->enable)
|
||||
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
|
||||
|
||||
if (!new_crtc_state->enable)
|
||||
/* Nothing to do here, let's skip it */
|
||||
if (old_crtc_state->enable == new_crtc_state->enable)
|
||||
continue;
|
||||
|
||||
if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
|
||||
unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
|
||||
/* Muxing will need to be modified, mark it as such */
|
||||
new_vc4_crtc_state->update_muxing = true;
|
||||
|
||||
/* If we're disabling our CRTC, we put back our channel */
|
||||
if (!new_crtc_state->enable) {
|
||||
hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
|
||||
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -731,17 +833,29 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
|
||||
* the future, we will need to have something smarter,
|
||||
* but it works so far.
|
||||
*/
|
||||
matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
|
||||
matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
|
||||
if (matching_channels) {
|
||||
unsigned int channel = ffs(matching_channels) - 1;
|
||||
|
||||
new_vc4_crtc_state->assigned_channel = channel;
|
||||
unassigned_channels &= ~BIT(channel);
|
||||
hvs_new_state->unassigned_channels &= ~BIT(channel);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vc4_pv_muxing_atomic_check(dev, state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = vc4_ctm_atomic_check(dev, state);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -808,6 +922,10 @@ int vc4_kms_load(struct drm_device *dev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = vc4_hvs_channels_obj_init(vc4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_mode_config_reset(dev);
|
||||
|
||||
drm_kms_helper_poll_init(dev);
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2020 Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015-2020 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
|
||||
spin_lock_init(&fd->tid_lock);
|
||||
spin_lock_init(&fd->invalid_lock);
|
||||
fd->rec_cpu_num = -1; /* no cpu affinity by default */
|
||||
fd->mm = current->mm;
|
||||
mmgrab(fd->mm);
|
||||
fd->dd = dd;
|
||||
fp->private_data = fd;
|
||||
return 0;
|
||||
@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
|
||||
|
||||
deallocate_ctxt(uctxt);
|
||||
done:
|
||||
mmdrop(fdata->mm);
|
||||
|
||||
if (atomic_dec_and_test(&dd->user_refcount))
|
||||
complete(&dd->user_comp);
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _HFI1_KERNEL_H
|
||||
#define _HFI1_KERNEL_H
|
||||
/*
|
||||
* Copyright(c) 2020 Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015-2020 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -1451,7 +1452,6 @@ struct hfi1_filedata {
|
||||
u32 invalid_tid_idx;
|
||||
/* protect invalid_tids array and invalid_tid_idx */
|
||||
spinlock_t invalid_lock;
|
||||
struct mm_struct *mm;
|
||||
};
|
||||
|
||||
extern struct xarray hfi1_dev_table;
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2020 Cornelis Networks, Inc.
|
||||
* Copyright(c) 2016 - 2017 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -48,23 +49,11 @@
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include <linux/interval_tree_generic.h>
|
||||
#include <linux/sched/mm.h>
|
||||
|
||||
#include "mmu_rb.h"
|
||||
#include "trace.h"
|
||||
|
||||
struct mmu_rb_handler {
|
||||
struct mmu_notifier mn;
|
||||
struct rb_root_cached root;
|
||||
void *ops_arg;
|
||||
spinlock_t lock; /* protect the RB tree */
|
||||
struct mmu_rb_ops *ops;
|
||||
struct mm_struct *mm;
|
||||
struct list_head lru_list;
|
||||
struct work_struct del_work;
|
||||
struct list_head del_list;
|
||||
struct workqueue_struct *wq;
|
||||
};
|
||||
|
||||
static unsigned long mmu_node_start(struct mmu_rb_node *);
|
||||
static unsigned long mmu_node_last(struct mmu_rb_node *);
|
||||
static int mmu_notifier_range_start(struct mmu_notifier *,
|
||||
@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
|
||||
return PAGE_ALIGN(node->addr + node->len) - 1;
|
||||
}
|
||||
|
||||
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
|
||||
int hfi1_mmu_rb_register(void *ops_arg,
|
||||
struct mmu_rb_ops *ops,
|
||||
struct workqueue_struct *wq,
|
||||
struct mmu_rb_handler **handler)
|
||||
{
|
||||
struct mmu_rb_handler *handlr;
|
||||
struct mmu_rb_handler *h;
|
||||
int ret;
|
||||
|
||||
handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
|
||||
if (!handlr)
|
||||
h = kmalloc(sizeof(*h), GFP_KERNEL);
|
||||
if (!h)
|
||||
return -ENOMEM;
|
||||
|
||||
handlr->root = RB_ROOT_CACHED;
|
||||
handlr->ops = ops;
|
||||
handlr->ops_arg = ops_arg;
|
||||
INIT_HLIST_NODE(&handlr->mn.hlist);
|
||||
spin_lock_init(&handlr->lock);
|
||||
handlr->mn.ops = &mn_opts;
|
||||
handlr->mm = mm;
|
||||
INIT_WORK(&handlr->del_work, handle_remove);
|
||||
INIT_LIST_HEAD(&handlr->del_list);
|
||||
INIT_LIST_HEAD(&handlr->lru_list);
|
||||
handlr->wq = wq;
|
||||
h->root = RB_ROOT_CACHED;
|
||||
h->ops = ops;
|
||||
h->ops_arg = ops_arg;
|
||||
INIT_HLIST_NODE(&h->mn.hlist);
|
||||
spin_lock_init(&h->lock);
|
||||
h->mn.ops = &mn_opts;
|
||||
INIT_WORK(&h->del_work, handle_remove);
|
||||
INIT_LIST_HEAD(&h->del_list);
|
||||
INIT_LIST_HEAD(&h->lru_list);
|
||||
h->wq = wq;
|
||||
|
||||
ret = mmu_notifier_register(&handlr->mn, handlr->mm);
|
||||
ret = mmu_notifier_register(&h->mn, current->mm);
|
||||
if (ret) {
|
||||
kfree(handlr);
|
||||
kfree(h);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*handler = handlr;
|
||||
*handler = h;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
|
||||
struct list_head del_list;
|
||||
|
||||
/* Unregister first so we don't get any more notifications. */
|
||||
mmu_notifier_unregister(&handler->mn, handler->mm);
|
||||
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
|
||||
|
||||
/*
|
||||
* Make sure the wq delete handler is finished running. It will not
|
||||
@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
|
||||
int ret = 0;
|
||||
|
||||
trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return -EPERM;
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
|
||||
if (node) {
|
||||
@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
|
||||
__mmu_int_rb_remove(mnode, &handler->root);
|
||||
list_del(&mnode->list); /* remove from LRU list */
|
||||
}
|
||||
mnode->handler = handler;
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&handler->lock, flags);
|
||||
return ret;
|
||||
@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
|
||||
unsigned long flags;
|
||||
bool ret = false;
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return ret;
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
node = __mmu_rb_search(handler, addr, len);
|
||||
if (node) {
|
||||
@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
|
||||
unsigned long flags;
|
||||
bool stop = false;
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return;
|
||||
|
||||
INIT_LIST_HEAD(&del_list);
|
||||
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (current->mm != handler->mn.mm)
|
||||
return;
|
||||
|
||||
/* Validity of handler and node pointers has been checked by caller. */
|
||||
trace_hfi1_mmu_rb_remove(node->addr, node->len);
|
||||
spin_lock_irqsave(&handler->lock, flags);
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2020 Cornelis Networks, Inc.
|
||||
* Copyright(c) 2016 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -54,6 +55,7 @@ struct mmu_rb_node {
|
||||
unsigned long len;
|
||||
unsigned long __last;
|
||||
struct rb_node node;
|
||||
struct mmu_rb_handler *handler;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
@ -71,7 +73,19 @@ struct mmu_rb_ops {
|
||||
void *evict_arg, bool *stop);
|
||||
};
|
||||
|
||||
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
|
||||
struct mmu_rb_handler {
|
||||
struct mmu_notifier mn;
|
||||
struct rb_root_cached root;
|
||||
void *ops_arg;
|
||||
spinlock_t lock; /* protect the RB tree */
|
||||
struct mmu_rb_ops *ops;
|
||||
struct list_head lru_list;
|
||||
struct work_struct del_work;
|
||||
struct list_head del_list;
|
||||
struct workqueue_struct *wq;
|
||||
};
|
||||
|
||||
int hfi1_mmu_rb_register(void *ops_arg,
|
||||
struct mmu_rb_ops *ops,
|
||||
struct workqueue_struct *wq,
|
||||
struct mmu_rb_handler **handler);
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2020 Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015-2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
|
||||
{
|
||||
struct page **pages;
|
||||
struct hfi1_devdata *dd = fd->uctxt->dd;
|
||||
struct mm_struct *mm;
|
||||
|
||||
if (mapped) {
|
||||
pci_unmap_single(dd->pcidev, node->dma_addr,
|
||||
node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
|
||||
pages = &node->pages[idx];
|
||||
mm = mm_from_tid_node(node);
|
||||
} else {
|
||||
pages = &tidbuf->pages[idx];
|
||||
mm = current->mm;
|
||||
}
|
||||
hfi1_release_user_pages(fd->mm, pages, npages, mapped);
|
||||
hfi1_release_user_pages(mm, pages, npages, mapped);
|
||||
fd->tid_n_pinned -= npages;
|
||||
}
|
||||
|
||||
@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
|
||||
* pages, accept the amount pinned so far and program only that.
|
||||
* User space knows how to deal with partially programmed buffers.
|
||||
*/
|
||||
if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
|
||||
if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
|
||||
kfree(pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
|
||||
pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
|
||||
if (pinned <= 0) {
|
||||
kfree(pages);
|
||||
return pinned;
|
||||
@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
|
||||
|
||||
if (fd->use_mn) {
|
||||
ret = mmu_interval_notifier_insert(
|
||||
&node->notifier, fd->mm,
|
||||
&node->notifier, current->mm,
|
||||
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
|
||||
&tid_mn_ops);
|
||||
if (ret)
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _HFI1_USER_EXP_RCV_H
|
||||
#define _HFI1_USER_EXP_RCV_H
|
||||
/*
|
||||
* Copyright(c) 2020 - Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015 - 2017 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
|
||||
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
|
||||
struct hfi1_tid_info *tinfo);
|
||||
|
||||
static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
|
||||
{
|
||||
return node->notifier.mm;
|
||||
}
|
||||
|
||||
#endif /* _HFI1_USER_EXP_RCV_H */
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2020 - Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
|
||||
atomic_set(&pq->n_reqs, 0);
|
||||
init_waitqueue_head(&pq->wait);
|
||||
atomic_set(&pq->n_locked, 0);
|
||||
pq->mm = fd->mm;
|
||||
|
||||
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
|
||||
activate_packet_queue, NULL, NULL);
|
||||
@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
|
||||
|
||||
cq->nentries = hfi1_sdma_comp_ring_size;
|
||||
|
||||
ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
|
||||
ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
|
||||
&pq->handler);
|
||||
if (ret) {
|
||||
dd_dev_err(dd, "Failed to register with MMU %d", ret);
|
||||
@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
|
||||
npages -= node->npages;
|
||||
retry:
|
||||
if (!hfi1_can_pin_pages(pq->dd, pq->mm,
|
||||
if (!hfi1_can_pin_pages(pq->dd, current->mm,
|
||||
atomic_read(&pq->n_locked), npages)) {
|
||||
cleared = sdma_cache_evict(pq, npages);
|
||||
if (cleared >= npages)
|
||||
goto retry;
|
||||
}
|
||||
pinned = hfi1_acquire_user_pages(pq->mm,
|
||||
pinned = hfi1_acquire_user_pages(current->mm,
|
||||
((unsigned long)iovec->iov.iov_base +
|
||||
(node->npages * PAGE_SIZE)), npages, 0,
|
||||
pages + node->npages);
|
||||
@ -995,7 +995,7 @@ static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
return pinned;
|
||||
}
|
||||
if (pinned != npages) {
|
||||
unpin_vector_pages(pq->mm, pages, node->npages, pinned);
|
||||
unpin_vector_pages(current->mm, pages, node->npages, pinned);
|
||||
return -EFAULT;
|
||||
}
|
||||
kfree(node->pages);
|
||||
@ -1008,7 +1008,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
static void unpin_sdma_pages(struct sdma_mmu_node *node)
|
||||
{
|
||||
if (node->npages) {
|
||||
unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
|
||||
unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
|
||||
node->npages);
|
||||
atomic_sub(node->npages, &node->pq->n_locked);
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _HFI1_USER_SDMA_H
|
||||
#define _HFI1_USER_SDMA_H
|
||||
/*
|
||||
* Copyright(c) 2020 - Cornelis Networks, Inc.
|
||||
* Copyright(c) 2015 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
|
||||
unsigned long unpinned;
|
||||
struct mmu_rb_handler *handler;
|
||||
atomic_t n_locked;
|
||||
struct mm_struct *mm;
|
||||
};
|
||||
|
||||
struct hfi1_user_sdma_comp_q {
|
||||
@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
|
||||
struct iovec *iovec, unsigned long dim,
|
||||
unsigned long *count);
|
||||
|
||||
static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
|
||||
{
|
||||
return node->rb.handler->mn.mm;
|
||||
}
|
||||
|
||||
#endif /* _HFI1_USER_SDMA_H */
|
||||
|
@ -2936,6 +2936,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
|
||||
|
||||
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
|
||||
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
|
||||
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
|
||||
|
||||
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
|
||||
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
|
||||
@ -4989,11 +4990,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
||||
V2_QPC_BYTE_28_AT_M,
|
||||
V2_QPC_BYTE_28_AT_S);
|
||||
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
|
||||
V2_QPC_BYTE_212_RETRY_CNT_M,
|
||||
V2_QPC_BYTE_212_RETRY_CNT_S);
|
||||
V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
|
||||
V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
|
||||
qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
|
||||
V2_QPC_BYTE_244_RNR_CNT_M,
|
||||
V2_QPC_BYTE_244_RNR_CNT_S);
|
||||
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
|
||||
V2_QPC_BYTE_244_RNR_NUM_INIT_S);
|
||||
|
||||
done:
|
||||
qp_attr->cur_qp_state = qp_attr->qp_state;
|
||||
|
@@ -1661,7 +1661,7 @@ struct hns_roce_query_pf_caps_d {
	__le32 rsv_uars_rsv_qps;
};
#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)

#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
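The narrowed mask matters because bit 20 already belongs to the adjacent RQWQE_HOP_NUM field defined right below it: GENMASK(19, 0) describes a 20-bit SRQ count, while the old GENMASK(20, 0) spilled into the neighbouring field. A standalone illustration with hypothetical macro names:

#include <linux/bits.h>

/* illustration only: a 20-bit field at bits [19:0] must not overlap the
 * 2-bit field that starts at bit 20 */
#define EXAMPLE_NUM_SRQS_M	GENMASK(19, 0)	/* 0x000fffff */
#define EXAMPLE_HOP_NUM_M	GENMASK(21, 20)	/* 0x00300000 */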
@ -54,10 +54,6 @@
|
||||
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
|
||||
__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
|
||||
|
||||
static int push_mode;
|
||||
module_param(push_mode, int, 0644);
|
||||
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
|
||||
|
||||
static int debug;
|
||||
module_param(debug, int, 0644);
|
||||
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
|
||||
@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
|
||||
if (status)
|
||||
goto exit;
|
||||
iwdev->obj_next = iwdev->obj_mem;
|
||||
iwdev->push_mode = push_mode;
|
||||
|
||||
init_waitqueue_head(&iwdev->vchnl_waitq);
|
||||
init_waitqueue_head(&dev->vf_reqs);
|
||||
|
@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
|
||||
*/
|
||||
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
{
|
||||
struct i40iw_ucontext *ucontext;
|
||||
u64 db_addr_offset, push_offset, pfn;
|
||||
struct i40iw_ucontext *ucontext = to_ucontext(context);
|
||||
u64 dbaddr;
|
||||
|
||||
ucontext = to_ucontext(context);
|
||||
if (ucontext->iwdev->sc_dev.is_pf) {
|
||||
db_addr_offset = I40IW_DB_ADDR_OFFSET;
|
||||
push_offset = I40IW_PUSH_OFFSET;
|
||||
if (vma->vm_pgoff)
|
||||
vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
|
||||
} else {
|
||||
db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
|
||||
push_offset = I40IW_VF_PUSH_OFFSET;
|
||||
if (vma->vm_pgoff)
|
||||
vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
|
||||
}
|
||||
if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
|
||||
dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
|
||||
|
||||
if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
} else {
|
||||
if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
else
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
}
|
||||
|
||||
pfn = vma->vm_pgoff +
|
||||
(pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
|
||||
PAGE_SHIFT);
|
||||
|
||||
return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
|
||||
vma->vm_page_prot, NULL);
|
||||
return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
|
||||
pgprot_noncached(vma->vm_page_prot), NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
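The i40iw mmap rework above drops the per-VF push-page arithmetic and maps exactly one doorbell page through the RDMA core helper. A minimal sketch of that pattern for a hypothetical driver follows; the function name and the db_phys parameter are illustrative, not part of the diff:

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* sketch only: accept a single-page, zero-offset mapping request and hand it
 * to rdma_user_mmap_io() instead of open-coding io_remap_pfn_range() */
static int example_mmap_db_page(struct ib_ucontext *context,
				struct vm_area_struct *vma,
				phys_addr_t db_phys)
{
	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	return rdma_user_mmap_io(context, vma, db_phys >> PAGE_SHIFT, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot), NULL);
}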
@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
|
||||
}
|
||||
|
||||
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
|
||||
if (IS_ERR(mailbox))
|
||||
if (IS_ERR(mailbox)) {
|
||||
err = PTR_ERR(mailbox);
|
||||
goto err_out_arm;
|
||||
}
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
|
||||
@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
|
||||
}
|
||||
|
||||
spin_lock_irq(&dev->cq_table.lock);
|
||||
if (mthca_array_set(&dev->cq_table.cq,
|
||||
cq->cqn & (dev->limits.num_cqs - 1),
|
||||
cq)) {
|
||||
err = mthca_array_set(&dev->cq_table.cq,
|
||||
cq->cqn & (dev->limits.num_cqs - 1), cq);
|
||||
if (err) {
|
||||
spin_unlock_irq(&dev->cq_table.lock);
|
||||
goto err_out_free_mr;
|
||||
}
|
||||
|
@@ -29,6 +29,7 @@
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

@@ -672,11 +673,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}
@@ -715,8 +732,8 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
				              PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}
@@ -838,7 +855,7 @@ static int iommu_init_ga(struct amd_iommu *iommu)

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}
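iommu_alloc_4k_pages() centralizes the "split the backing pages to 4K when the SNP feature is present" rule, so any further buffer the driver hands to hardware can be allocated the same way. A hypothetical extra caller inside the same file might look like the sketch below (the function and its size are illustrative only):

/* sketch only: allocate a two-page scratch log through the same helper so
 * that, with FEATURE_SNP present, its backing memory ends up in 4K mappings */
static void *__init alloc_example_scratch_log(struct amd_iommu *iommu)
{
	return iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
				    2 * PAGE_SIZE);
}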
@@ -108,6 +108,10 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);
@@ -986,7 +986,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}
	iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
	if (ecap_vcs(iommu->ecap))
		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -1833,7 +1833,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
	}
	if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
	if (vccap_pasid(iommu->vccap))
		ioasid_unregister_allocator(&iommu->pasid_allocator);

#endif
@@ -3212,7 +3212,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
	 * is active. All vIOMMU allocators will eventually be calling the same
	 * host allocator.
	 */
	if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
	if (!vccap_pasid(iommu->vccap))
		return;

	pr_info("Register custom PASID allocator\n");
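After this change the VCCAP register is only read when ecap_vcs() reports virtual command support, so iommu->vccap stays zero otherwise and later code can test vccap_pasid() alone. A small sketch of the resulting invariant, with a hypothetical helper name:

#include <linux/intel-iommu.h>

/* illustration only: vccap is populated in map_iommu() only when ecap_vcs()
 * is set, so this single test is enough to gate PASID allocator handling */
static inline bool example_has_vcmd_pasid(struct intel_iommu *iommu)
{
	return vccap_pasid(iommu->vccap);
}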
@@ -264,16 +264,18 @@ int iommu_probe_device(struct device *dev)
	 */
	iommu_alloc_default_domain(group, dev);

	if (group->default_domain)
	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	iommu_group_put(group);

	if (ret)
		goto err_release;

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

@ -4,36 +4,43 @@
|
||||
* validate the existing APIs in the media subsystem. It can also aid
|
||||
* developers working on userspace applications.
|
||||
*
|
||||
* When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner' and 'dvb_vidtv_demod'.
|
||||
* When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner'
|
||||
* and 'dvb_vidtv_demod'.
|
||||
*
|
||||
* Copyright (C) 2020 Daniel W. S. Almeida
|
||||
*/
|
||||
|
||||
#include <linux/dev_printk.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dev_printk.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "vidtv_bridge.h"
|
||||
#include "vidtv_demod.h"
|
||||
#include "vidtv_tuner.h"
|
||||
#include "vidtv_ts.h"
|
||||
#include "vidtv_mux.h"
|
||||
#include "vidtv_common.h"
|
||||
#include "vidtv_demod.h"
|
||||
#include "vidtv_mux.h"
|
||||
#include "vidtv_ts.h"
|
||||
#include "vidtv_tuner.h"
|
||||
|
||||
//#define MUX_BUF_MAX_SZ
|
||||
//#define MUX_BUF_MIN_SZ
|
||||
#define MUX_BUF_MIN_SZ 90164
|
||||
#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
|
||||
#define TUNER_DEFAULT_ADDR 0x68
|
||||
#define DEMOD_DEFAULT_ADDR 0x60
|
||||
#define VIDTV_DEFAULT_NETWORK_ID 0xff44
|
||||
#define VIDTV_DEFAULT_NETWORK_NAME "LinuxTV.org"
|
||||
#define VIDTV_DEFAULT_TS_ID 0x4081
|
||||
|
||||
/* LNBf fake parameters: ranges used by an Universal (extended) European LNBf */
|
||||
#define LNB_CUT_FREQUENCY 11700000
|
||||
#define LNB_LOW_FREQ 9750000
|
||||
#define LNB_HIGH_FREQ 10600000
|
||||
|
||||
/*
|
||||
* The LNBf fake parameters here are the ranges used by an
|
||||
* Universal (extended) European LNBf, which is likely the most common LNBf
|
||||
* found on Satellite digital TV system nowadays.
|
||||
*/
|
||||
#define LNB_CUT_FREQUENCY 11700000 /* high IF frequency */
|
||||
#define LNB_LOW_FREQ 9750000 /* low IF frequency */
|
||||
#define LNB_HIGH_FREQ 10600000 /* transition frequency */
|
||||
|
||||
static unsigned int drop_tslock_prob_on_low_snr;
|
||||
module_param(drop_tslock_prob_on_low_snr, uint, 0);
|
||||
@ -92,7 +99,8 @@ MODULE_PARM_DESC(si_period_msec, "How often to send SI packets. Default: 40ms");
|
||||
|
||||
static unsigned int pcr_period_msec = 40;
|
||||
module_param(pcr_period_msec, uint, 0);
|
||||
MODULE_PARM_DESC(pcr_period_msec, "How often to send PCR packets. Default: 40ms");
|
||||
MODULE_PARM_DESC(pcr_period_msec,
|
||||
"How often to send PCR packets. Default: 40ms");
|
||||
|
||||
static unsigned int mux_rate_kbytes_sec = 4096;
|
||||
module_param(mux_rate_kbytes_sec, uint, 0);
|
||||
@ -104,16 +112,14 @@ MODULE_PARM_DESC(pcr_pid, "PCR PID for all channels: defaults to 0x200");
|
||||
|
||||
static unsigned int mux_buf_sz_pkts;
|
||||
module_param(mux_buf_sz_pkts, uint, 0);
|
||||
MODULE_PARM_DESC(mux_buf_sz_pkts, "Size for the internal mux buffer in multiples of 188 bytes");
|
||||
|
||||
#define MUX_BUF_MIN_SZ 90164
|
||||
#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
|
||||
MODULE_PARM_DESC(mux_buf_sz_pkts,
|
||||
"Size for the internal mux buffer in multiples of 188 bytes");
|
||||
|
||||
static u32 vidtv_bridge_mux_buf_sz_for_mux_rate(void)
|
||||
{
|
||||
u32 max_elapsed_time_msecs = VIDTV_MAX_SLEEP_USECS / USEC_PER_MSEC;
|
||||
u32 nbytes_expected;
|
||||
u32 mux_buf_sz = mux_buf_sz_pkts * TS_PACKET_LEN;
|
||||
u32 nbytes_expected;
|
||||
|
||||
nbytes_expected = mux_rate_kbytes_sec;
|
||||
nbytes_expected *= max_elapsed_time_msecs;
|
||||
@ -143,14 +149,12 @@ static bool vidtv_bridge_check_demod_lock(struct vidtv_dvb *dvb, u32 n)
|
||||
FE_HAS_LOCK);
|
||||
}
|
||||
|
||||
static void
|
||||
vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
|
||||
/*
|
||||
* called on a separate thread by the mux when new packets become available
|
||||
*/
|
||||
static void vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
|
||||
{
|
||||
/*
|
||||
* called on a separate thread by the mux when new packets become
|
||||
* available
|
||||
*/
|
||||
struct vidtv_dvb *dvb = (struct vidtv_dvb *)priv;
|
||||
struct vidtv_dvb *dvb = priv;
|
||||
|
||||
/* drop packets if we lose the lock */
|
||||
if (vidtv_bridge_check_demod_lock(dvb, 0))
|
||||
@ -159,7 +163,17 @@ vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
|
||||
|
||||
static int vidtv_start_streaming(struct vidtv_dvb *dvb)
|
||||
{
|
||||
struct vidtv_mux_init_args mux_args = {0};
|
||||
struct vidtv_mux_init_args mux_args = {
|
||||
.mux_rate_kbytes_sec = mux_rate_kbytes_sec,
|
||||
.on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail,
|
||||
.pcr_period_usecs = pcr_period_msec * USEC_PER_MSEC,
|
||||
.si_period_usecs = si_period_msec * USEC_PER_MSEC,
|
||||
.pcr_pid = pcr_pid,
|
||||
.transport_stream_id = VIDTV_DEFAULT_TS_ID,
|
||||
.network_id = VIDTV_DEFAULT_NETWORK_ID,
|
||||
.network_name = VIDTV_DEFAULT_NETWORK_NAME,
|
||||
.priv = dvb,
|
||||
};
|
||||
struct device *dev = &dvb->pdev->dev;
|
||||
u32 mux_buf_sz;
|
||||
|
||||
@ -168,19 +182,17 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
mux_buf_sz = (mux_buf_sz_pkts) ? mux_buf_sz_pkts : vidtv_bridge_mux_buf_sz_for_mux_rate();
|
||||
if (mux_buf_sz_pkts)
|
||||
mux_buf_sz = mux_buf_sz_pkts;
|
||||
else
|
||||
mux_buf_sz = vidtv_bridge_mux_buf_sz_for_mux_rate();
|
||||
|
||||
mux_args.mux_rate_kbytes_sec = mux_rate_kbytes_sec;
|
||||
mux_args.on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail;
|
||||
mux_args.mux_buf_sz = mux_buf_sz;
|
||||
mux_args.pcr_period_usecs = pcr_period_msec * 1000;
|
||||
mux_args.si_period_usecs = si_period_msec * 1000;
|
||||
mux_args.pcr_pid = pcr_pid;
|
||||
mux_args.transport_stream_id = VIDTV_DEFAULT_TS_ID;
|
||||
mux_args.priv = dvb;
|
||||
mux_args.mux_buf_sz = mux_buf_sz;
|
||||
|
||||
dvb->streaming = true;
|
||||
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, mux_args);
|
||||
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
|
||||
if (!dvb->mux)
|
||||
return -ENOMEM;
|
||||
vidtv_mux_start_thread(dvb->mux);
|
||||
|
||||
dev_dbg_ratelimited(dev, "Started streaming\n");
|
||||
@ -204,8 +216,8 @@ static int vidtv_start_feed(struct dvb_demux_feed *feed)
|
||||
{
|
||||
struct dvb_demux *demux = feed->demux;
|
||||
struct vidtv_dvb *dvb = demux->priv;
|
||||
int rc;
|
||||
int ret;
|
||||
int rc;
|
||||
|
||||
if (!demux->dmx.frontend)
|
||||
return -EINVAL;
|
||||
@ -243,9 +255,9 @@ static int vidtv_stop_feed(struct dvb_demux_feed *feed)
|
||||
|
||||
static struct dvb_frontend *vidtv_get_frontend_ptr(struct i2c_client *c)
|
||||
{
|
||||
/* the demod will set this when its probe function runs */
|
||||
struct vidtv_demod_state *state = i2c_get_clientdata(c);
|
||||
|
||||
/* the demod will set this when its probe function runs */
|
||||
return &state->frontend;
|
||||
}
|
||||
|
||||
@ -253,6 +265,11 @@ static int vidtv_master_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg msgs[],
|
||||
int num)
|
||||
{
|
||||
/*
|
||||
* Right now, this virtual driver doesn't really send or receive
|
||||
* messages from I2C. A real driver will require an implementation
|
||||
* here.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -320,11 +337,10 @@ static int vidtv_bridge_dmxdev_init(struct vidtv_dvb *dvb)
|
||||
|
||||
static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
|
||||
{
|
||||
struct vidtv_demod_config cfg = {};
|
||||
|
||||
cfg.drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr;
|
||||
cfg.recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr;
|
||||
|
||||
struct vidtv_demod_config cfg = {
|
||||
.drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr,
|
||||
.recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr,
|
||||
};
|
||||
dvb->i2c_client_demod[n] = dvb_module_probe("dvb_vidtv_demod",
|
||||
NULL,
|
||||
&dvb->i2c_adapter,
|
||||
@ -343,14 +359,14 @@ static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
|
||||
|
||||
static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
|
||||
{
|
||||
struct vidtv_tuner_config cfg = {};
|
||||
struct vidtv_tuner_config cfg = {
|
||||
.fe = dvb->fe[n],
|
||||
.mock_power_up_delay_msec = mock_power_up_delay_msec,
|
||||
.mock_tune_delay_msec = mock_tune_delay_msec,
|
||||
};
|
||||
u32 freq;
|
||||
int i;
|
||||
|
||||
cfg.fe = dvb->fe[n];
|
||||
cfg.mock_power_up_delay_msec = mock_power_up_delay_msec;
|
||||
cfg.mock_tune_delay_msec = mock_tune_delay_msec;
|
||||
|
||||
/* TODO: check if the frequencies are at a valid range */
|
||||
|
||||
memcpy(cfg.vidtv_valid_dvb_t_freqs,
|
||||
@ -389,9 +405,7 @@ static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
|
||||
|
||||
static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
int j;
|
||||
int ret, i, j;
|
||||
|
||||
ret = vidtv_bridge_i2c_register_adap(dvb);
|
||||
if (ret < 0)
|
||||
|
@ -20,9 +20,11 @@
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <media/dmxdev.h>
|
||||
#include <media/dvb_demux.h>
|
||||
#include <media/dvb_frontend.h>
|
||||
|
||||
#include "vidtv_mux.h"
|
||||
|
||||
/**
|
||||
@ -32,7 +34,7 @@
|
||||
* @adapter: Represents a DTV adapter. See 'dvb_register_adapter'.
|
||||
* @demux: The demux used by the dvb_dmx_swfilter_packets() call.
|
||||
* @dmx_dev: Represents a demux device.
|
||||
* @dmx_frontend: The frontends associated with the demux.
|
||||
* @dmx_fe: The frontends associated with the demux.
|
||||
* @i2c_adapter: The i2c_adapter associated with the bridge driver.
|
||||
* @i2c_client_demod: The i2c_clients associated with the demodulator modules.
|
||||
* @i2c_client_tuner: The i2c_clients associated with the tuner modules.
|
||||
|
@ -9,6 +9,7 @@
|
||||
* When vidtv boots, it will create some hardcoded channels.
|
||||
* Their services will be concatenated to populate the SDT.
|
||||
* Their programs will be concatenated to populate the PAT
|
||||
* Their events will be concatenated to populate the EIT
|
||||
* For each program in the PAT, a PMT section will be created
|
||||
* The PMT section for a channel will be assigned its streams.
|
||||
* Every stream will have its corresponding encoder polled to produce TS packets
|
||||
@ -18,22 +19,22 @@
|
||||
* Copyright (C) 2020 Daniel W. S. Almeida
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dev_printk.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "vidtv_channel.h"
|
||||
#include "vidtv_psi.h"
|
||||
#include "vidtv_common.h"
|
||||
#include "vidtv_encoder.h"
|
||||
#include "vidtv_mux.h"
|
||||
#include "vidtv_common.h"
|
||||
#include "vidtv_psi.h"
|
||||
#include "vidtv_s302m.h"
|
||||
|
||||
static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
|
||||
{
|
||||
struct vidtv_encoder *curr = e;
|
||||
struct vidtv_encoder *tmp = NULL;
|
||||
struct vidtv_encoder *curr = e;
|
||||
|
||||
while (curr) {
|
||||
/* forward the call to the derived type */
|
||||
@ -44,55 +45,88 @@ static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
|
||||
}
|
||||
|
||||
#define ENCODING_ISO8859_15 "\x0b"
|
||||
#define TS_NIT_PID 0x10
|
||||
|
||||
/*
|
||||
* init an audio only channel with a s302m encoder
|
||||
*/
|
||||
struct vidtv_channel
|
||||
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id)
|
||||
{
|
||||
/*
|
||||
* init an audio only channel with a s302m encoder
|
||||
*/
|
||||
const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
|
||||
char *event_text = ENCODING_ISO8859_15 "Bagatelle No. 25 in A minor for solo piano, also known as F\xfcr Elise, composed by Ludwig van Beethoven";
|
||||
char *event_name = ENCODING_ISO8859_15 "Ludwig van Beethoven: F\xfcr Elise";
|
||||
struct vidtv_s302m_encoder_init_args encoder_args = {};
|
||||
char *iso_language_code = ENCODING_ISO8859_15 "eng";
|
||||
char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
|
||||
char *name = ENCODING_ISO8859_15 "Beethoven";
|
||||
const u16 s302m_es_pid = 0x111; /* packet id for the ES */
|
||||
const u16 s302m_program_pid = 0x101; /* packet id for PMT*/
|
||||
const u16 s302m_service_id = 0x880;
|
||||
const u16 s302m_program_num = 0x880;
|
||||
const u16 s302m_program_pid = 0x101; /* packet id for PMT*/
|
||||
const u16 s302m_es_pid = 0x111; /* packet id for the ES */
|
||||
const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
|
||||
const u16 s302m_beethoven_event_id = 1;
|
||||
struct vidtv_channel *s302m;
|
||||
|
||||
char *name = ENCODING_ISO8859_15 "Beethoven";
|
||||
char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
|
||||
|
||||
struct vidtv_channel *s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
|
||||
struct vidtv_s302m_encoder_init_args encoder_args = {};
|
||||
s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
|
||||
if (!s302m)
|
||||
return NULL;
|
||||
|
||||
s302m->name = kstrdup(name, GFP_KERNEL);
|
||||
if (!s302m->name)
|
||||
goto free_s302m;
|
||||
|
||||
s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id);
|
||||
s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id, false, true);
|
||||
if (!s302m->service)
|
||||
goto free_name;
|
||||
|
||||
s302m->service->descriptor = (struct vidtv_psi_desc *)
|
||||
vidtv_psi_service_desc_init(NULL,
|
||||
DIGITAL_TELEVISION_SERVICE,
|
||||
DIGITAL_RADIO_SOUND_SERVICE,
|
||||
name,
|
||||
provider);
|
||||
if (!s302m->service->descriptor)
|
||||
goto free_service;
|
||||
|
||||
s302m->transport_stream_id = transport_stream_id;
|
||||
|
||||
s302m->program = vidtv_psi_pat_program_init(NULL,
|
||||
s302m_service_id,
|
||||
s302m_program_pid);
|
||||
if (!s302m->program)
|
||||
goto free_service;
|
||||
|
||||
s302m->program_num = s302m_program_num;
|
||||
|
||||
s302m->streams = vidtv_psi_pmt_stream_init(NULL,
|
||||
STREAM_PRIVATE_DATA,
|
||||
s302m_es_pid);
|
||||
if (!s302m->streams)
|
||||
goto free_program;
|
||||
|
||||
s302m->streams->descriptor = (struct vidtv_psi_desc *)
|
||||
vidtv_psi_registration_desc_init(NULL,
|
||||
s302m_fid,
|
||||
NULL,
|
||||
0);
|
||||
if (!s302m->streams->descriptor)
|
||||
goto free_streams;
|
||||
|
||||
encoder_args.es_pid = s302m_es_pid;
|
||||
|
||||
s302m->encoders = vidtv_s302m_encoder_init(encoder_args);
|
||||
if (!s302m->encoders)
|
||||
goto free_streams;
|
||||
|
||||
s302m->events = vidtv_psi_eit_event_init(NULL, s302m_beethoven_event_id);
|
||||
if (!s302m->events)
|
||||
goto free_encoders;
|
||||
s302m->events->descriptor = (struct vidtv_psi_desc *)
|
||||
vidtv_psi_short_event_desc_init(NULL,
|
||||
iso_language_code,
|
||||
event_name,
|
||||
event_text);
|
||||
if (!s302m->events->descriptor)
|
||||
goto free_events;
|
||||
|
||||
if (head) {
|
||||
while (head->next)
|
||||
@ -102,6 +136,68 @@ struct vidtv_channel
|
||||
}
|
||||
|
||||
return s302m;
|
||||
|
||||
free_events:
|
||||
vidtv_psi_eit_event_destroy(s302m->events);
|
||||
free_encoders:
|
||||
vidtv_s302m_encoder_destroy(s302m->encoders);
|
||||
free_streams:
|
||||
vidtv_psi_pmt_stream_destroy(s302m->streams);
|
||||
free_program:
|
||||
vidtv_psi_pat_program_destroy(s302m->program);
|
||||
free_service:
|
||||
vidtv_psi_sdt_service_destroy(s302m->service);
|
||||
free_name:
|
||||
kfree(s302m->name);
|
||||
free_s302m:
|
||||
kfree(s302m);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct vidtv_psi_table_eit_event
|
||||
*vidtv_channel_eit_event_cat_into_new(struct vidtv_mux *m)
|
||||
{
|
||||
/* Concatenate the events */
|
||||
const struct vidtv_channel *cur_chnl = m->channels;
|
||||
struct vidtv_psi_table_eit_event *curr = NULL;
|
||||
struct vidtv_psi_table_eit_event *head = NULL;
|
||||
struct vidtv_psi_table_eit_event *tail = NULL;
|
||||
struct vidtv_psi_desc *desc = NULL;
|
||||
u16 event_id;
|
||||
|
||||
if (!cur_chnl)
|
||||
return NULL;
|
||||
|
||||
while (cur_chnl) {
|
||||
curr = cur_chnl->events;
|
||||
|
||||
if (!curr)
|
||||
dev_warn_ratelimited(m->dev,
|
||||
"No events found for channel %s\n",
|
||||
cur_chnl->name);
|
||||
|
||||
while (curr) {
|
||||
event_id = be16_to_cpu(curr->event_id);
|
||||
tail = vidtv_psi_eit_event_init(tail, event_id);
|
||||
if (!tail) {
|
||||
vidtv_psi_eit_event_destroy(head);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
desc = vidtv_psi_desc_clone(curr->descriptor);
|
||||
vidtv_psi_desc_assign(&tail->descriptor, desc);
|
||||
|
||||
if (!head)
|
||||
head = tail;
|
||||
|
||||
curr = curr->next;
|
||||
}
|
||||
|
||||
cur_chnl = cur_chnl->next;
|
||||
}
|
||||
|
||||
return head;
|
||||
}
|
||||
|
||||
static struct vidtv_psi_table_sdt_service
|
||||
@ -125,13 +221,21 @@ static struct vidtv_psi_table_sdt_service
|
||||
|
||||
if (!curr)
|
||||
dev_warn_ratelimited(m->dev,
|
||||
"No services found for channel %s\n", cur_chnl->name);
|
||||
"No services found for channel %s\n",
|
||||
cur_chnl->name);
|
||||
|
||||
while (curr) {
|
||||
service_id = be16_to_cpu(curr->service_id);
|
||||
tail = vidtv_psi_sdt_service_init(tail, service_id);
|
||||
tail = vidtv_psi_sdt_service_init(tail,
|
||||
service_id,
|
||||
curr->EIT_schedule,
|
||||
curr->EIT_present_following);
|
||||
if (!tail)
|
||||
goto free;
|
||||
|
||||
desc = vidtv_psi_desc_clone(curr->descriptor);
|
||||
if (!desc)
|
||||
goto free_tail;
|
||||
vidtv_psi_desc_assign(&tail->descriptor, desc);
|
||||
|
||||
if (!head)
|
||||
@ -144,6 +248,12 @@ static struct vidtv_psi_table_sdt_service
|
||||
}
|
||||
|
||||
return head;
|
||||
|
||||
free_tail:
|
||||
vidtv_psi_sdt_service_destroy(tail);
|
||||
free:
|
||||
vidtv_psi_sdt_service_destroy(head);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct vidtv_psi_table_pat_program*
|
||||
@ -174,6 +284,10 @@ vidtv_channel_pat_prog_cat_into_new(struct vidtv_mux *m)
|
||||
tail = vidtv_psi_pat_program_init(tail,
|
||||
serv_id,
|
||||
pid);
|
||||
if (!tail) {
|
||||
vidtv_psi_pat_program_destroy(head);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!head)
|
||||
head = tail;
|
||||
@ -183,30 +297,30 @@ vidtv_channel_pat_prog_cat_into_new(struct vidtv_mux *m)
|
||||
|
||||
cur_chnl = cur_chnl->next;
|
||||
}
|
||||
/* Add the NIT table */
|
||||
vidtv_psi_pat_program_init(tail, 0, TS_NIT_PID);
|
||||
|
||||
return head;
|
||||
}
|
||||
|
||||
/*
|
||||
* Match channels to their respective PMT sections, then assign the
|
||||
* streams
|
||||
*/
|
||||
static void
|
||||
vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
|
||||
struct vidtv_psi_table_pmt **sections,
|
||||
u32 nsections)
|
||||
{
|
||||
/*
|
||||
* Match channels to their respective PMT sections, then assign the
|
||||
* streams
|
||||
*/
|
||||
struct vidtv_psi_table_pmt *curr_section = NULL;
|
||||
struct vidtv_channel *cur_chnl = channels;
|
||||
|
||||
struct vidtv_psi_table_pmt_stream *s = NULL;
|
||||
struct vidtv_psi_table_pmt_stream *head = NULL;
|
||||
struct vidtv_psi_table_pmt_stream *tail = NULL;
|
||||
|
||||
struct vidtv_psi_table_pmt_stream *s = NULL;
|
||||
struct vidtv_channel *cur_chnl = channels;
|
||||
struct vidtv_psi_desc *desc = NULL;
|
||||
u32 j;
|
||||
u16 curr_id;
|
||||
u16 e_pid; /* elementary stream pid */
|
||||
u16 curr_id;
|
||||
u32 j;
|
||||
|
||||
while (cur_chnl) {
|
||||
for (j = 0; j < nsections; ++j) {
|
||||
@ -232,7 +346,8 @@ vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
|
||||
head = tail;
|
||||
|
||||
desc = vidtv_psi_desc_clone(s->descriptor);
|
||||
vidtv_psi_desc_assign(&tail->descriptor, desc);
|
||||
vidtv_psi_desc_assign(&tail->descriptor,
|
||||
desc);
|
||||
|
||||
s = s->next;
|
||||
}
|
||||
@ -246,17 +361,103 @@ vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
|
||||
}
|
||||
}
|
||||
|
||||
void vidtv_channel_si_init(struct vidtv_mux *m)
|
||||
static void
|
||||
vidtv_channel_destroy_service_list(struct vidtv_psi_desc_service_list_entry *e)
|
||||
{
|
||||
struct vidtv_psi_desc_service_list_entry *tmp;
|
||||
|
||||
while (e) {
|
||||
tmp = e;
|
||||
e = e->next;
|
||||
kfree(tmp);
|
||||
}
|
||||
}
|
||||
|
||||
static struct vidtv_psi_desc_service_list_entry
|
||||
*vidtv_channel_build_service_list(struct vidtv_psi_table_sdt_service *s)
|
||||
{
|
||||
struct vidtv_psi_desc_service_list_entry *curr_e = NULL;
|
||||
struct vidtv_psi_desc_service_list_entry *head_e = NULL;
|
||||
struct vidtv_psi_desc_service_list_entry *prev_e = NULL;
|
||||
struct vidtv_psi_desc *desc = s->descriptor;
|
||||
struct vidtv_psi_desc_service *s_desc;
|
||||
|
||||
while (s) {
|
||||
while (desc) {
|
||||
if (s->descriptor->type != SERVICE_DESCRIPTOR)
|
||||
goto next_desc;
|
||||
|
||||
s_desc = (struct vidtv_psi_desc_service *)desc;
|
||||
|
||||
curr_e = kzalloc(sizeof(*curr_e), GFP_KERNEL);
|
||||
if (!curr_e) {
|
||||
vidtv_channel_destroy_service_list(head_e);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
curr_e->service_id = s->service_id;
|
||||
curr_e->service_type = s_desc->service_type;
|
||||
|
||||
if (!head_e)
|
||||
head_e = curr_e;
|
||||
if (prev_e)
|
||||
prev_e->next = curr_e;
|
||||
|
||||
prev_e = curr_e;
|
||||
|
||||
next_desc:
|
||||
desc = desc->next;
|
||||
}
|
||||
s = s->next;
|
||||
}
|
||||
return head_e;
|
||||
}
|
||||
|
||||
int vidtv_channel_si_init(struct vidtv_mux *m)
|
||||
{
|
||||
struct vidtv_psi_desc_service_list_entry *service_list = NULL;
|
||||
struct vidtv_psi_table_pat_program *programs = NULL;
|
||||
struct vidtv_psi_table_sdt_service *services = NULL;
|
||||
struct vidtv_psi_table_eit_event *events = NULL;
|
||||
|
||||
m->si.pat = vidtv_psi_pat_table_init(m->transport_stream_id);
|
||||
if (!m->si.pat)
|
||||
return -ENOMEM;
|
||||
|
||||
m->si.sdt = vidtv_psi_sdt_table_init(m->transport_stream_id);
|
||||
m->si.sdt = vidtv_psi_sdt_table_init(m->network_id,
|
||||
m->transport_stream_id);
|
||||
if (!m->si.sdt)
|
||||
goto free_pat;
|
||||
|
||||
programs = vidtv_channel_pat_prog_cat_into_new(m);
|
||||
if (!programs)
|
||||
goto free_sdt;
|
||||
services = vidtv_channel_sdt_serv_cat_into_new(m);
|
||||
if (!services)
|
||||
goto free_programs;
|
||||
|
||||
events = vidtv_channel_eit_event_cat_into_new(m);
|
||||
if (!events)
|
||||
goto free_services;
|
||||
|
||||
/* look for a service descriptor for every service */
|
||||
service_list = vidtv_channel_build_service_list(services);
|
||||
if (!service_list)
|
||||
goto free_events;
|
||||
|
||||
/* use these descriptors to build the NIT */
|
||||
m->si.nit = vidtv_psi_nit_table_init(m->network_id,
|
||||
m->transport_stream_id,
|
||||
m->network_name,
|
||||
service_list);
|
||||
if (!m->si.nit)
|
||||
goto free_service_list;
|
||||
|
||||
m->si.eit = vidtv_psi_eit_table_init(m->network_id,
|
||||
m->transport_stream_id,
|
||||
programs->service_id);
|
||||
if (!m->si.eit)
|
||||
goto free_nit;
|
||||
|
||||
/* assemble all programs and assign to PAT */
|
||||
vidtv_psi_pat_program_assign(m->si.pat, programs);
|
||||
@ -264,31 +465,65 @@ void vidtv_channel_si_init(struct vidtv_mux *m)
|
||||
/* assemble all services and assign to SDT */
|
||||
vidtv_psi_sdt_service_assign(m->si.sdt, services);
|
||||
|
||||
m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat, m->pcr_pid);
|
||||
/* assemble all events and assign to EIT */
|
||||
vidtv_psi_eit_event_assign(m->si.eit, events);
|
||||
|
||||
m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat,
|
||||
m->pcr_pid);
|
||||
if (!m->si.pmt_secs)
|
||||
goto free_eit;
|
||||
|
||||
vidtv_channel_pmt_match_sections(m->channels,
|
||||
m->si.pmt_secs,
|
||||
m->si.pat->programs);
|
||||
m->si.pat->num_pmt);
|
||||
|
||||
vidtv_channel_destroy_service_list(service_list);
|
||||
|
||||
return 0;
|
||||
|
||||
free_eit:
|
||||
vidtv_psi_eit_table_destroy(m->si.eit);
|
||||
free_nit:
|
||||
vidtv_psi_nit_table_destroy(m->si.nit);
|
||||
free_service_list:
|
||||
vidtv_channel_destroy_service_list(service_list);
|
||||
free_events:
|
||||
vidtv_psi_eit_event_destroy(events);
|
||||
free_services:
|
||||
vidtv_psi_sdt_service_destroy(services);
|
||||
free_programs:
|
||||
vidtv_psi_pat_program_destroy(programs);
|
||||
free_sdt:
|
||||
vidtv_psi_sdt_table_destroy(m->si.sdt);
|
||||
free_pat:
|
||||
vidtv_psi_pat_table_destroy(m->si.pat);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vidtv_channel_si_destroy(struct vidtv_mux *m)
|
||||
{
|
||||
u32 i;
|
||||
u16 num_programs = m->si.pat->programs;
|
||||
|
||||
vidtv_psi_pat_table_destroy(m->si.pat);
|
||||
|
||||
for (i = 0; i < num_programs; ++i)
|
||||
for (i = 0; i < m->si.pat->num_pmt; ++i)
|
||||
vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
|
||||
|
||||
kfree(m->si.pmt_secs);
|
||||
vidtv_psi_sdt_table_destroy(m->si.sdt);
|
||||
vidtv_psi_nit_table_destroy(m->si.nit);
|
||||
vidtv_psi_eit_table_destroy(m->si.eit);
|
||||
}
|
||||
|
||||
void vidtv_channels_init(struct vidtv_mux *m)
|
||||
int vidtv_channels_init(struct vidtv_mux *m)
|
||||
{
|
||||
/* this is the place to add new 'channels' for vidtv */
|
||||
m->channels = vidtv_channel_s302m_init(NULL, m->transport_stream_id);
|
||||
|
||||
if (!m->channels)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vidtv_channels_destroy(struct vidtv_mux *m)
|
||||
@ -302,6 +537,7 @@ void vidtv_channels_destroy(struct vidtv_mux *m)
|
||||
vidtv_psi_pat_program_destroy(curr->program);
|
||||
vidtv_psi_pmt_stream_destroy(curr->streams);
|
||||
vidtv_channel_encoder_destroy(curr->encoders);
|
||||
vidtv_psi_eit_event_destroy(curr->events);
|
||||
|
||||
tmp = curr;
|
||||
curr = curr->next;
|
||||
|
@ -9,6 +9,7 @@
|
||||
* When vidtv boots, it will create some hardcoded channels.
|
||||
* Their services will be concatenated to populate the SDT.
|
||||
* Their programs will be concatenated to populate the PAT
|
||||
* Their events will be concatenated to populate the EIT
|
||||
* For each program in the PAT, a PMT section will be created
|
||||
* The PMT section for a channel will be assigned its streams.
|
||||
* Every stream will have its corresponding encoder polled to produce TS packets
|
||||
@ -22,9 +23,10 @@
|
||||
#define VIDTV_CHANNEL_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include "vidtv_psi.h"
|
||||
|
||||
#include "vidtv_encoder.h"
|
||||
#include "vidtv_mux.h"
|
||||
#include "vidtv_psi.h"
|
||||
|
||||
/**
|
||||
* struct vidtv_channel - A 'channel' abstraction
|
||||
@ -37,6 +39,7 @@
|
||||
* Every stream will have its corresponding encoder polled to produce TS packets
|
||||
* These packets may be interleaved by the mux and then delivered to the bridge
|
||||
*
|
||||
* @name: name of the channel
|
||||
* @transport_stream_id: a number to identify the TS, chosen at will.
|
||||
* @service: A _single_ service. Will be concatenated into the SDT.
|
||||
* @program_num: The link between PAT, PMT and SDT.
|
||||
@ -44,6 +47,7 @@
|
||||
* Will be concatenated into the PAT.
|
||||
* @streams: A stream loop used to populate the PMT section for 'program'
|
||||
* @encoders: A encoder loop. There must be one encoder for each stream.
|
||||
* @events: Optional event information. This will feed into the EIT.
|
||||
* @next: Optionally chain this channel.
|
||||
*/
|
||||
struct vidtv_channel {
|
||||
@ -54,6 +58,7 @@ struct vidtv_channel {
|
||||
struct vidtv_psi_table_pat_program *program;
|
||||
struct vidtv_psi_table_pmt_stream *streams;
|
||||
struct vidtv_encoder *encoders;
|
||||
struct vidtv_psi_table_eit_event *events;
|
||||
struct vidtv_channel *next;
|
||||
};
|
||||
|
||||
@ -61,14 +66,14 @@ struct vidtv_channel {
|
||||
* vidtv_channel_si_init - Init the PSI tables from the channels in the mux
|
||||
* @m: The mux containing the channels.
|
||||
*/
|
||||
void vidtv_channel_si_init(struct vidtv_mux *m);
|
||||
int vidtv_channel_si_init(struct vidtv_mux *m);
|
||||
void vidtv_channel_si_destroy(struct vidtv_mux *m);
|
||||
|
||||
/**
|
||||
* vidtv_channels_init - Init hardcoded, fake 'channels'.
|
||||
* @m: The mux to store the channels into.
|
||||
*/
|
||||
void vidtv_channels_init(struct vidtv_mux *m);
|
||||
int vidtv_channels_init(struct vidtv_mux *m);
|
||||
struct vidtv_channel
|
||||
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id);
|
||||
void vidtv_channels_destroy(struct vidtv_mux *m);
|
||||
|
@@ -16,7 +16,6 @@
#define CLOCK_UNIT_27MHZ 27000000
#define VIDTV_SLEEP_USECS 10000
#define VIDTV_MAX_SLEEP_USECS (2 * VIDTV_SLEEP_USECS)
#define VIDTV_DEFAULT_TS_ID 0x744

u32 vidtv_memcpy(void *to,
		 size_t to_offset,
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include <media/dvb_frontend.h>

#include "vidtv_demod.h"
@@ -192,7 +193,6 @@ static void vidtv_demod_update_stats(struct dvb_frontend *fe)

	c->cnr.stat[0].svalue = state->tuner_cnr;
	c->cnr.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);

}

static int vidtv_demod_read_status(struct dvb_frontend *fe,
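The statistics update above subtracts a small random amount from the nominal carrier-to-noise figure on every refresh, so the simulated signal quality wobbles slightly. A standalone sketch of that jitter (the function name and values are illustrative, in millidecibels):

#include <linux/prandom.h>
#include <linux/types.h>

/* illustration only: lower a nominal CNR by up to 2%, the same shape of
 * jitter vidtv_demod_update_stats() applies to state->tuner_cnr */
static s64 example_jitter_cnr(s64 cnr_mdb)
{
	return cnr_mdb - prandom_u32_max((u32)(cnr_mdb / 50));
}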
@ -12,6 +12,7 @@
|
||||
#define VIDTV_DEMOD_H
|
||||
|
||||
#include <linux/dvb/frontend.h>
|
||||
|
||||
#include <media/dvb_frontend.h>
|
||||
|
||||
/**
|
||||
@ -19,6 +20,9 @@
|
||||
* modulation and fec_inner
|
||||
* @modulation: see enum fe_modulation
|
||||
* @fec: see enum fe_fec_rate
|
||||
* @cnr_ok: S/N threshold to consider the signal as OK. Below that, there's
|
||||
* a chance of losing sync.
|
||||
* @cnr_good: S/N threshold to consider the signal strong.
|
||||
*
|
||||
* This struct matches values for 'good' and 'ok' CNRs given the combination
|
||||
* of modulation and fec_inner in use. We might simulate some noise if the
|
||||
@ -52,13 +56,8 @@ struct vidtv_demod_config {
|
||||
* struct vidtv_demod_state - The demodulator state
|
||||
* @frontend: The frontend structure allocated by the demod.
|
||||
* @config: The config used to init the demod.
|
||||
* @poll_snr: The task responsible for periodically checking the simulated
|
||||
* signal quality, eventually dropping or reacquiring the TS lock.
|
||||
* @status: the demod status.
|
||||
* @cold_start: Whether the demod has not been init yet.
|
||||
* @poll_snr_thread_running: Whether the task responsible for periodically
|
||||
* checking the simulated signal quality is running.
|
||||
* @poll_snr_thread_restart: Whether we should restart the poll_snr task.
|
||||
* @tuner_cnr: current S/N ratio for the signal carrier
|
||||
*/
|
||||
struct vidtv_demod_state {
|
||||
struct dvb_frontend frontend;
|
||||
|
@ -28,7 +28,7 @@ struct vidtv_access_unit {
|
||||
struct vidtv_access_unit *next;
|
||||
};
|
||||
|
||||
/* Some musical notes, used by a tone generator */
|
||||
/* Some musical notes, used by a tone generator. Values are in Hz */
|
||||
enum musical_notes {
|
||||
NOTE_SILENT = 0,
|
||||
|
||||
@ -103,14 +103,16 @@ enum musical_notes {
|
||||
* @encoder_buf_sz: The encoder buffer size, in bytes
|
||||
* @encoder_buf_offset: Our byte position in the encoder buffer.
|
||||
* @sample_count: How many samples we have encoded in total.
|
||||
* @access_units: encoder payload units, used for clock references
|
||||
* @src_buf: The source of raw data to be encoded, encoder might set a
|
||||
* default if null.
|
||||
* @src_buf_sz: size of @src_buf.
|
||||
* @src_buf_offset: Our position in the source buffer.
|
||||
* @is_video_encoder: Whether this a video encoder (as opposed to audio)
|
||||
* @ctx: Encoder-specific state.
|
||||
* @stream_id: Examples: Audio streams (0xc0-0xdf), Video streams
|
||||
* (0xe0-0xef).
|
||||
* @es_id: The TS PID to use for the elementary stream in this encoder.
|
||||
* @es_pid: The TS PID to use for the elementary stream in this encoder.
|
||||
* @encode: Prepare enough AUs for the given amount of time.
|
||||
* @clear: Clear the encoder output.
|
||||
* @sync: Attempt to synchronize with this encoder.
|
||||
@ -131,9 +133,6 @@ struct vidtv_encoder {
|
||||
u32 encoder_buf_offset;
|
||||
|
||||
u64 sample_count;
|
||||
int last_duration;
|
||||
int note_offset;
|
||||
enum musical_notes last_tone;
|
||||
|
||||
struct vidtv_access_unit *access_units;
|
||||
|
||||
|