This is the 5.4.148 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmFLBPMACgkQONu9yGCS
aT6BIQ//Wb4ZQJtEVvaKnda7vFwe8BoZzPGYZA4Imn9KERDRgHuavEuRfMQtKc2y
YHwe/PD2JreuDHcd+Wz32xsdMe045xNvgiE1oGcxq0jNBvhJqANSmVTWpdqAquON
cTmwsK3roa7ELC2g1WjrYZDv6CrCggqvbuM9AJ/cLITtd8zerhLdZo+CCDG/28cH
EosrWvkBcaGmX+r/IBC86Rt6K2OFQ/3LLbb79L4vjKi5lopsm5CTAmfOfIk8p1gB
mGB3PkQZnIqphBfqGXLGuljl4e+zb1SONrugUh78Egom393Ex34oo+RjWEGe9dV2
Stkuqo0GTi85X7JA7SGCA/xgF8A8yvaaLjQBsJsL9+2ji+GW+J7hfn4mE5h8H3Di
UBjeLMFJA8Mge8Ng9xUSttvjRdwSTm0jWTS9SOl07w24b0pKYbMrQdWt2eI6CT+/
ytq3nCxNJZKeVcAVH+OJNrbSLYvMy/PgYvGTbzASkNmpAeyNiHOyBz1sRcoiAM9U
QCWDdZyaqDKktqEyKHxK3opqPzbnHfZFFlCxR7Gw7vvR+itIGJEh/50RNv2F6vnu
wzowrVxe+Bf1h7JiNEqLLVHdiuygRqjH1ygepGM4+3TVF4jYHzDISyrqlA/Se3Pg
Hhvlzsbv7PH+KiApwBFjSeHTs5WOrokGMFQ7ZYFDpPkleWiywS0=
=50Hk
-----END PGP SIGNATURE-----

Merge 5.4.148 into android11-5.4-lts

Changes in 5.4.148
    rtc: tps65910: Correct driver module alias
    btrfs: wake up async_delalloc_pages waiters after submit
    btrfs: reset replace target device to allocation state on close
    blk-zoned: allow zone management send operations without CAP_SYS_ADMIN
    blk-zoned: allow BLKREPORTZONE without CAP_SYS_ADMIN
    PCI/MSI: Skip masking MSI-X on Xen PV
    powerpc/perf/hv-gpci: Fix counter value parsing
    xen: fix setting of max_pfn in shared_info
    include/linux/list.h: add a macro to test if entry is pointing to the head
    9p/xen: Fix end of loop tests for list_for_each_entry
    tools/thermal/tmon: Add cross compiling support
    pinctrl: stmfx: Fix hazardous u8[] to unsigned long cast
    pinctrl: ingenic: Fix incorrect pull up/down info
    soc: qcom: aoss: Fix the out of bound usage of cooling_devs
    soc: aspeed: lpc-ctrl: Fix boundary check for mmap
    soc: aspeed: p2a-ctrl: Fix boundary check for mmap
    arm64: head: avoid over-mapping in map_memory
    crypto: public_key: fix overflow during implicit conversion
    block: bfq: fix bfq_set_next_ioprio_data()
    power: supply: max17042: handle fails of reading status register
    dm crypt: Avoid percpu_counter spinlock contention in crypt_page_alloc()
    VMCI: fix NULL pointer dereference when unmapping queue pair
    media: uvc: don't do DMA on stack
    media: rc-loopback: return number of emitters rather than error
    Revert "dmaengine: imx-sdma: refine to load context only once"
    dmaengine: imx-sdma: remove duplicated sdma_load_context
    libata: add ATA_HORKAGE_NO_NCQ_TRIM for Samsung 860 and 870 SSDs
    ARM: 9105/1: atags_to_fdt: don't warn about stack size
    PCI/portdrv: Enable Bandwidth Notification only if port supports it
    PCI: Restrict ASMedia ASM1062 SATA Max Payload Size Supported
    PCI: Return ~0 data on pciconfig_read() CAP_SYS_ADMIN failure
    PCI: xilinx-nwl: Enable the clock through CCF
    PCI: aardvark: Fix checking for PIO status
    PCI: aardvark: Increase polling delay to 1.5s while waiting for PIO response
    PCI: aardvark: Fix masking and unmasking legacy INTx interrupts
    HID: input: do not report stylus battery state as "full"
    f2fs: quota: fix potential deadlock
    scsi: bsg: Remove support for SCSI_IOCTL_SEND_COMMAND
    IB/hfi1: Adjust pkey entry in index 0
    RDMA/iwcm: Release resources if iw_cm module initialization fails
    docs: Fix infiniband uverbs minor number
    pinctrl: samsung: Fix pinctrl bank pin count
    vfio: Use config not menuconfig for VFIO_NOIOMMU
    powerpc/stacktrace: Include linux/delay.h
    RDMA/efa: Remove double QP type assignment
    f2fs: show f2fs instance in printk_ratelimited
    f2fs: reduce the scope of setting fsck tag when de->name_len is zero
    openrisc: don't printk() unconditionally
    dma-debug: fix debugfs initialization order
    SUNRPC: Fix potential memory corruption
    scsi: fdomain: Fix error return code in fdomain_probe()
    pinctrl: single: Fix error return code in pcs_parse_bits_in_pinctrl_entry()
    scsi: smartpqi: Fix an error code in pqi_get_raid_map()
    scsi: qedi: Fix error codes in qedi_alloc_global_queues()
    scsi: qedf: Fix error codes in qedf_alloc_global_queues()
    powerpc/config: Renable MTD_PHYSMAP_OF
    scsi: target: avoid per-loop XCOPY buffer allocations
    HID: i2c-hid: Fix Elan touchpad regression
    KVM: PPC: Book3S HV Nested: Reflect guest PMU in-use to L0 when guest SPRs are live
    platform/x86: dell-smbios-wmi: Add missing kfree in error-exit from run_smbios_call
    fscache: Fix cookie key hashing
    clk: at91: sam9x60: Don't use audio PLL
    clk: at91: clk-generated: pass the id of changeable parent at registration
    clk: at91: clk-generated: Limit the requested rate to our range
    KVM: PPC: Fix clearing never mapped TCEs in realmode
    f2fs: fix to account missing .skipped_gc_rwsem
    f2fs: fix unexpected ENOENT comes from f2fs_map_blocks()
    f2fs: fix to unmap pages from userspace process in punch_hole()
    MIPS: Malta: fix alignment of the devicetree buffer
    kbuild: Fix 'no symbols' warning when CONFIG_TRIM_UNUSD_KSYMS=y
    userfaultfd: prevent concurrent API initialization
    drm/amdgpu: Fix amdgpu_ras_eeprom_init()
    ASoC: atmel: ATMEL drivers don't need HAS_DMA
    media: dib8000: rewrite the init prbs logic
    crypto: mxs-dcp - Use sg_mapping_iter to copy data
    PCI: Use pci_update_current_state() in pci_enable_device_flags()
    tipc: keep the skb in rcv queue until the whole data is read
    iio: dac: ad5624r: Fix incorrect handling of an optional regulator.
    iavf: do not override the adapter state in the watchdog task
    iavf: fix locking of critical sections
    ARM: dts: qcom: apq8064: correct clock names
    video: fbdev: kyro: fix a DoS bug by restricting user input
    netlink: Deal with ESRCH error in nlmsg_notify()
    Smack: Fix wrong semantics in smk_access_entry()
    drm: avoid blocking in drm_clients_info's rcu section
    igc: Check if num of q_vectors is smaller than max before array access
    usb: host: fotg210: fix the endpoint's transactional opportunities calculation
    usb: host: fotg210: fix the actual_length of an iso packet
    usb: gadget: u_ether: fix a potential null pointer dereference
    USB: EHCI: ehci-mv: improve error handling in mv_ehci_enable()
    usb: gadget: composite: Allow bMaxPower=0 if self-powered
    staging: board: Fix uninitialized spinlock when attaching genpd
    tty: serial: jsm: hold port lock when reporting modem line changes
    drm/amd/display: Fix timer_per_pixel unit error
    drm/amd/amdgpu: Update debugfs link_settings output link_rate field in hex
    bpf/tests: Fix copy-and-paste error in double word test
    bpf/tests: Do not PASS tests without actually testing the result
    video: fbdev: asiliantfb: Error out if 'pixclock' equals zero
    video: fbdev: kyro: Error out if 'pixclock' equals zero
    video: fbdev: riva: Error out if 'pixclock' equals zero
    ipv4: ip_output.c: Fix out-of-bounds warning in ip_copy_addrs()
    flow_dissector: Fix out-of-bounds warnings
    s390/jump_label: print real address in a case of a jump label bug
    s390: make PCI mio support a machine flag
    serial: 8250: Define RX trigger levels for OxSemi 950 devices
    xtensa: ISS: don't panic in rs_init
    hvsi: don't panic on tty_register_driver failure
    serial: 8250_pci: make setup_port() parameters explicitly unsigned
    staging: ks7010: Fix the initialization of the 'sleep_status' structure
    samples: bpf: Fix tracex7 error raised on the missing argument
    ata: sata_dwc_460ex: No need to call phy_exit() befre phy_init()
    Bluetooth: skip invalid hci_sync_conn_complete_evt
    workqueue: Fix possible memory leaks in wq_numa_init()
    bonding: 3ad: fix the concurrency between __bond_release_one() and bond_3ad_state_machine_handler()
    arm64: tegra: Fix Tegra194 PCIe EP compatible string
    ASoC: Intel: bytcr_rt5640: Move "Platform Clock" routes to the maps for the matching in-/output
    media: imx258: Rectify mismatch of VTS value
    media: imx258: Limit the max analogue gain to 480
    media: v4l2-dv-timings.c: fix wrong condition in two for-loops
    media: TDA1997x: fix tda1997x_query_dv_timings() return value
    media: tegra-cec: Handle errors of clk_prepare_enable()
    ARM: dts: imx53-ppd: Fix ACHC entry
    arm64: dts: qcom: sdm660: use reg value for memory node
    net: ethernet: stmmac: Do not use unreachable() in ipq806x_gmac_probe()
    drm/msm: mdp4: drop vblank get/put from prepare/complete_commit
    selftests/bpf: Fix xdp_tx.c prog section name
    Bluetooth: schedule SCO timeouts with delayed_work
    Bluetooth: avoid circular locks in sco_sock_connect
    net/mlx5: Fix variable type to match 64bit
    gpu: drm: amd: amdgpu: amdgpu_i2c: fix possible uninitialized-variable access in amdgpu_i2c_router_select_ddc_port()
    drm/display: fix possible null-pointer dereference in dcn10_set_clock()
    mac80211: Fix monitor MTU limit so that A-MSDUs get through
    ARM: tegra: tamonten: Fix UART pad setting
    arm64: tegra: Fix compatible string for Tegra132 CPUs
    arm64: dts: ls1046a: fix eeprom entries
    nvme-tcp: don't check blk_mq_tag_to_rq when receiving pdu data
    Bluetooth: Fix handling of LE Enhanced Connection Complete
    opp: Don't print an error if required-opps is missing
    serial: sh-sci: fix break handling for sysrq
    tcp: enable data-less, empty-cookie SYN with TFO_SERVER_COOKIE_NOT_REQD
    rpc: fix gss_svc_init cleanup on failure
    staging: rts5208: Fix get_ms_information() heap buffer size
    gfs2: Don't call dlm after protocol is unmounted
    usb: chipidea: host: fix port index underflow and UBSAN complains
    lockd: lockd server-side shouldn't set fl_ops
    drm/exynos: Always initialize mapping in exynos_drm_register_dma()
    m68knommu: only set CONFIG_ISA_DMA_API for ColdFire sub-arch
    btrfs: tree-log: check btrfs_lookup_data_extent return value
    ASoC: Intel: Skylake: Fix module configuration for KPB and MIXER
    ASoC: Intel: Skylake: Fix passing loadable flag for module
    of: Don't allow __of_attached_node_sysfs() without CONFIG_SYSFS
    mmc: sdhci-of-arasan: Check return value of non-void funtions
    mmc: rtsx_pci: Fix long reads when clock is prescaled
    selftests/bpf: Enlarge select() timeout for test_maps
    mmc: core: Return correct emmc response in case of ioctl error
    cifs: fix wrong release in sess_alloc_buffer() failed path
    Revert "USB: xhci: fix U1/U2 handling for hardware with XHCI_INTEL_HOST quirk set"
    usb: musb: musb_dsps: request_irq() after initializing musb
    usbip: give back URBs for unsent unlink requests during cleanup
    usbip:vhci_hcd USB port can get stuck in the disabled state
    ASoC: rockchip: i2s: Fix regmap_ops hang
    ASoC: rockchip: i2s: Fixup config for DAIFMT_DSP_A/B
    drm/amdkfd: Account for SH/SE count when setting up cu masks.
    iwlwifi: mvm: fix a memory leak in iwl_mvm_mac_ctxt_beacon_changed
    iwlwifi: mvm: avoid static queue number aliasing
    iwlwifi: mvm: fix access to BSS elements
    net/mlx5: DR, Enable QP retransmission
    parport: remove non-zero check on count
    ath9k: fix OOB read ar9300_eeprom_restore_internal
    ath9k: fix sleeping in atomic context
    net: fix NULL pointer reference in cipso_v4_doi_free
    fix array-index-out-of-bounds in taprio_change
    net: w5100: check return value after calling platform_get_resource()
    parisc: fix crash with signals and alloca
    ovl: fix BUG_ON() in may_delete() when called from ovl_cleanup()
    scsi: BusLogic: Fix missing pr_cont() use
    scsi: qla2xxx: Changes to support kdump kernel
    scsi: qla2xxx: Sync queue idx with queue_pair_map idx
    cpufreq: powernv: Fix init_chip_info initialization in numa=off
    s390/pv: fix the forcing of the swiotlb
    mm/hugetlb: initialize hugetlb_usage in mm_init
    mm,vmscan: fix divide by zero in get_scan_count
    memcg: enable accounting for pids in nested pid namespaces
    platform/chrome: cros_ec_proto: Send command again when timeout occurs
    lib/test_stackinit: Fix static initializer test
    net: dsa: lantiq_gswip: fix maximum frame length
    drm/msi/mdp4: populate priv->kms in mdp4_kms_init
    drm/amdgpu: Fix BUG_ON assert
    drm/panfrost: Simplify lock_region calculation
    drm/panfrost: Use u64 for size in lock_region
    drm/panfrost: Clamp lock region to Bifrost minimum
    btrfs: fix upper limit for max_inline for page size 64K
    xen: reset legacy rtc flag for PV domU
    bnx2x: Fix enabling network interfaces without VFs
    arm64/sve: Use correct size when reinitialising SVE state
    PM: base: power: don't try to use non-existing RTC for storing data
    PCI: Add AMD GPU multi-function power dependencies
    drm/amd/amdgpu: Increase HWIP_MAX_INSTANCE to 10
    drm/etnaviv: return context from etnaviv_iommu_context_get
    drm/etnaviv: put submit prev MMU context when it exists
    drm/etnaviv: stop abusing mmu_context as FE running marker
    drm/etnaviv: keep MMU context across runtime suspend/resume
    drm/etnaviv: exec and MMU state is lost when resetting the GPU
    drm/etnaviv: fix MMU context leak on GPU reset
    drm/etnaviv: reference MMU context when setting up hardware state
    drm/etnaviv: add missing MMU context put when reaping MMU mapping
    s390/sclp: fix Secure-IPL facility detection
    x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
    tipc: fix an use-after-free issue in tipc_recvmsg
    net-caif: avoid user-triggerable WARN_ON(1)
    ptp: dp83640: don't define PAGE0
    dccp: don't duplicate ccid when cloning dccp sock
    net/l2tp: Fix reference count leak in l2tp_udp_recv_core
    r6040: Restore MDIO clock frequency after MAC reset
    tipc: increase timeout in tipc_sk_enqueue()
    perf machine: Initialize srcline string member in add_location struct
    net/mlx5: FWTrace, cancel work on alloc pd error flow
    net/mlx5: Fix potential sleeping in atomic context
    events: Reuse value read using READ_ONCE instead of re-reading it
    vhost_net: fix OoB on sendmsg() failure.
    net/af_unix: fix a data-race in unix_dgram_poll
    net: dsa: destroy the phylink instance on any error in dsa_slave_phy_setup
    tcp: fix tp->undo_retrans accounting in tcp_sacktag_one()
    qed: Handle management FW error
    dt-bindings: arm: Fix Toradex compatible typo
    ibmvnic: check failover_pending in login response
    KVM: PPC: Book3S HV: Tolerate treclaim. in fake-suspend mode changing registers
    net: hns3: pad the short tunnel frame before sending to hardware
    net: hns3: change affinity_mask to numa node range
    net: hns3: disable mac in flr process
    net: hns3: fix the timing issue of VF clearing interrupt sources
    mm/memory_hotplug: use "unsigned long" for PFN in zone_for_pfn_range()
    dt-bindings: mtd: gpmc: Fix the ECC bytes vs. OOB bytes equation
    mfd: db8500-prcmu: Adjust map to reality
    PCI: Add ACS quirks for NXP LX2xx0 and LX2xx2 platforms
    fuse: fix use after free in fuse_read_interrupt()
    mfd: Don't use irq_create_mapping() to resolve a mapping
    tracing/probes: Reject events which have the same name of existing one
    PCI: Add ACS quirks for Cavium multi-function devices
    Set fc_nlinfo in nh_create_ipv4, nh_create_ipv6
    net: usb: cdc_mbim: avoid altsetting toggling for Telit LN920
    block, bfq: honor already-setup queue merges
    PCI: ibmphp: Fix double unmap of io_mem
    ethtool: Fix an error code in cxgb2.c
    NTB: Fix an error code in ntb_msit_probe()
    NTB: perf: Fix an error code in perf_setup_inbuf()
    mfd: axp20x: Update AXP288 volatile ranges
    PCI: Fix pci_dev_str_match_path() alloc while atomic bug
    mfd: tqmx86: Clear GPIO IRQ resource when no IRQ is set
    KVM: arm64: Handle PSCI resets before userspace touches vCPU state
    PCI: Sync __pci_register_driver() stub for CONFIG_PCI=n
    mtd: rawnand: cafe: Fix a resource leak in the error handling path of 'cafe_nand_probe()'
    ARC: export clear_user_page() for modules
    perf unwind: Do not overwrite FEATURE_CHECK_LDFLAGS-libunwind-{x86,aarch64}
    net: dsa: b53: Fix calculating number of switch ports
    netfilter: socket: icmp6: fix use-after-scope
    fq_codel: reject silly quantum parameters
    qlcnic: Remove redundant unlock in qlcnic_pinit_from_rom
    ip_gre: validate csum_start only on pull
    net: renesas: sh_eth: Fix freeing wrong tx descriptor
    s390/bpf: Fix optimizing out zero-extensions
    s390/bpf: Fix 64-bit subtraction of the -0x80000000 constant
    Linux 5.4.148

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8613b511cb543a7ce0d1623663fc1306aaa45af1
@@ -3002,10 +3002,10 @@
     65 = /dev/infiniband/issm1     Second InfiniBand IsSM device
        ...
    127 = /dev/infiniband/issm63    63rd InfiniBand IsSM device
-   128 = /dev/infiniband/uverbs0   First InfiniBand verbs device
-   129 = /dev/infiniband/uverbs1   Second InfiniBand verbs device
+   192 = /dev/infiniband/uverbs0   First InfiniBand verbs device
+   193 = /dev/infiniband/uverbs1   Second InfiniBand verbs device
        ...
-   159 = /dev/infiniband/uverbs31  31st InfiniBand verbs device
+   223 = /dev/infiniband/uverbs31  31st InfiniBand verbs device
 
 232 char   Biometric Devices
      0 = /dev/biometric/sensor0/fingerprint   first fingerprint sensor on first device
@@ -49,7 +49,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
@@ -122,7 +122,7 @@ on various other factors also like;
 	so the device should have enough free bytes available its OOB/Spare
 	area to accommodate ECC for entire page. In general following expression
 	helps in determining if given device can accommodate ECC syndrome:
-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
+	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
 	where
 		OOBSIZE		number of bytes in OOB/spare area
 		PAGESIZE	number of bytes in main-area of device page
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 147
+SUBLEVEL = 148
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 	clear_page(to);
 	clear_bit(PG_dc_clean, &page->flags);
 }
-
+EXPORT_SYMBOL(clear_user_page);
 
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
@@ -90,6 +90,8 @@ $(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
 	$(addprefix $(obj)/,$(libfdt_hdrs))
 
 ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
+CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
+CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
 OBJS	+= $(libfdt_objs) atags_to_fdt.o
 endif
 
@ -70,6 +70,12 @@
|
||||
clock-frequency = <11289600>;
|
||||
};
|
||||
|
||||
achc_24M: achc-clock {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <24000000>;
|
||||
};
|
||||
|
||||
sgtlsound: sound {
|
||||
compatible = "fsl,imx53-cpuvo-sgtl5000",
|
||||
"fsl,imx-audio-sgtl5000";
|
||||
@ -287,16 +293,13 @@
|
||||
&gpio4 12 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
|
||||
spidev0: spi@0 {
|
||||
compatible = "ge,achc";
|
||||
reg = <0>;
|
||||
spi-max-frequency = <1000000>;
|
||||
};
|
||||
|
||||
spidev1: spi@1 {
|
||||
compatible = "ge,achc";
|
||||
reg = <1>;
|
||||
spi-max-frequency = <1000000>;
|
||||
spidev0: spi@1 {
|
||||
compatible = "ge,achc", "nxp,kinetis-k20";
|
||||
reg = <1>, <0>;
|
||||
vdd-supply = <&reg_3v3>;
|
||||
vdda-supply = <&reg_3v3>;
|
||||
clocks = <&achc_24M>;
|
||||
reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
|
||||
gpioxra0: gpio@2 {
|
||||
|
@ -1261,9 +1261,9 @@
|
||||
<&mmcc DSI1_BYTE_CLK>,
|
||||
<&mmcc DSI_PIXEL_CLK>,
|
||||
<&mmcc DSI1_ESC_CLK>;
|
||||
clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
|
||||
"src_clk", "byte_clk", "pixel_clk",
|
||||
"core_clk";
|
||||
clock-names = "iface", "bus", "core_mmss",
|
||||
"src", "byte", "pixel",
|
||||
"core";
|
||||
|
||||
assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
|
||||
<&mmcc DSI1_ESC_SRC>,
|
||||
|
@ -185,8 +185,9 @@
|
||||
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
|
||||
"cdev1", "cdev2", "dap1", "dtb", "gma",
|
||||
"gmb", "gmc", "gmd", "gme", "gpu7",
|
||||
"gpv", "i2cp", "pta", "rm", "slxa",
|
||||
"slxk", "spia", "spib", "uac";
|
||||
"gpv", "i2cp", "irrx", "irtx", "pta",
|
||||
"rm", "slxa", "slxk", "spia", "spib",
|
||||
"uac";
|
||||
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
|
||||
nvidia,tristate = <TEGRA_PIN_DISABLE>;
|
||||
};
|
||||
@ -211,7 +212,7 @@
|
||||
conf_ddc {
|
||||
nvidia,pins = "ddc", "dta", "dtd", "kbca",
|
||||
"kbcb", "kbcc", "kbcd", "kbce", "kbcf",
|
||||
"sdc";
|
||||
"sdc", "uad", "uca";
|
||||
nvidia,pull = <TEGRA_PIN_PULL_UP>;
|
||||
nvidia,tristate = <TEGRA_PIN_DISABLE>;
|
||||
};
|
||||
@ -221,10 +222,9 @@
|
||||
"lvp0", "owc", "sdb";
|
||||
nvidia,tristate = <TEGRA_PIN_ENABLE>;
|
||||
};
|
||||
conf_irrx {
|
||||
nvidia,pins = "irrx", "irtx", "sdd", "spic",
|
||||
"spie", "spih", "uaa", "uab", "uad",
|
||||
"uca", "ucb";
|
||||
conf_sdd {
|
||||
nvidia,pins = "sdd", "spic", "spie", "spih",
|
||||
"uaa", "uab", "ucb";
|
||||
nvidia,pull = <TEGRA_PIN_PULL_UP>;
|
||||
nvidia,tristate = <TEGRA_PIN_ENABLE>;
|
||||
};
|
||||
|
@ -83,15 +83,9 @@
|
||||
};
|
||||
|
||||
eeprom@52 {
|
||||
compatible = "atmel,24c512";
|
||||
compatible = "onnn,cat24c04", "atmel,24c04";
|
||||
reg = <0x52>;
|
||||
};
|
||||
|
||||
eeprom@53 {
|
||||
compatible = "atmel,24c512";
|
||||
reg = <0x53>;
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -58,14 +58,9 @@
|
||||
};
|
||||
|
||||
eeprom@52 {
|
||||
compatible = "atmel,24c512";
|
||||
compatible = "onnn,cat24c05", "atmel,24c04";
|
||||
reg = <0x52>;
|
||||
};
|
||||
|
||||
eeprom@53 {
|
||||
compatible = "atmel,24c512";
|
||||
reg = <0x53>;
|
||||
};
|
||||
};
|
||||
|
||||
&i2c3 {
|
||||
|
@ -1082,13 +1082,13 @@
|
||||
|
||||
cpu@0 {
|
||||
device_type = "cpu";
|
||||
compatible = "nvidia,denver";
|
||||
compatible = "nvidia,tegra132-denver";
|
||||
reg = <0>;
|
||||
};
|
||||
|
||||
cpu@1 {
|
||||
device_type = "cpu";
|
||||
compatible = "nvidia,denver";
|
||||
compatible = "nvidia,tegra132-denver";
|
||||
reg = <1>;
|
||||
};
|
||||
};
|
||||
|
@ -1434,7 +1434,7 @@
|
||||
};
|
||||
|
||||
pcie_ep@14160000 {
|
||||
compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
||||
compatible = "nvidia,tegra194-pcie-ep";
|
||||
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
|
||||
reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
|
||||
0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
|
||||
@ -1466,7 +1466,7 @@
|
||||
};
|
||||
|
||||
pcie_ep@14180000 {
|
||||
compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
||||
compatible = "nvidia,tegra194-pcie-ep";
|
||||
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
|
||||
reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
|
||||
0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
|
||||
@ -1498,7 +1498,7 @@
|
||||
};
|
||||
|
||||
pcie_ep@141a0000 {
|
||||
compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
||||
compatible = "nvidia,tegra194-pcie-ep";
|
||||
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
|
||||
reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
|
||||
0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
|
||||
|
@ -20,7 +20,7 @@
|
||||
stdout-path = "serial0";
|
||||
};
|
||||
|
||||
memory {
|
||||
memory@40000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x0 0x40000000 0x0 0x20000000>;
|
||||
};
|
||||
|
@@ -65,8 +65,8 @@
 #define EARLY_KASLR	(0)
 #endif
 
-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
-					- ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
 
 #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
 
@@ -498,7 +498,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}
 
@ -195,7 +195,7 @@ ENDPROC(preserve_boot_args)
|
||||
* to be composed of multiple pages. (This effectively scales the end index).
|
||||
*
|
||||
* vstart: virtual address of start of range
|
||||
* vend: virtual address of end of range
|
||||
* vend: virtual address of end of range - we map [vstart, vend]
|
||||
* shift: shift used to transform virtual address into index
|
||||
* ptrs: number of entries in page table
|
||||
* istart: index in table corresponding to vstart
|
||||
@ -232,17 +232,18 @@ ENDPROC(preserve_boot_args)
|
||||
*
|
||||
* tbl: location of page table
|
||||
* rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
|
||||
* vstart: start address to map
|
||||
* vend: end address to map - we map [vstart, vend]
|
||||
* vstart: virtual address of start of range
|
||||
* vend: virtual address of end of range - we map [vstart, vend - 1]
|
||||
* flags: flags to use to map last level entries
|
||||
* phys: physical address corresponding to vstart - physical memory is contiguous
|
||||
* pgds: the number of pgd entries
|
||||
*
|
||||
* Temporaries: istart, iend, tmp, count, sv - these need to be different registers
|
||||
* Preserves: vstart, vend, flags
|
||||
* Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
|
||||
* Preserves: vstart, flags
|
||||
* Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
|
||||
*/
|
||||
.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
|
||||
sub \vend, \vend, #1
|
||||
add \rtbl, \tbl, #PAGE_SIZE
|
||||
mov \sv, \rtbl
|
||||
mov \count, #0
|
||||
|
@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
|
||||
|
||||
endif
|
||||
|
||||
if !MMU
|
||||
if COLDFIRE
|
||||
|
||||
config ISA_DMA_API
|
||||
def_bool !M5272
|
||||
|
@ -22,7 +22,7 @@
|
||||
#define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
|
||||
#define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
|
||||
|
||||
static unsigned char fdt_buf[16 << 10] __initdata;
|
||||
static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
|
||||
|
||||
/* determined physical memory size, not overridden by command line args */
|
||||
extern unsigned long physical_memsize;
|
||||
|
@ -547,6 +547,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
|
||||
l.bnf 1f // ext irq enabled, all ok.
|
||||
l.nop
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
l.addi r1,r1,-0x8
|
||||
l.movhi r3,hi(42f)
|
||||
l.ori r3,r3,lo(42f)
|
||||
@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
|
||||
.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
|
||||
.align 4
|
||||
.previous
|
||||
#endif
|
||||
|
||||
l.ori r4,r4,SPR_SR_IEE // fix the bug
|
||||
// l.sw PT_SR(r1),r4
|
||||
|
@ -238,6 +238,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
|
||||
#endif
|
||||
|
||||
usp = (regs->gr[30] & ~(0x01UL));
|
||||
#ifdef CONFIG_64BIT
|
||||
if (is_compat_task()) {
|
||||
/* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
|
||||
usp = (compat_uint_t)usp;
|
||||
}
|
||||
#endif
|
||||
/*FIXME: frame_size parameter is unused, remove it. */
|
||||
frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
|
||||
|
||||
|
@ -39,6 +39,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
|
||||
# CONFIG_MTD_CFI_I2 is not set
|
||||
CONFIG_MTD_CFI_I4=y
|
||||
CONFIG_MTD_CFI_AMDSTD=y
|
||||
CONFIG_MTD_PHYSMAP=y
|
||||
CONFIG_MTD_PHYSMAP_OF=y
|
||||
# CONFIG_BLK_DEV is not set
|
||||
CONFIG_NETDEVICES=y
|
||||
|
@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
static inline int ppc_get_pmu_inuse(void)
|
||||
{
|
||||
return get_paca()->pmcregs_in_use;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern void power4_enable_pmcs(void);
|
||||
|
||||
#else /* CONFIG_PPC64 */
|
||||
|
@ -8,6 +8,7 @@
|
||||
* Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -177,10 +177,13 @@ static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
|
||||
idx -= stt->offset;
|
||||
page = stt->pages[idx / TCES_PER_PAGE];
|
||||
/*
|
||||
* page must not be NULL in real mode,
|
||||
* kvmppc_rm_ioba_validate() must have taken care of this.
|
||||
* kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is
|
||||
* being cleared, otherwise it returns H_TOO_HARD and we skip this.
|
||||
*/
|
||||
WARN_ON_ONCE_RM(!page);
|
||||
if (!page) {
|
||||
WARN_ON_ONCE_RM(tce != 0);
|
||||
return;
|
||||
}
|
||||
tbl = kvmppc_page_address(page);
|
||||
|
||||
tbl[idx % TCES_PER_PAGE] = tce;
|
||||
|
@ -58,6 +58,7 @@
|
||||
#include <asm/kvm_book3s.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/lppaca.h>
|
||||
#include <asm/pmc.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cputhreads.h>
|
||||
#include <asm/page.h>
|
||||
@ -3559,6 +3560,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
||||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
|
||||
kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
if (kvmhv_on_pseries()) {
|
||||
barrier();
|
||||
if (vcpu->arch.vpa.pinned_addr) {
|
||||
struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
|
||||
get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
|
||||
} else {
|
||||
get_lppaca()->pmcregs_in_use = 1;
|
||||
}
|
||||
barrier();
|
||||
}
|
||||
#endif
|
||||
kvmhv_load_guest_pmu(vcpu);
|
||||
|
||||
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
|
||||
@ -3693,6 +3706,13 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
||||
save_pmu |= nesting_enabled(vcpu->kvm);
|
||||
|
||||
kvmhv_save_guest_pmu(vcpu, save_pmu);
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
if (kvmhv_on_pseries()) {
|
||||
barrier();
|
||||
get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
|
||||
barrier();
|
||||
}
|
||||
#endif
|
||||
|
||||
vc->entry_exit_map = 0x101;
|
||||
vc->in_guest = 0;
|
||||
|
@ -3137,7 +3137,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
|
||||
/* The following code handles the fake_suspend = 1 case */
|
||||
mflr r0
|
||||
std r0, PPC_LR_STKOFF(r1)
|
||||
stdu r1, -PPC_MIN_STKFRM(r1)
|
||||
stdu r1, -TM_FRAME_SIZE(r1)
|
||||
|
||||
/* Turn on TM. */
|
||||
mfmsr r8
|
||||
@ -3152,10 +3152,42 @@ BEGIN_FTR_SECTION
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
|
||||
nop
|
||||
|
||||
/*
|
||||
* It's possible that treclaim. may modify registers, if we have lost
|
||||
* track of fake-suspend state in the guest due to it using rfscv.
|
||||
* Save and restore registers in case this occurs.
|
||||
*/
|
||||
mfspr r3, SPRN_DSCR
|
||||
mfspr r4, SPRN_XER
|
||||
mfspr r5, SPRN_AMR
|
||||
/* SPRN_TAR would need to be saved here if the kernel ever used it */
|
||||
mfcr r12
|
||||
SAVE_NVGPRS(r1)
|
||||
SAVE_GPR(2, r1)
|
||||
SAVE_GPR(3, r1)
|
||||
SAVE_GPR(4, r1)
|
||||
SAVE_GPR(5, r1)
|
||||
stw r12, 8(r1)
|
||||
std r1, HSTATE_HOST_R1(r13)
|
||||
|
||||
/* We have to treclaim here because that's the only way to do S->N */
|
||||
li r3, TM_CAUSE_KVM_RESCHED
|
||||
TRECLAIM(R3)
|
||||
|
||||
GET_PACA(r13)
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
REST_GPR(2, r1)
|
||||
REST_GPR(3, r1)
|
||||
REST_GPR(4, r1)
|
||||
REST_GPR(5, r1)
|
||||
lwz r12, 8(r1)
|
||||
REST_NVGPRS(r1)
|
||||
mtspr SPRN_DSCR, r3
|
||||
mtspr SPRN_XER, r4
|
||||
mtspr SPRN_AMR, r5
|
||||
mtcr r12
|
||||
HMT_MEDIUM
|
||||
|
||||
/*
|
||||
* We were in fake suspend, so we are not going to save the
|
||||
* register state as the guest checkpointed state (since
|
||||
@ -3183,7 +3215,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
|
||||
std r5, VCPU_TFHAR(r9)
|
||||
std r6, VCPU_TFIAR(r9)
|
||||
|
||||
addi r1, r1, PPC_MIN_STKFRM
|
||||
addi r1, r1, TM_FRAME_SIZE
|
||||
ld r0, PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
@ -164,7 +164,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
|
||||
*/
|
||||
count = 0;
|
||||
for (i = offset; i < offset + length; i++)
|
||||
count |= arg->bytes[i] << (i - offset);
|
||||
count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
|
||||
|
||||
*value = count;
|
||||
out:
|
||||
|
@ -39,6 +39,7 @@
|
||||
#define MACHINE_FLAG_NX BIT(15)
|
||||
#define MACHINE_FLAG_GS BIT(16)
|
||||
#define MACHINE_FLAG_SCC BIT(17)
|
||||
#define MACHINE_FLAG_PCI_MIO BIT(18)
|
||||
|
||||
#define LPP_MAGIC BIT(31)
|
||||
#define LPP_PID_MASK _AC(0xffffffff, UL)
|
||||
@ -106,6 +107,7 @@ extern unsigned long __swsusp_reset_dma;
|
||||
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
|
||||
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
|
||||
#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
|
||||
#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
|
||||
|
||||
/*
|
||||
* Console mode. Override with conmode=
|
||||
|
@ -252,6 +252,10 @@ static __init void detect_machine_facilities(void)
|
||||
clock_comparator_max = -1ULL >> 1;
|
||||
__ctl_set_bit(0, 53);
|
||||
}
|
||||
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
|
||||
/* the control bit is set during PCI initialization */
|
||||
}
|
||||
}
|
||||
|
||||
static inline void save_vector_registers(void)
|
||||
|
@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
 	unsigned char *ipe = (unsigned char *)expected;
 	unsigned char *ipn = (unsigned char *)new;
 
-	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+	pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
 	pr_emerg("Found:    %6ph\n", ipc);
 	pr_emerg("Expected: %6ph\n", ipe);
 	pr_emerg("New:      %6ph\n", ipn);
@ -168,9 +168,9 @@ static void pv_init(void)
|
||||
return;
|
||||
|
||||
/* make sure bounce buffers are shared */
|
||||
swiotlb_force = SWIOTLB_FORCE;
|
||||
swiotlb_init(1);
|
||||
swiotlb_update_mem_attributes();
|
||||
swiotlb_force = SWIOTLB_FORCE;
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
|
@ -569,10 +569,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT4(0xb9080000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* alfi %dst,imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, imm);
|
||||
if (imm != 0) {
|
||||
/* alfi %dst,imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
|
||||
@ -594,17 +594,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT4(0xb9090000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* alfi %dst,-imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, -imm);
|
||||
if (imm != 0) {
|
||||
/* alfi %dst,-imm */
|
||||
EMIT6_IMM(0xc20b0000, dst_reg, -imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* agfi %dst,-imm */
|
||||
EMIT6_IMM(0xc2080000, dst_reg, -imm);
|
||||
if (imm == -0x80000000) {
|
||||
/* algfi %dst,0x80000000 */
|
||||
EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
|
||||
} else {
|
||||
/* agfi %dst,-imm */
|
||||
EMIT6_IMM(0xc2080000, dst_reg, -imm);
|
||||
}
|
||||
break;
|
||||
/*
|
||||
* BPF_MUL
|
||||
@ -619,10 +624,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT4(0xb90c0000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
|
||||
if (imm == 1)
|
||||
break;
|
||||
/* msfi %r5,imm */
|
||||
EMIT6_IMM(0xc2010000, dst_reg, imm);
|
||||
if (imm != 1) {
|
||||
/* msfi %r5,imm */
|
||||
EMIT6_IMM(0xc2010000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
|
||||
@ -675,6 +680,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
if (BPF_OP(insn->code) == BPF_MOD)
|
||||
/* lhgi %dst,0 */
|
||||
EMIT4_IMM(0xa7090000, dst_reg, 0);
|
||||
else
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
}
|
||||
/* lhi %w0,0 */
|
||||
@ -769,10 +776,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT4(0xb9820000, dst_reg, src_reg);
|
||||
break;
|
||||
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
|
||||
if (!imm)
|
||||
break;
|
||||
/* xilf %dst,imm */
|
||||
EMIT6_IMM(0xc0070000, dst_reg, imm);
|
||||
if (imm != 0) {
|
||||
/* xilf %dst,imm */
|
||||
EMIT6_IMM(0xc0070000, dst_reg, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
|
||||
@ -793,10 +800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* sll %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* sll %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
|
||||
@ -818,10 +825,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* srl %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* srl %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
|
||||
@ -843,10 +850,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
|
||||
break;
|
||||
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
|
||||
if (imm == 0)
|
||||
break;
|
||||
/* sra %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
|
||||
if (imm != 0) {
|
||||
/* sra %dst,imm(%r0) */
|
||||
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
|
||||
}
|
||||
EMIT_ZERO(dst_reg);
|
||||
break;
|
||||
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
|
||||
|
@ -854,7 +854,6 @@ static void zpci_mem_exit(void)
|
||||
}
|
||||
|
||||
static unsigned int s390_pci_probe __initdata = 1;
|
||||
static unsigned int s390_pci_no_mio __initdata;
|
||||
unsigned int s390_pci_force_floating __initdata;
|
||||
static unsigned int s390_pci_initialized;
|
||||
|
||||
@ -865,7 +864,7 @@ char * __init pcibios_setup(char *str)
|
||||
return NULL;
|
||||
}
|
||||
if (!strcmp(str, "nomio")) {
|
||||
s390_pci_no_mio = 1;
|
||||
S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
|
||||
return NULL;
|
||||
}
|
||||
if (!strcmp(str, "force_floating")) {
|
||||
@ -890,7 +889,7 @@ static int __init pci_base_init(void)
|
||||
if (!test_facility(69) || !test_facility(71))
|
||||
return 0;
|
||||
|
||||
if (test_facility(153) && !s390_pci_no_mio) {
|
||||
if (MACHINE_HAS_PCI_MIO) {
|
||||
static_branch_enable(&have_mio);
|
||||
ctl_set_bit(2, 5);
|
||||
}
|
||||
|
@ -1355,18 +1355,18 @@ int kern_addr_valid(unsigned long addr)
|
||||
return 0;
|
||||
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
if (p4d_none(*p4d))
|
||||
if (!p4d_present(*p4d))
|
||||
return 0;
|
||||
|
||||
pud = pud_offset(p4d, addr);
|
||||
if (pud_none(*pud))
|
||||
if (!pud_present(*pud))
|
||||
return 0;
|
||||
|
||||
if (pud_large(*pud))
|
||||
return pfn_valid(pud_pfn(*pud));
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd))
|
||||
if (!pmd_present(*pmd))
|
||||
return 0;
|
||||
|
||||
if (pmd_large(*pmd))
|
||||
|
@ -1183,6 +1183,11 @@ static void __init xen_dom0_set_legacy_features(void)
|
||||
x86_platform.legacy.rtc = 1;
|
||||
}
|
||||
|
||||
static void __init xen_domu_set_legacy_features(void)
|
||||
{
|
||||
x86_platform.legacy.rtc = 0;
|
||||
}
|
||||
|
||||
/* First C function to be called on Xen boot */
|
||||
asmlinkage __visible void __init xen_start_kernel(void)
|
||||
{
|
||||
@ -1353,6 +1358,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
add_preferred_console("xenboot", 0, NULL);
|
||||
if (pci_xen)
|
||||
x86_init.pci.arch_init = pci_xen_init;
|
||||
x86_platform.set_legacy_features =
|
||||
xen_domu_set_legacy_features;
|
||||
} else {
|
||||
const struct dom0_vga_console_info *info =
|
||||
(void *)((char *)xen_start_info +
|
||||
|
@ -622,8 +622,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
|
||||
}
|
||||
|
||||
/* Expanded the p2m? */
|
||||
if (pfn > xen_p2m_last_pfn) {
|
||||
xen_p2m_last_pfn = pfn;
|
||||
if (pfn >= xen_p2m_last_pfn) {
|
||||
xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
|
||||
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
|
||||
}
|
||||
|
||||
|
@ -168,9 +168,13 @@ static const struct tty_operations serial_ops = {
|
||||
|
||||
int __init rs_init(void)
|
||||
{
|
||||
tty_port_init(&serial_port);
|
||||
int ret;
|
||||
|
||||
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
|
||||
if (!serial_driver)
|
||||
return -ENOMEM;
|
||||
|
||||
tty_port_init(&serial_port);
|
||||
|
||||
pr_info("%s %s\n", serial_name, serial_version);
|
||||
|
||||
@ -190,8 +194,15 @@ int __init rs_init(void)
|
||||
tty_set_operations(serial_driver, &serial_ops);
|
||||
tty_port_link_device(&serial_port, serial_driver, 0);
|
||||
|
||||
if (tty_register_driver(serial_driver))
|
||||
panic("Couldn't register serial driver\n");
|
||||
ret = tty_register_driver(serial_driver);
|
||||
if (ret) {
|
||||
pr_err("Couldn't register serial driver\n");
|
||||
tty_driver_kref_put(serial_driver);
|
||||
tty_port_destroy(&serial_port);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2523,6 +2523,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
|
||||
* are likely to increase the throughput.
|
||||
*/
|
||||
bfqq->new_bfqq = new_bfqq;
|
||||
/*
|
||||
* The above assignment schedules the following redirections:
|
||||
* each time some I/O for bfqq arrives, the process that
|
||||
* generated that I/O is disassociated from bfqq and
|
||||
* associated with new_bfqq. Here we increases new_bfqq->ref
|
||||
* in advance, adding the number of processes that are
|
||||
* expected to be associated with new_bfqq as they happen to
|
||||
* issue I/O.
|
||||
*/
|
||||
new_bfqq->ref += process_refs;
|
||||
return new_bfqq;
|
||||
}
|
||||
@ -2582,6 +2591,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||
{
|
||||
struct bfq_queue *in_service_bfqq, *new_bfqq;
|
||||
|
||||
/* if a merge has already been setup, then proceed with that first */
|
||||
if (bfqq->new_bfqq)
|
||||
return bfqq->new_bfqq;
|
||||
|
||||
/*
|
||||
* Do not perform queue merging if the device is non
|
||||
* rotational and performs internal queueing. In fact, such a
|
||||
@ -2636,9 +2649,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||
if (bfq_too_late_for_merging(bfqq))
|
||||
return NULL;
|
||||
|
||||
if (bfqq->new_bfqq)
|
||||
return bfqq->new_bfqq;
|
||||
|
||||
if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
|
||||
return NULL;
|
||||
|
||||
@@ -5004,7 +5014,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 	if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
 		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
 			bfqq->new_ioprio);
-		bfqq->new_ioprio = IOPRIO_BE_NR;
+		bfqq->new_ioprio = IOPRIO_BE_NR - 1;
 	}
 
 	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
@ -250,9 +250,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
if (!blk_queue_is_zoned(q))
|
||||
return -ENOTTY;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
|
||||
return -EFAULT;
|
||||
|
||||
@ -293,9 +290,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
if (!blk_queue_is_zoned(q))
|
||||
return -ENOTTY;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
if (!(mode & FMODE_WRITE))
|
||||
return -EBADF;
|
||||
|
||||
|
@ -371,10 +371,13 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
case SG_GET_RESERVED_SIZE:
|
||||
case SG_SET_RESERVED_SIZE:
|
||||
case SG_EMULATED_HOST:
|
||||
case SCSI_IOCTL_SEND_COMMAND:
|
||||
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
|
||||
case SG_IO:
|
||||
return bsg_sg_io(bd->queue, file->f_mode, uarg);
|
||||
case SCSI_IOCTL_SEND_COMMAND:
|
||||
pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
|
||||
current->comm);
|
||||
return -EINVAL;
|
||||
default:
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
@ -4556,6 +4556,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
|
||||
|
@ -1249,24 +1249,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
||||
irq = irq_of_parse_and_map(np, 0);
|
||||
if (irq == NO_IRQ) {
|
||||
dev_err(&ofdev->dev, "no SATA DMA irq\n");
|
||||
err = -ENODEV;
|
||||
goto error_out;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SATA_DWC_OLD_DMA
|
||||
if (!of_find_property(np, "dmas", NULL)) {
|
||||
err = sata_dwc_dma_init_old(ofdev, hsdev);
|
||||
if (err)
|
||||
goto error_out;
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
|
||||
if (IS_ERR(hsdev->phy)) {
|
||||
err = PTR_ERR(hsdev->phy);
|
||||
hsdev->phy = NULL;
|
||||
goto error_out;
|
||||
}
|
||||
if (IS_ERR(hsdev->phy))
|
||||
return PTR_ERR(hsdev->phy);
|
||||
|
||||
err = phy_init(hsdev->phy);
|
||||
if (err)
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/rtc.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <linux/mc146818rtc.h>
|
||||
|
||||
@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
|
||||
const char *file = *(const char **)(tracedata + 2);
|
||||
unsigned int user_hash_value, file_hash_value;
|
||||
|
||||
if (!x86_platform.legacy.rtc)
|
||||
return;
|
||||
|
||||
user_hash_value = user % USERHASH;
|
||||
file_hash_value = hash_string(lineno, file, FILEHASH);
|
||||
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
|
||||
@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
|
||||
|
||||
static int early_resume_init(void)
|
||||
{
|
||||
if (!x86_platform.legacy.rtc)
|
||||
return 0;
|
||||
|
||||
hash_value_early_read = read_magic_time();
|
||||
register_pm_notifier(&pm_trace_nb);
|
||||
return 0;
|
||||
@ -277,6 +284,9 @@ static int late_resume_init(void)
|
||||
unsigned int val = hash_value_early_read;
|
||||
unsigned int user, file, dev;
|
||||
|
||||
if (!x86_platform.legacy.rtc)
|
||||
return 0;
|
||||
|
||||
user = val % USERHASH;
|
||||
val = val / USERHASH;
|
||||
file = val % FILEHASH;
|
||||
|
@ -18,8 +18,6 @@
|
||||
|
||||
#define GENERATED_MAX_DIV 255
|
||||
|
||||
#define GCK_INDEX_DT_AUDIO_PLL 5
|
||||
|
||||
struct clk_generated {
|
||||
struct clk_hw hw;
|
||||
struct regmap *regmap;
|
||||
@ -29,7 +27,7 @@ struct clk_generated {
|
||||
u32 gckdiv;
|
||||
const struct clk_pcr_layout *layout;
|
||||
u8 parent_id;
|
||||
bool audio_pll_allowed;
|
||||
int chg_pid;
|
||||
};
|
||||
|
||||
#define to_clk_generated(hw) \
|
||||
@ -109,7 +107,7 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
|
||||
tmp_rate = parent_rate / div;
|
||||
tmp_diff = abs(req->rate - tmp_rate);
|
||||
|
||||
if (*best_diff < 0 || *best_diff > tmp_diff) {
|
||||
if (*best_diff < 0 || *best_diff >= tmp_diff) {
|
||||
*best_rate = tmp_rate;
|
||||
*best_diff = tmp_diff;
|
||||
req->best_parent_rate = parent_rate;
|
||||
@ -129,7 +127,16 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
|
||||
int i;
|
||||
u32 div;
|
||||
|
||||
for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
|
||||
/* do not look for a rate that is outside of our range */
|
||||
if (gck->range.max && req->rate > gck->range.max)
|
||||
req->rate = gck->range.max;
|
||||
if (gck->range.min && req->rate < gck->range.min)
|
||||
req->rate = gck->range.min;
|
||||
|
||||
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
|
||||
if (gck->chg_pid == i)
|
||||
continue;
|
||||
|
||||
parent = clk_hw_get_parent_by_index(hw, i);
|
||||
if (!parent)
|
||||
continue;
|
||||
@ -161,10 +168,10 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
|
||||
* that the only clks able to modify gck rate are those of audio IPs.
|
||||
*/
|
||||
|
||||
if (!gck->audio_pll_allowed)
|
||||
if (gck->chg_pid < 0)
|
||||
goto end;
|
||||
|
||||
parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
|
||||
parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
|
||||
if (!parent)
|
||||
goto end;
|
||||
|
||||
@ -271,8 +278,8 @@ struct clk_hw * __init
|
||||
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
|
||||
const struct clk_pcr_layout *layout,
|
||||
const char *name, const char **parent_names,
|
||||
u8 num_parents, u8 id, bool pll_audio,
|
||||
const struct clk_range *range)
|
||||
u8 num_parents, u8 id,
|
||||
const struct clk_range *range, int chg_pid)
|
||||
{
|
||||
struct clk_generated *gck;
|
||||
struct clk_init_data init;
|
||||
@ -287,15 +294,16 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
|
||||
init.ops = &generated_ops;
|
||||
init.parent_names = parent_names;
|
||||
init.num_parents = num_parents;
|
||||
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
|
||||
CLK_SET_RATE_PARENT;
|
||||
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
|
||||
if (chg_pid >= 0)
|
||||
init.flags |= CLK_SET_RATE_PARENT;
|
||||
|
||||
gck->id = id;
|
||||
gck->hw.init = &init;
|
||||
gck->regmap = regmap;
|
||||
gck->lock = lock;
|
||||
gck->range = *range;
|
||||
gck->audio_pll_allowed = pll_audio;
|
||||
gck->chg_pid = chg_pid;
|
||||
gck->layout = layout;
|
||||
|
||||
clk_generated_startup(gck);
|
||||
|
@ -22,6 +22,8 @@
|
||||
|
||||
#define SYSTEM_MAX_ID 31
|
||||
|
||||
#define GCK_INDEX_DT_AUDIO_PLL 5
|
||||
|
||||
#ifdef CONFIG_HAVE_AT91_AUDIO_PLL
|
||||
static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
|
||||
{
|
||||
@ -135,7 +137,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
|
||||
return;
|
||||
|
||||
for_each_child_of_node(np, gcknp) {
|
||||
bool pll_audio = false;
|
||||
int chg_pid = INT_MIN;
|
||||
|
||||
if (of_property_read_u32(gcknp, "reg", &id))
|
||||
continue;
|
||||
@ -152,12 +154,12 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
|
||||
if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
|
||||
(id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
|
||||
id == GCK_ID_CLASSD))
|
||||
pll_audio = true;
|
||||
chg_pid = GCK_INDEX_DT_AUDIO_PLL;
|
||||
|
||||
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
|
||||
&dt_pcr_layout, name,
|
||||
parent_names, num_parents,
|
||||
id, pll_audio, &range);
|
||||
id, &range, chg_pid);
|
||||
if (IS_ERR(hw))
|
||||
continue;
|
||||
|
||||
|
@ -118,8 +118,8 @@ struct clk_hw * __init
|
||||
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
|
||||
const struct clk_pcr_layout *layout,
|
||||
const char *name, const char **parent_names,
|
||||
u8 num_parents, u8 id, bool pll_audio,
|
||||
const struct clk_range *range);
|
||||
u8 num_parents, u8 id,
|
||||
const struct clk_range *range, int chg_pid);
|
||||
|
||||
struct clk_hw * __init
|
||||
at91_clk_register_h32mx(struct regmap *regmap, const char *name,
|
||||
|
@ -124,7 +124,6 @@ static const struct {
|
||||
char *n;
|
||||
u8 id;
|
||||
struct clk_range r;
|
||||
bool pll;
|
||||
} sam9x60_gck[] = {
|
||||
{ .n = "flex0_gclk", .id = 5, },
|
||||
{ .n = "flex1_gclk", .id = 6, },
|
||||
@ -144,11 +143,9 @@ static const struct {
|
||||
{ .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
|
||||
{ .n = "flex11_gclk", .id = 32, },
|
||||
{ .n = "flex12_gclk", .id = 33, },
|
||||
{ .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
|
||||
.pll = true, },
|
||||
{ .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, },
|
||||
{ .n = "pit64b_gclk", .id = 37, },
|
||||
{ .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
|
||||
.pll = true, },
|
||||
{ .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, },
|
||||
{ .n = "tcb1_gclk", .id = 45, },
|
||||
{ .n = "dbgu_gclk", .id = 47, },
|
||||
};
|
||||
@ -285,8 +282,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
|
||||
sam9x60_gck[i].n,
|
||||
parent_names, 6,
|
||||
sam9x60_gck[i].id,
|
||||
sam9x60_gck[i].pll,
|
||||
&sam9x60_gck[i].r);
|
||||
&sam9x60_gck[i].r, INT_MIN);
|
||||
if (IS_ERR(hw))
|
||||
goto err_free;
|
||||
|
||||
|
@ -115,21 +115,20 @@ static const struct {
|
||||
char *n;
|
||||
u8 id;
|
||||
struct clk_range r;
|
||||
bool pll;
|
||||
int chg_pid;
|
||||
} sama5d2_gck[] = {
|
||||
{ .n = "sdmmc0_gclk", .id = 31, },
|
||||
{ .n = "sdmmc1_gclk", .id = 32, },
|
||||
{ .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "isc_gclk", .id = 46, },
|
||||
{ .n = "pdmic_gclk", .id = 48, },
|
||||
{ .n = "i2s0_gclk", .id = 54, .pll = true },
|
||||
{ .n = "i2s1_gclk", .id = 55, .pll = true },
|
||||
{ .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, },
|
||||
{ .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, },
|
||||
{ .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 },
|
||||
.pll = true },
|
||||
{ .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
|
||||
{ .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
|
||||
{ .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "tcb1_gclk", .id = 36, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "pwm_gclk", .id = 38, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
|
||||
{ .n = "isc_gclk", .id = 46, .chg_pid = INT_MIN, },
|
||||
{ .n = "pdmic_gclk", .id = 48, .chg_pid = INT_MIN, },
|
||||
{ .n = "i2s0_gclk", .id = 54, .chg_pid = 5, },
|
||||
{ .n = "i2s1_gclk", .id = 55, .chg_pid = 5, },
|
||||
{ .n = "can0_gclk", .id = 56, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
|
||||
{ .n = "can1_gclk", .id = 57, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
|
||||
{ .n = "classd_gclk", .id = 59, .chg_pid = 5, .r = { .min = 0, .max = 100000000 }, },
|
||||
};
|
||||
|
||||
static const struct clk_programmable_layout sama5d2_programmable_layout = {
|
||||
@ -317,8 +316,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
|
||||
sama5d2_gck[i].n,
|
||||
parent_names, 6,
|
||||
sama5d2_gck[i].id,
|
||||
sama5d2_gck[i].pll,
|
||||
&sama5d2_gck[i].r);
|
||||
&sama5d2_gck[i].r,
|
||||
sama5d2_gck[i].chg_pid);
|
||||
if (IS_ERR(hw))
|
||||
goto err_free;
|
||||
|
||||
|
@@ -36,6 +36,7 @@
#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
#define MAX_NR_CHIPS 32

#define MAX_RAMP_DOWN_TIME 5120
/*

@@ -1050,12 +1051,20 @@ static int init_chip_info(void)
	unsigned int *chip;
	unsigned int cpu, i;
	unsigned int prev_chip_id = UINT_MAX;
	cpumask_t *chip_cpu_mask;
	int ret = 0;

	chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* Allocate a chip cpu mask large enough to fit mask for all chips */
	chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
	if (!chip_cpu_mask) {
		ret = -ENOMEM;
		goto free_and_return;
	}

	for_each_possible_cpu(cpu) {
		unsigned int id = cpu_to_chip_id(cpu);

@@ -1063,22 +1072,25 @@ static int init_chip_info(void)
			prev_chip_id = id;
			chip[nr_chips++] = id;
		}
		cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
	}

	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
	if (!chips) {
		ret = -ENOMEM;
		goto free_and_return;
		goto out_free_chip_cpu_mask;
	}

	for (i = 0; i < nr_chips; i++) {
		chips[i].id = chip[i];
		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
		cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
		for_each_cpu(cpu, &chips[i].mask)
			per_cpu(chip_info, cpu) = &chips[i];
	}

out_free_chip_cpu_mask:
	kfree(chip_cpu_mask);
free_and_return:
	kfree(chip);
	return ret;

@@ -298,21 +298,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

@@ -330,7 +329,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
	for_each_sg(req->src, src, sg_nents(src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;

@@ -355,34 +354,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
				actx->fill = 0;
			}
		} while (len);

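Illustrative note: the mxs-dcp hunk above drops the hand-rolled walk of the destination scatterlist in favour of sg_pcopy_from_buffer(), which copies a linear buffer into a scatterlist at a running offset. The sketch below is a plain userspace analogue of that copy (arrays stand in for scatterlist entries; the struct and function names are made up for illustration and are not the kernel helper itself).

```c
/* Userspace analogue of sg_pcopy_from_buffer(): scatter `buf` into a list
 * of destination fragments, starting `skip` bytes into the list.
 * Compile with: cc -o scatter scatter.c
 */
#include <stdio.h>
#include <string.h>

struct frag {
	char *data;
	size_t len;
};

static size_t pcopy_from_buffer(struct frag *frags, size_t nfrags,
				const char *buf, size_t buflen, size_t skip)
{
	size_t copied = 0;

	for (size_t i = 0; i < nfrags && copied < buflen; i++) {
		if (skip >= frags[i].len) {	/* fragment lies entirely before the offset */
			skip -= frags[i].len;
			continue;
		}
		size_t n = frags[i].len - skip;
		if (n > buflen - copied)
			n = buflen - copied;
		memcpy(frags[i].data + skip, buf + copied, n);
		copied += n;
		skip = 0;
	}
	return copied;
}

int main(void)
{
	char a[4] = "....", b[6] = "......";
	struct frag dst[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* copy 7 bytes starting 2 bytes into the fragment list */
	size_t n = pcopy_from_buffer(dst, 2, "ABCDEFG", 7, 2);

	printf("copied %zu bytes: %.4s %.6s\n", n, a, b);
	return 0;
}
```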
@ -377,7 +377,6 @@ struct sdma_channel {
|
||||
unsigned long watermark_level;
|
||||
u32 shp_addr, per_addr;
|
||||
enum dma_status status;
|
||||
bool context_loaded;
|
||||
struct imx_dma_data data;
|
||||
struct work_struct terminate_worker;
|
||||
};
|
||||
@ -988,9 +987,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
|
||||
int ret;
|
||||
unsigned long flags;
|
||||
|
||||
if (sdmac->context_loaded)
|
||||
return 0;
|
||||
|
||||
if (sdmac->direction == DMA_DEV_TO_MEM)
|
||||
load_address = sdmac->pc_from_device;
|
||||
else if (sdmac->direction == DMA_DEV_TO_DEV)
|
||||
@ -1033,8 +1029,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
|
||||
|
||||
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
|
||||
|
||||
sdmac->context_loaded = true;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1074,7 +1068,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
|
||||
sdmac->desc = NULL;
|
||||
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
|
||||
vchan_dma_desc_free_list(&sdmac->vc, &head);
|
||||
sdmac->context_loaded = false;
|
||||
}
|
||||
|
||||
static int sdma_disable_channel_async(struct dma_chan *chan)
|
||||
@ -1141,7 +1134,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
|
||||
static int sdma_config_channel(struct dma_chan *chan)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
int ret;
|
||||
|
||||
sdma_disable_channel(chan);
|
||||
|
||||
@ -1181,9 +1173,7 @@ static int sdma_config_channel(struct dma_chan *chan)
|
||||
sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
|
||||
}
|
||||
|
||||
ret = sdma_load_context(sdmac);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
|
||||
@ -1335,7 +1325,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
|
||||
|
||||
sdmac->event_id0 = 0;
|
||||
sdmac->event_id1 = 0;
|
||||
sdmac->context_loaded = false;
|
||||
|
||||
sdma_set_channel_priority(sdmac, 0);
|
||||
|
||||
|
@ -762,7 +762,7 @@ enum amd_hw_ip_block_type {
|
||||
MAX_HWIP
|
||||
};
|
||||
|
||||
#define HWIP_MAX_INSTANCE 8
|
||||
#define HWIP_MAX_INSTANCE 10
|
||||
|
||||
struct amd_powerplay {
|
||||
void *pp_handle;
|
||||
|
@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
|
||||
void
|
||||
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
|
||||
{
|
||||
u8 val;
|
||||
u8 val = 0;
|
||||
|
||||
if (!amdgpu_connector->router.ddc_valid)
|
||||
return;
|
||||
|
@ -200,7 +200,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
|
||||
c++;
|
||||
}
|
||||
|
||||
BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
|
||||
BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
|
||||
|
||||
placement->num_placement = c;
|
||||
placement->placement = places;
|
||||
|
@ -138,7 +138,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
|
||||
return ret;
|
||||
}
|
||||
|
||||
__decode_table_header_from_buff(hdr, &buff[2]);
|
||||
__decode_table_header_from_buff(hdr, buff);
|
||||
|
||||
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
|
||||
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
|
||||
|
@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
|
||||
uint32_t *se_mask)
|
||||
{
|
||||
struct kfd_cu_info cu_info;
|
||||
uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
|
||||
int i, se, sh, cu = 0;
|
||||
|
||||
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
|
||||
int i, se, sh, cu;
|
||||
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
|
||||
|
||||
if (cu_mask_count > cu_info.cu_active_number)
|
||||
cu_mask_count = cu_info.cu_active_number;
|
||||
|
||||
/* Exceeding these bounds corrupts the stack and indicates a coding error.
|
||||
* Returning with no CU's enabled will hang the queue, which should be
|
||||
* attention grabbing.
|
||||
*/
|
||||
if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
|
||||
pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
|
||||
return;
|
||||
}
|
||||
if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
|
||||
pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
|
||||
cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
|
||||
return;
|
||||
}
|
||||
/* Count active CUs per SH.
|
||||
*
|
||||
* Some CUs in an SH may be disabled. HW expects disabled CUs to be
|
||||
* represented in the high bits of each SH's enable mask (the upper and lower
|
||||
* 16 bits of se_mask) and will take care of the actual distribution of
|
||||
* disabled CUs within each SH automatically.
|
||||
* Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
|
||||
*
|
||||
* See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
|
||||
*/
|
||||
for (se = 0; se < cu_info.num_shader_engines; se++)
|
||||
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
|
||||
cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
|
||||
cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
|
||||
|
||||
/* Symmetrically map cu_mask to all SEs:
|
||||
* cu_mask[0] bit0 -> se_mask[0] bit0;
|
||||
* cu_mask[0] bit1 -> se_mask[1] bit0;
|
||||
* ... (if # SE is 4)
|
||||
* cu_mask[0] bit4 -> se_mask[0] bit1;
|
||||
/* Symmetrically map cu_mask to all SEs & SHs:
|
||||
* se_mask programs up to 2 SH in the upper and lower 16 bits.
|
||||
*
|
||||
* Examples
|
||||
* Assuming 1 SH/SE, 4 SEs:
|
||||
* cu_mask[0] bit0 -> se_mask[0] bit0
|
||||
* cu_mask[0] bit1 -> se_mask[1] bit0
|
||||
* ...
|
||||
* cu_mask[0] bit4 -> se_mask[0] bit1
|
||||
* ...
|
||||
*
|
||||
* Assuming 2 SH/SE, 4 SEs
|
||||
* cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
|
||||
* cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
|
||||
* ...
|
||||
* cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
|
||||
* cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
|
||||
* ...
|
||||
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
|
||||
* ...
|
||||
*
|
||||
* First ensure all CUs are disabled, then enable user specified CUs.
|
||||
*/
|
||||
se = 0;
|
||||
for (i = 0; i < cu_mask_count; i++) {
|
||||
if (cu_mask[i / 32] & (1 << (i % 32)))
|
||||
se_mask[se] |= 1 << cu;
|
||||
for (i = 0; i < cu_info.num_shader_engines; i++)
|
||||
se_mask[i] = 0;
|
||||
|
||||
do {
|
||||
se++;
|
||||
if (se == cu_info.num_shader_engines) {
|
||||
se = 0;
|
||||
cu++;
|
||||
i = 0;
|
||||
for (cu = 0; cu < 16; cu++) {
|
||||
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
|
||||
for (se = 0; se < cu_info.num_shader_engines; se++) {
|
||||
if (cu_per_sh[se][sh] > cu) {
|
||||
if (cu_mask[i / 32] & (1 << (i % 32)))
|
||||
se_mask[se] |= 1 << (cu + sh * 16);
|
||||
i++;
|
||||
if (i == cu_mask_count)
|
||||
return;
|
||||
}
|
||||
}
|
||||
} while (cu >= cu_per_se[se] && cu < 32);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "kfd_priv.h"
|
||||
|
||||
#define KFD_MAX_NUM_SE 8
|
||||
#define KFD_MAX_NUM_SH_PER_SE 2
|
||||
|
||||
/**
|
||||
* struct mqd_manager
|
||||
|
@ -95,29 +95,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
str_len = strlen("Current: %d %d %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
|
||||
str_len = strlen("Current: %d 0x%x %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
|
||||
link->cur_link_settings.lane_count,
|
||||
link->cur_link_settings.link_rate,
|
||||
link->cur_link_settings.link_spread);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
str_len = strlen("Verified: %d %d %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
|
||||
str_len = strlen("Verified: %d 0x%x %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
|
||||
link->verified_link_cap.lane_count,
|
||||
link->verified_link_cap.link_rate,
|
||||
link->verified_link_cap.link_spread);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
str_len = strlen("Reported: %d %d %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
|
||||
str_len = strlen("Reported: %d 0x%x %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
|
||||
link->reported_link_cap.lane_count,
|
||||
link->reported_link_cap.link_rate,
|
||||
link->reported_link_cap.link_spread);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
str_len = strlen("Preferred: %d %d %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
|
||||
str_len = strlen("Preferred: %d 0x%x %d ");
|
||||
snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
|
||||
link->preferred_link_setting.lane_count,
|
||||
link->preferred_link_setting.link_rate,
|
||||
link->preferred_link_setting.link_spread);
|
||||
|
@ -3264,13 +3264,12 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
|
||||
struct dc_clock_config clock_cfg = {0};
|
||||
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
|
||||
|
||||
if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
|
||||
dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
|
||||
context, clock_type, &clock_cfg);
|
||||
|
||||
if (!dc->clk_mgr->funcs->get_clock)
|
||||
if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
|
||||
return DC_FAIL_UNSUPPORTED_1;
|
||||
|
||||
dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
|
||||
context, clock_type, &clock_cfg);
|
||||
|
||||
if (clk_khz > clock_cfg.max_clock_khz)
|
||||
return DC_FAIL_CLK_EXCEED_MAX;
|
||||
|
||||
@ -3288,7 +3287,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
|
||||
else
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
|
||||
if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
|
||||
if (dc->clk_mgr->funcs->update_clocks)
|
||||
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
|
||||
context, true);
|
||||
return DC_OK;
|
||||
|
@ -2232,7 +2232,7 @@ void dcn20_set_mcif_arb_params(
|
||||
wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
}
|
||||
wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
|
||||
wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
|
||||
wb_arb_params->slice_lines = 32;
|
||||
wb_arb_params->arbitration_slice = 2;
|
||||
wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
|
||||
|
@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
|
||||
mutex_lock(&dev->filelist_mutex);
|
||||
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
|
||||
struct task_struct *task;
|
||||
bool is_current_master = drm_is_current_master(priv);
|
||||
|
||||
rcu_read_lock(); /* locks pid_task()->comm */
|
||||
task = pid_task(priv->pid, PIDTYPE_PID);
|
||||
@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
|
||||
task ? task->comm : "<unknown>",
|
||||
pid_vnr(priv->pid),
|
||||
priv->minor->index,
|
||||
drm_is_current_master(priv) ? 'y' : 'n',
|
||||
is_current_master ? 'y' : 'n',
|
||||
priv->authenticated ? 'y' : 'n',
|
||||
from_kuid_munged(seq_user_ns(m), uid),
|
||||
priv->magic);
|
||||
|
@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
|
||||
if (switch_mmu_context) {
|
||||
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
|
||||
|
||||
etnaviv_iommu_context_get(mmu_context);
|
||||
gpu->mmu_context = mmu_context;
|
||||
gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
|
||||
etnaviv_iommu_context_put(old_context);
|
||||
}
|
||||
|
||||
|
@ -304,8 +304,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
|
||||
list_del(&mapping->obj_node);
|
||||
}
|
||||
|
||||
etnaviv_iommu_context_get(mmu_context);
|
||||
mapping->context = mmu_context;
|
||||
mapping->context = etnaviv_iommu_context_get(mmu_context);
|
||||
mapping->use = 1;
|
||||
|
||||
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
|
||||
|
@ -534,8 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
goto err_submit_objects;
|
||||
|
||||
submit->ctx = file->driver_priv;
|
||||
etnaviv_iommu_context_get(submit->ctx->mmu);
|
||||
submit->mmu_context = submit->ctx->mmu;
|
||||
submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
|
||||
submit->exec_state = args->exec_state;
|
||||
submit->flags = args->flags;
|
||||
|
||||
|
@ -545,6 +545,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
||||
/* We rely on the GPU running, so program the clock */
|
||||
etnaviv_gpu_update_clock(gpu);
|
||||
|
||||
gpu->fe_running = false;
|
||||
gpu->exec_state = -1;
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -607,19 +613,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
|
||||
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
|
||||
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
|
||||
}
|
||||
|
||||
gpu->fe_running = true;
|
||||
}
|
||||
|
||||
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
|
||||
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_iommu_context *context)
|
||||
{
|
||||
u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
|
||||
&gpu->mmu_context->cmdbuf_mapping);
|
||||
u16 prefetch;
|
||||
u32 address;
|
||||
|
||||
/* setup the MMU */
|
||||
etnaviv_iommu_restore(gpu, gpu->mmu_context);
|
||||
etnaviv_iommu_restore(gpu, context);
|
||||
|
||||
/* Start command processor */
|
||||
prefetch = etnaviv_buffer_init(gpu);
|
||||
address = etnaviv_cmdbuf_get_va(&gpu->buffer,
|
||||
&gpu->mmu_context->cmdbuf_mapping);
|
||||
|
||||
etnaviv_gpu_start_fe(gpu, address, prefetch);
|
||||
}
|
||||
@ -790,7 +800,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
||||
/* Now program the hardware */
|
||||
mutex_lock(&gpu->lock);
|
||||
etnaviv_gpu_hw_init(gpu);
|
||||
gpu->exec_state = -1;
|
||||
mutex_unlock(&gpu->lock);
|
||||
|
||||
pm_runtime_mark_last_busy(gpu->dev);
|
||||
@ -994,8 +1003,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
|
||||
spin_unlock(&gpu->event_spinlock);
|
||||
|
||||
etnaviv_gpu_hw_init(gpu);
|
||||
gpu->exec_state = -1;
|
||||
gpu->mmu_context = NULL;
|
||||
|
||||
mutex_unlock(&gpu->lock);
|
||||
pm_runtime_mark_last_busy(gpu->dev);
|
||||
@ -1306,14 +1313,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (!gpu->mmu_context) {
|
||||
etnaviv_iommu_context_get(submit->mmu_context);
|
||||
gpu->mmu_context = submit->mmu_context;
|
||||
etnaviv_gpu_start_fe_idleloop(gpu);
|
||||
} else {
|
||||
etnaviv_iommu_context_get(gpu->mmu_context);
|
||||
submit->prev_mmu_context = gpu->mmu_context;
|
||||
}
|
||||
if (!gpu->fe_running)
|
||||
etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
|
||||
|
||||
if (submit->prev_mmu_context)
|
||||
etnaviv_iommu_context_put(submit->prev_mmu_context);
|
||||
submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
|
||||
|
||||
if (submit->nr_pmrs) {
|
||||
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
|
||||
@ -1530,7 +1535,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
|
||||
|
||||
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
if (gpu->initialized && gpu->mmu_context) {
|
||||
if (gpu->initialized && gpu->fe_running) {
|
||||
/* Replace the last WAIT with END */
|
||||
mutex_lock(&gpu->lock);
|
||||
etnaviv_buffer_end(gpu);
|
||||
@ -1543,8 +1548,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
|
||||
*/
|
||||
etnaviv_gpu_wait_idle(gpu, 100);
|
||||
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = NULL;
|
||||
gpu->fe_running = false;
|
||||
}
|
||||
|
||||
gpu->exec_state = -1;
|
||||
@ -1692,6 +1696,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
|
||||
etnaviv_gpu_hw_suspend(gpu);
|
||||
#endif
|
||||
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
|
||||
if (gpu->initialized) {
|
||||
etnaviv_cmdbuf_free(&gpu->buffer);
|
||||
etnaviv_iommu_global_fini(gpu);
|
||||
|
@ -101,6 +101,7 @@ struct etnaviv_gpu {
|
||||
struct workqueue_struct *wq;
|
||||
struct drm_gpu_scheduler sched;
|
||||
bool initialized;
|
||||
bool fe_running;
|
||||
|
||||
/* 'ring'-buffer: */
|
||||
struct etnaviv_cmdbuf buffer;
|
||||
|
@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
|
||||
u32 pgtable;
|
||||
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = etnaviv_iommu_context_get(context);
|
||||
|
||||
/* set base addresses */
|
||||
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
|
||||
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
|
||||
|
@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
|
||||
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
|
||||
return;
|
||||
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = etnaviv_iommu_context_get(context);
|
||||
|
||||
prefetch = etnaviv_buffer_config_mmuv2(gpu,
|
||||
(u32)v2_context->mtlb_dma,
|
||||
(u32)context->global->bad_page_dma);
|
||||
@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
|
||||
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
|
||||
return;
|
||||
|
||||
if (gpu->mmu_context)
|
||||
etnaviv_iommu_context_put(gpu->mmu_context);
|
||||
gpu->mmu_context = etnaviv_iommu_context_get(context);
|
||||
|
||||
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
|
||||
lower_32_bits(context->global->v2.pta_dma));
|
||||
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
|
||||
|
@ -204,6 +204,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
|
||||
*/
|
||||
list_for_each_entry_safe(m, n, &list, scan_node) {
|
||||
etnaviv_iommu_remove_mapping(context, m);
|
||||
etnaviv_iommu_context_put(m->context);
|
||||
m->context = NULL;
|
||||
list_del_init(&m->mmu_node);
|
||||
list_del_init(&m->scan_node);
|
||||
|
@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
|
||||
struct etnaviv_iommu_context *
|
||||
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
|
||||
struct etnaviv_cmdbuf_suballoc *suballoc);
|
||||
static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
|
||||
static inline struct etnaviv_iommu_context *
|
||||
etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
|
||||
{
|
||||
kref_get(&ctx->refcount);
|
||||
return ctx;
|
||||
}
|
||||
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
|
||||
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
|
||||
|
@ -140,6 +140,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
|
||||
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
|
||||
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
|
||||
mapping = iommu_get_domain_for_dev(priv->dma_dev);
|
||||
else
|
||||
mapping = ERR_PTR(-ENODEV);
|
||||
|
||||
if (IS_ERR(mapping))
|
||||
return PTR_ERR(mapping);
|
||||
|
@ -108,13 +108,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
|
||||
|
||||
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
|
||||
{
|
||||
int i;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
|
||||
/* see 119ecb7fd */
|
||||
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
|
||||
drm_crtc_vblank_get(crtc);
|
||||
}
|
||||
|
||||
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
|
||||
@ -133,12 +126,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
|
||||
|
||||
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
|
||||
{
|
||||
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
/* see 119ecb7fd */
|
||||
for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
|
||||
drm_crtc_vblank_put(crtc);
|
||||
}
|
||||
|
||||
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
|
||||
@ -418,6 +405,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev->dev);
|
||||
struct mdp4_platform_config *config = mdp4_get_config(pdev);
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct mdp4_kms *mdp4_kms;
|
||||
struct msm_kms *kms = NULL;
|
||||
struct msm_gem_address_space *aspace;
|
||||
@ -432,7 +420,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
|
||||
|
||||
mdp_kms_init(&mdp4_kms->base, &kms_funcs);
|
||||
|
||||
kms = &mdp4_kms->base.base;
|
||||
priv->kms = &mdp4_kms->base.base;
|
||||
kms = priv->kms;
|
||||
|
||||
mdp4_kms->dev = dev;
|
||||
|
||||
|
@@ -52,25 +52,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
			u64 iova, u64 size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)

	/* The size is encoded as ceil(log2) minus(1), which may be calculated
	 * with fls. The size must be clamped to hardware bounds.
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
	region_width = fls64(size - 1) - 1;
	region |= region_width;

	/* Lock the region that needs to be updated */

@@ -81,7 +72,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

@@ -98,7 +89,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
			       u64 iova, u64 size, u32 op)
{
	int ret;

@@ -115,7 +106,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

@@ -131,7 +122,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

@@ -231,7 +222,7 @@ static size_t get_pgsize(u64 addr, size_t size)

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

@@ -318,6 +318,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ		(0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE	(0x3 << 8)

#define AS_LOCK_REGION_MIN_SIZE			(1ULL << 15)

#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)

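Illustrative note: the Panfrost lock_region() hunk above replaces the old "10 + fls(num_pages)" computation with the simpler encoding region_width = fls64(size - 1) - 1, after clamping to AS_LOCK_REGION_MIN_SIZE. The standalone userspace sketch below reproduces that encoding for a few sizes; it uses __builtin_clzll in place of the kernel's fls64() and is not the driver code itself.

```c
/* Sketch of the new Panfrost lock-region size encoding:
 * region_width = fls64(size - 1) - 1, with size clamped to the
 * hardware minimum (1 << 15). Compile with: cc -o lockenc lockenc.c
 */
#include <stdint.h>
#include <stdio.h>

#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)

/* fls64() equivalent: 1-based index of the most significant set bit, 0 for 0 */
static int fls64_equiv(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static uint8_t lock_region_width(uint64_t size)
{
	if (size < AS_LOCK_REGION_MIN_SIZE)
		size = AS_LOCK_REGION_MIN_SIZE;
	/* ceil(log2(size)) - 1, as the comment in the hunk above describes */
	return (uint8_t)(fls64_equiv(size - 1) - 1);
}

int main(void)
{
	/* 32 KiB (the hardware minimum), 64 KiB, and a non-power-of-two 100 KiB */
	uint64_t sizes[] = { 1ULL << 15, 1ULL << 16, 100 * 1024ULL };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size=%llu -> region_width=%u\n",
		       (unsigned long long)sizes[i], lock_region_width(sizes[i]));
	return 0;
}
```

For 32 KiB and 64 KiB this yields 14 and 15, matching what the old "10 + fls(num_pages)" formula produced for power-of-two sizes, while non-power-of-two sizes round up to the next power of two.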
@ -415,8 +415,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
|
||||
|
||||
if (dev->battery_status == HID_BATTERY_UNKNOWN)
|
||||
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
|
||||
else if (dev->battery_capacity == 100)
|
||||
val->intval = POWER_SUPPLY_STATUS_FULL;
|
||||
else
|
||||
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
|
||||
break;
|
||||
|
@ -178,8 +178,6 @@ static const struct i2c_hid_quirks {
|
||||
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
||||
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
|
||||
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
||||
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
||||
I2C_HID_QUIRK_BOGUS_IRQ },
|
||||
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
|
||||
I2C_HID_QUIRK_RESET_ON_RESUME },
|
||||
{ I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
|
||||
@ -190,7 +188,8 @@ static const struct i2c_hid_quirks {
|
||||
* Sending the wakeup after reset actually break ELAN touchscreen controller
|
||||
*/
|
||||
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
||||
I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
|
||||
I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
|
||||
I2C_HID_QUIRK_BOGUS_IRQ },
|
||||
{ 0, 0 }
|
||||
};
|
||||
|
||||
|
@ -229,7 +229,7 @@ static int ad5624r_probe(struct spi_device *spi)
|
||||
if (!indio_dev)
|
||||
return -ENOMEM;
|
||||
st = iio_priv(indio_dev);
|
||||
st->reg = devm_regulator_get(&spi->dev, "vcc");
|
||||
st->reg = devm_regulator_get_optional(&spi->dev, "vref");
|
||||
if (!IS_ERR(st->reg)) {
|
||||
ret = regulator_enable(st->reg);
|
||||
if (ret)
|
||||
@ -240,6 +240,22 @@ static int ad5624r_probe(struct spi_device *spi)
|
||||
goto error_disable_reg;
|
||||
|
||||
voltage_uv = ret;
|
||||
} else {
|
||||
if (PTR_ERR(st->reg) != -ENODEV)
|
||||
return PTR_ERR(st->reg);
|
||||
/* Backwards compatibility. This naming is not correct */
|
||||
st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
|
||||
if (!IS_ERR(st->reg)) {
|
||||
ret = regulator_enable(st->reg);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = regulator_get_voltage(st->reg);
|
||||
if (ret < 0)
|
||||
goto error_disable_reg;
|
||||
|
||||
voltage_uv = ret;
|
||||
}
|
||||
}
|
||||
|
||||
spi_set_drvdata(spi, indio_dev);
|
||||
|
@ -1187,29 +1187,34 @@ static int __init iw_cm_init(void)
|
||||
|
||||
ret = iwpm_init(RDMA_NL_IWCM);
|
||||
if (ret)
|
||||
pr_err("iw_cm: couldn't init iwpm\n");
|
||||
else
|
||||
rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
|
||||
return ret;
|
||||
|
||||
iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
|
||||
if (!iwcm_wq)
|
||||
return -ENOMEM;
|
||||
goto err_alloc;
|
||||
|
||||
iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
|
||||
iwcm_ctl_table);
|
||||
if (!iwcm_ctl_table_hdr) {
|
||||
pr_err("iw_cm: couldn't register sysctl paths\n");
|
||||
destroy_workqueue(iwcm_wq);
|
||||
return -ENOMEM;
|
||||
goto err_sysctl;
|
||||
}
|
||||
|
||||
rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
|
||||
return 0;
|
||||
|
||||
err_sysctl:
|
||||
destroy_workqueue(iwcm_wq);
|
||||
err_alloc:
|
||||
iwpm_exit(RDMA_NL_IWCM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void __exit iw_cm_cleanup(void)
|
||||
{
|
||||
rdma_nl_unregister(RDMA_NL_IWCM);
|
||||
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
|
||||
destroy_workqueue(iwcm_wq);
|
||||
rdma_nl_unregister(RDMA_NL_IWCM);
|
||||
iwpm_exit(RDMA_NL_IWCM);
|
||||
}
|
||||
|
||||
|
@ -745,7 +745,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
|
||||
rq_entry_inserted = true;
|
||||
qp->qp_handle = create_qp_resp.qp_handle;
|
||||
qp->ibqp.qp_num = create_qp_resp.qp_num;
|
||||
qp->ibqp.qp_type = init_attr->qp_type;
|
||||
qp->max_send_wr = init_attr->cap.max_send_wr;
|
||||
qp->max_recv_wr = init_attr->cap.max_recv_wr;
|
||||
qp->max_send_sge = init_attr->cap.max_send_sge;
|
||||
|
@ -664,12 +664,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
||||
|
||||
ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
|
||||
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
|
||||
|
||||
if (loopback) {
|
||||
dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
|
||||
!default_pkey_idx);
|
||||
ppd->pkeys[!default_pkey_idx] = 0x8001;
|
||||
}
|
||||
ppd->pkeys[0] = 0x8001;
|
||||
|
||||
INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
|
||||
INIT_WORK(&ppd->link_up_work, handle_link_up);
|
||||
|
@@ -2092,7 +2092,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
	struct crypt_config *cc = pool_data;
	struct page *page;

	if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
	/*
	 * Note, percpu_counter_read_positive() may over (and under) estimate
	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
	 * but avoids potential spinlock contention of an exact result.
	 */
	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

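Illustrative note: the dm-crypt hunk above swaps the exact (lock-taking) percpu_counter_compare() for the cheaper percpu_counter_read_positive(), accepting an error of at most (batch - 1) * num_online_cpus() pages. The sketch below is a self-contained userspace model of that trade-off with made-up batch and CPU counts; it is not the kernel percpu_counter API.

```c
/* Model of the approximation accepted by the dm-crypt change above:
 * per-CPU deltas are only folded into the global count once they reach
 * `BATCH`, so a cheap read of the global count can trail an exact sum
 * by up to (BATCH - 1) * NCPUS. Compile with: cc -o pcpu pcpu.c
 */
#include <stdio.h>

#define NCPUS 4
#define BATCH 32	/* assumed batch size, for illustration only */

static long global_count;
static long pcpu_delta[NCPUS];

/* percpu_counter_add()-style update on one CPU */
static void counter_add(int cpu, long amount)
{
	pcpu_delta[cpu] += amount;
	if (pcpu_delta[cpu] >= BATCH || pcpu_delta[cpu] <= -BATCH) {
		global_count += pcpu_delta[cpu];	/* slow path: would take the lock */
		pcpu_delta[cpu] = 0;
	}
}

int main(void)
{
	long exact = 0;

	/* each CPU allocates a few pages, mostly staying under the batch */
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int i = 0; i < 40; i++) {
			counter_add(cpu, 1);
			exact++;
		}

	/* percpu_counter_read_positive()-style read: global count only */
	long approx = global_count;

	printf("exact=%ld approx=%ld max error=%d\n",
	       exact, approx, (BATCH - 1) * NCPUS);
	return 0;
}
```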
@ -2107,32 +2107,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
|
||||
dib8000_write_word(state, 117 + mode, ana_fe[mode]);
|
||||
}
|
||||
|
||||
static const u16 lut_prbs_2k[14] = {
|
||||
0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
|
||||
static const u16 lut_prbs_2k[13] = {
|
||||
0x423, 0x009, 0x5C7,
|
||||
0x7A6, 0x3D8, 0x527,
|
||||
0x7FF, 0x79B, 0x3D6,
|
||||
0x3A2, 0x53B, 0x2F4,
|
||||
0x213
|
||||
};
|
||||
static const u16 lut_prbs_4k[14] = {
|
||||
0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
|
||||
|
||||
static const u16 lut_prbs_4k[13] = {
|
||||
0x208, 0x0C3, 0x7B9,
|
||||
0x423, 0x5C7, 0x3D8,
|
||||
0x7FF, 0x3D6, 0x53B,
|
||||
0x213, 0x029, 0x0D0,
|
||||
0x48E
|
||||
};
|
||||
static const u16 lut_prbs_8k[14] = {
|
||||
0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
|
||||
|
||||
static const u16 lut_prbs_8k[13] = {
|
||||
0x740, 0x069, 0x7DD,
|
||||
0x208, 0x7B9, 0x5C7,
|
||||
0x7FF, 0x53B, 0x029,
|
||||
0x48E, 0x4C4, 0x367,
|
||||
0x684
|
||||
};
|
||||
|
||||
static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
|
||||
{
|
||||
int sub_channel_prbs_group = 0;
|
||||
int prbs_group;
|
||||
|
||||
sub_channel_prbs_group = (subchannel / 3) + 1;
|
||||
dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
|
||||
sub_channel_prbs_group = subchannel / 3;
|
||||
if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
|
||||
return 0;
|
||||
|
||||
switch (state->fe[0]->dtv_property_cache.transmission_mode) {
|
||||
case TRANSMISSION_MODE_2K:
|
||||
return lut_prbs_2k[sub_channel_prbs_group];
|
||||
prbs_group = lut_prbs_2k[sub_channel_prbs_group];
|
||||
break;
|
||||
case TRANSMISSION_MODE_4K:
|
||||
return lut_prbs_4k[sub_channel_prbs_group];
|
||||
prbs_group = lut_prbs_4k[sub_channel_prbs_group];
|
||||
break;
|
||||
default:
|
||||
case TRANSMISSION_MODE_8K:
|
||||
return lut_prbs_8k[sub_channel_prbs_group];
|
||||
prbs_group = lut_prbs_8k[sub_channel_prbs_group];
|
||||
}
|
||||
|
||||
dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n",
|
||||
sub_channel_prbs_group, subchannel, prbs_group);
|
||||
|
||||
return prbs_group;
|
||||
}
|
||||
|
||||
static void dib8000_set_13seg_channel(struct dib8000_state *state)
|
||||
@ -2409,10 +2432,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
|
||||
/* TSB or ISDBT ? apply it now */
|
||||
if (c->isdbt_sb_mode) {
|
||||
dib8000_set_sb_channel(state);
|
||||
if (c->isdbt_sb_subchannel < 14)
|
||||
init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
|
||||
else
|
||||
init_prbs = 0;
|
||||
init_prbs = dib8000_get_init_prbs(state,
|
||||
c->isdbt_sb_subchannel);
|
||||
} else {
|
||||
dib8000_set_13seg_channel(state);
|
||||
init_prbs = 0xfff;
|
||||
@ -3004,6 +3025,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
|
||||
|
||||
unsigned long *timeout = &state->timeout;
|
||||
unsigned long now = jiffies;
|
||||
u16 init_prbs;
|
||||
#ifdef DIB8000_AGC_FREEZE
|
||||
u16 agc1, agc2;
|
||||
#endif
|
||||
@ -3302,8 +3324,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
|
||||
break;
|
||||
|
||||
case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
|
||||
if (state->subchannel <= 41) {
|
||||
dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
|
||||
init_prbs = dib8000_get_init_prbs(state, state->subchannel);
|
||||
|
||||
if (init_prbs) {
|
||||
dib8000_set_subchannel_prbs(state, init_prbs);
|
||||
*tune_state = CT_DEMOD_STEP_9;
|
||||
} else {
|
||||
*tune_state = CT_DEMOD_STOP;
|
||||
|
@ -22,7 +22,7 @@
|
||||
#define IMX258_CHIP_ID 0x0258
|
||||
|
||||
/* V_TIMING internal */
|
||||
#define IMX258_VTS_30FPS 0x0c98
|
||||
#define IMX258_VTS_30FPS 0x0c50
|
||||
#define IMX258_VTS_30FPS_2K 0x0638
|
||||
#define IMX258_VTS_30FPS_VGA 0x034c
|
||||
#define IMX258_VTS_MAX 0xffff
|
||||
@ -46,7 +46,7 @@
|
||||
/* Analog gain control */
|
||||
#define IMX258_REG_ANALOG_GAIN 0x0204
|
||||
#define IMX258_ANA_GAIN_MIN 0
|
||||
#define IMX258_ANA_GAIN_MAX 0x1fff
|
||||
#define IMX258_ANA_GAIN_MAX 480
|
||||
#define IMX258_ANA_GAIN_STEP 1
|
||||
#define IMX258_ANA_GAIN_DEFAULT 0x0
|
||||
|
||||
|
@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
|
||||
struct v4l2_dv_timings *timings)
|
||||
{
|
||||
struct tda1997x_state *state = to_state(sd);
|
||||
int ret;
|
||||
|
||||
v4l_dbg(1, debug, state->client, "%s\n", __func__);
|
||||
memset(timings, 0, sizeof(struct v4l2_dv_timings));
|
||||
mutex_lock(&state->lock);
|
||||
tda1997x_detect_std(state, timings);
|
||||
ret = tda1997x_detect_std(state, timings);
|
||||
mutex_unlock(&state->lock);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
|
||||
|
@ -366,7 +366,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
clk_prepare_enable(cec->clk);
|
||||
ret = clk_prepare_enable(cec->clk);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* set context info. */
|
||||
cec->dev = &pdev->dev;
|
||||
@ -446,9 +450,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
|
||||
|
||||
dev_notice(&pdev->dev, "Resuming\n");
|
||||
|
||||
clk_prepare_enable(cec->clk);
|
||||
|
||||
return 0;
|
||||
return clk_prepare_enable(cec->clk);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -42,7 +42,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
|
||||
|
||||
if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
|
||||
dprintk("invalid tx mask: %u\n", mask);
|
||||
return -EINVAL;
|
||||
return 2;
|
||||
}
|
||||
|
||||
dprintk("setting tx mask: %u\n", mask);
|
||||
|
@ -894,8 +894,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
|
||||
{
|
||||
struct uvc_fh *handle = fh;
|
||||
struct uvc_video_chain *chain = handle->chain;
|
||||
u8 *buf;
|
||||
int ret;
|
||||
u8 i;
|
||||
|
||||
if (chain->selector == NULL ||
|
||||
(chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
|
||||
@ -903,22 +903,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
|
||||
return 0;
|
||||
}
|
||||
|
||||
buf = kmalloc(1, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
|
||||
chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
||||
&i, 1);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
buf, 1);
|
||||
if (!ret)
|
||||
*input = *buf - 1;
|
||||
|
||||
*input = i - 1;
|
||||
return 0;
|
||||
kfree(buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
|
||||
{
|
||||
struct uvc_fh *handle = fh;
|
||||
struct uvc_video_chain *chain = handle->chain;
|
||||
u8 *buf;
|
||||
int ret;
|
||||
u32 i;
|
||||
|
||||
ret = uvc_acquire_privileges(handle);
|
||||
if (ret < 0)
|
||||
@ -934,10 +939,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
|
||||
if (input >= chain->selector->bNrInPins)
|
||||
return -EINVAL;
|
||||
|
||||
i = input + 1;
|
||||
return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
|
||||
chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
||||
&i, 1);
|
||||
buf = kmalloc(1, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
*buf = input + 1;
|
||||
ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
|
||||
chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
||||
buf, 1);
|
||||
kfree(buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int uvc_ioctl_queryctrl(struct file *file, void *fh,
|
||||
|
@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
|
||||
if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
|
||||
return false;
|
||||
|
||||
for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
|
||||
for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
|
||||
if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
|
||||
fnc, fnc_handle) &&
|
||||
v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
|
||||
@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
|
||||
for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
|
||||
const struct v4l2_bt_timings *bt =
|
||||
&v4l2_dv_timings_presets[i].bt;
|
||||
|
||||
|
@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
|
||||
if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
|
||||
line += 1;
|
||||
|
||||
handle_nested_irq(irq_create_mapping(ab8500->domain, line));
|
||||
handle_nested_irq(irq_find_mapping(ab8500->domain, line));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
|
||||
|
||||
static const struct regmap_range axp288_volatile_ranges[] = {
|
||||
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
|
||||
regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
|
||||
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
|
||||
regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
|
||||
regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
|
||||
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
|
||||
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
|
||||
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
|
||||
regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
|
||||
regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
|
||||
regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
|
||||
};
|
||||
|
@ -1695,22 +1695,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
|
||||
}
|
||||
|
||||
static const unsigned long db8500_armss_freqs[] = {
|
||||
200000000,
|
||||
400000000,
|
||||
800000000,
|
||||
199680000,
|
||||
399360000,
|
||||
798720000,
|
||||
998400000
|
||||
};
|
||||
|
||||
/* The DB8520 has slightly higher ARMSS max frequency */
|
||||
static const unsigned long db8520_armss_freqs[] = {
|
||||
200000000,
|
||||
400000000,
|
||||
800000000,
|
||||
199680000,
|
||||
399360000,
|
||||
798720000,
|
||||
1152000000
|
||||
};
|
||||
|
||||
|
||||
|
||||
static long round_armss_rate(unsigned long rate)
|
||||
{
|
||||
unsigned long freq = 0;
|
||||
|
@ -1091,7 +1091,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
|
||||
|
||||
if (variant->id_val == STMPE801_ID ||
|
||||
variant->id_val == STMPE1600_ID) {
|
||||
int base = irq_create_mapping(stmpe->domain, 0);
|
||||
int base = irq_find_mapping(stmpe->domain, 0);
|
||||
|
||||
handle_nested_irq(base);
|
||||
return IRQ_HANDLED;
|
||||
@ -1119,7 +1119,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
|
||||
while (status) {
|
||||
int bit = __ffs(status);
|
||||
int line = bank * 8 + bit;
|
||||
int nestedirq = irq_create_mapping(stmpe->domain, line);
|
||||
int nestedirq = irq_find_mapping(stmpe->domain, line);
|
||||
|
||||
handle_nested_irq(nestedirq);
|
||||
status &= ~(1 << bit);
|
||||
|
@ -187,7 +187,7 @@ static irqreturn_t tc3589x_irq(int irq, void *data)
|
||||
|
||||
while (status) {
|
||||
int bit = __ffs(status);
|
||||
int virq = irq_create_mapping(tc3589x->domain, bit);
|
||||
int virq = irq_find_mapping(tc3589x->domain, bit);
|
||||
|
||||
handle_nested_irq(virq);
|
||||
status &= ~(1 << bit);
|
||||
|
@ -210,6 +210,8 @@ static int tqmx86_probe(struct platform_device *pdev)
|
||||
|
||||
/* Assumes the IRQ resource is first. */
|
||||
tqmx_gpio_resources[0].start = gpio_irq;
|
||||
} else {
|
||||
tqmx_gpio_resources[0].flags = 0;
|
||||
}
|
||||
|
||||
ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
|
||||
|
@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
|
||||
struct wm8994 *wm8994 = data;
|
||||
|
||||
while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
|
||||
handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
|
||||
handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -2238,7 +2238,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
|
||||
|
||||
result = VMCI_SUCCESS;
|
||||
|
||||
if (context_id != VMCI_HOST_CONTEXT_ID) {
|
||||
if (context_id != VMCI_HOST_CONTEXT_ID &&
|
||||
!QPBROKERSTATE_HAS_MEM(entry)) {
|
||||
struct vmci_qp_page_store page_store;
|
||||
|
||||
page_store.pages = guest_mem;
|
||||
@ -2345,7 +2346,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (context_id != VMCI_HOST_CONTEXT_ID) {
|
||||
if (context_id != VMCI_HOST_CONTEXT_ID &&
|
||||
QPBROKERSTATE_HAS_MEM(entry)) {
|
||||
qp_acquire_queue_mutex(entry->produce_q);
|
||||
result = qp_save_headers(entry);
|
||||
if (result < VMCI_SUCCESS)
|
||||
|
@ -597,6 +597,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
||||
}
|
||||
|
||||
mmc_wait_for_req(card->host, &mrq);
|
||||
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
|
||||
|
||||
if (cmd.error) {
|
||||
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
|
||||
@ -646,8 +647,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
||||
if (idata->ic.postsleep_min_us)
|
||||
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
|
||||
|
||||
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
|
||||
|
||||
if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
|
||||
/*
|
||||
* Ensure RPMB/R1B command has completed by polling CMD13
|
||||
|
@ -539,23 +539,6 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_data *data = mrq->data;
|
||||
|
||||
if (host->sg_count < 0) {
|
||||
data->error = host->sg_count;
|
||||
dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
|
||||
__func__, host->sg_count);
|
||||
return data->error;
|
||||
}
|
||||
|
||||
if (data->flags & MMC_DATA_READ)
|
||||
return sd_read_long_data(host, mrq);
|
||||
|
||||
return sd_write_long_data(host, mrq);
|
||||
}
|
||||
|
||||
static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
|
||||
{
|
||||
rtsx_pci_write_register(host->pcr, SD_CFG1,
|
||||
@ -568,6 +551,33 @@ static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
|
||||
SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
|
||||
}
|
||||
|
||||
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_data *data = mrq->data;
|
||||
int err;
|
||||
|
||||
if (host->sg_count < 0) {
|
||||
data->error = host->sg_count;
|
||||
dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
|
||||
__func__, host->sg_count);
|
||||
return data->error;
|
||||
}
|
||||
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
if (host->initial_mode)
|
||||
sd_disable_initial_mode(host);
|
||||
|
||||
err = sd_read_long_data(host, mrq);
|
||||
|
||||
if (host->initial_mode)
|
||||
sd_enable_initial_mode(host);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
return sd_write_long_data(host, mrq);
|
||||
}
|
||||
|
||||
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
|
||||
struct mmc_request *mrq)
|
||||
{
|
||||
|
@ -192,7 +192,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
* through low speeds without power cycling.
|
||||
*/
|
||||
sdhci_set_clock(host, host->max_clk);
|
||||
phy_power_on(sdhci_arasan->phy);
|
||||
if (phy_power_on(sdhci_arasan->phy)) {
|
||||
pr_err("%s: Cannot power on phy.\n",
|
||||
mmc_hostname(host->mmc));
|
||||
return;
|
||||
}
|
||||
|
||||
sdhci_arasan->is_phy_on = true;
|
||||
|
||||
/*
|
||||
@ -228,7 +233,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
msleep(20);
|
||||
|
||||
if (ctrl_phy) {
|
||||
phy_power_on(sdhci_arasan->phy);
|
||||
if (phy_power_on(sdhci_arasan->phy)) {
|
||||
pr_err("%s: Cannot power on phy.\n",
|
||||
mmc_hostname(host->mmc));
|
||||
return;
|
||||
}
|
||||
|
||||
sdhci_arasan->is_phy_on = true;
|
||||
}
|
||||
}
|
||||
@ -416,7 +426,9 @@ static int sdhci_arasan_suspend(struct device *dev)
|
||||
ret = phy_power_off(sdhci_arasan->phy);
|
||||
if (ret) {
|
||||
dev_err(dev, "Cannot power off phy.\n");
|
||||
sdhci_resume_host(host);
|
||||
if (sdhci_resume_host(host))
|
||||
dev_err(dev, "Cannot resume host.\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
sdhci_arasan->is_phy_on = false;
|
||||
|
Some files were not shown because too many files have changed in this diff.