Merge "Merge android11-5.4.61+ (364ec3d) into msm-5.4"

qctecmdr 2021-02-24 20:57:37 -08:00 committed by Gerrit - the friendly Code Review server
commit 18afa4d355
17 changed files with 299 additions and 173 deletions

View File

@@ -1 +1 @@
-LTS_5.4.61_5327444e834f
+LTS_5.4.61_364ec3d27d09

View File

@@ -30,6 +30,7 @@
bpf_trace_run4
__breadahead
bus_register
bus_set_iommu
bus_unregister
call_rcu
cancel_delayed_work
@@ -110,6 +111,7 @@
device_destroy
device_initialize
device_init_wakeup
device_link_add
device_match_fwnode
device_match_name
device_property_present
@@ -180,6 +182,10 @@
drm_dev_register
drm_err
drm_ioctl
drm_mm_init
drm_mm_insert_node_in_range
drm_mm_remove_node
drm_mm_takedown
drm_mode_config_cleanup
drm_mode_config_init
drm_open
@@ -281,7 +287,11 @@
iommu_alloc_resv_region
iommu_attach_device
iommu_detach_device
iommu_device_link
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
iommu_device_unlink
iommu_device_unregister
iommu_dma_get_resv_regions
iommu_domain_alloc
@@ -292,6 +302,7 @@
iommu_fwspec_free
iommu_get_dma_cookie
iommu_get_domain_for_dev
iommu_group_alloc
iommu_group_get
iommu_group_get_for_dev
iommu_group_get_iommudata
@@ -995,6 +1006,7 @@
skb_checksum_help
strim
xfrm_lookup
completion_done
# required by kfifo_buf.ko
devres_add
@@ -1144,6 +1156,7 @@
# required by pvrsrvkm.ko
autoremove_wake_function
bpf_trace_run6
bpf_trace_run7
bpf_trace_run8
cache_line_size
@@ -1196,6 +1209,7 @@
get_unused_fd_flags
idr_preload
idr_replace
ion_query_heaps_kernel
kill_pid
ksize
kthread_freezable_should_stop
@@ -1207,6 +1221,7 @@
of_modalias_node
on_each_cpu
prepare_to_wait
proc_remove
put_unused_fd
_raw_read_lock_bh
_raw_read_unlock_bh
@@ -1222,6 +1237,7 @@
strsep
sync_file_create
sync_file_get_fence
__task_pid_nr_ns
trace_print_symbols_seq
trace_set_clr_event
unmap_mapping_range
@@ -1993,6 +2009,7 @@
# required by usb_f_mtp.ko
usb_os_desc_prepare_interf_dir
usb_string_id
config_group_init_type_name
# required by usbserial.ko
device_del
@@ -2549,3 +2566,7 @@
kobject_init_and_add
sched_setscheduler_nocheck
strpbrk
# required by sprd_wdf.ko
kstrtoull_from_user
smpboot_register_percpu_thread

View File

@@ -1015,7 +1015,10 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
     return probe.present ? 0 : -ENODEV;
 }

-static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
+                          int (*map_event)(struct perf_event *event),
+                          const struct attribute_group *events,
+                          const struct attribute_group *format)
 {
     int ret = armv8pmu_probe_pmu(cpu_pmu);

     if (ret)
@@ -1034,144 +1037,127 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
     cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
     cpu_pmu->filter_match = armv8pmu_filter_match;
+    cpu_pmu->name = name;
+    cpu_pmu->map_event = map_event;
+    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
+            events : &armv8_pmuv3_events_attr_group;
+    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
+            format : &armv8_pmuv3_format_attr_group;

     return 0;
 }

 static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_pmuv3";
-    cpu_pmu->map_event = armv8_pmuv3_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_pmuv3",
+                          armv8_pmuv3_map_event, NULL, NULL);
 }

+static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cortex_a35";
-    cpu_pmu->map_event = armv8_a53_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35",
+                          armv8_a53_map_event, NULL, NULL);
 }

 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cortex_a53";
-    cpu_pmu->map_event = armv8_a53_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53",
+                          armv8_a53_map_event, NULL, NULL);
 }

+static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cortex_a57";
-    cpu_pmu->map_event = armv8_a57_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57",
+                          armv8_a57_map_event, NULL, NULL);
 }

+static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cortex_a72";
-    cpu_pmu->map_event = armv8_a57_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72",
+                          armv8_a57_map_event, NULL, NULL);
 }

 static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cortex_a73";
-    cpu_pmu->map_event = armv8_a73_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73",
+                          armv8_a73_map_event, NULL, NULL);
 }

+static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a75",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
+static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
+{
+    return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1",
+                          armv8_pmuv3_map_event, NULL, NULL);
+}
+
 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_cavium_thunder";
-    cpu_pmu->map_event = armv8_thunder_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder",
+                          armv8_thunder_map_event, NULL, NULL);
 }

 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
 {
-    int ret = armv8_pmu_init(cpu_pmu);
-    if (ret)
-        return ret;
-    cpu_pmu->name = "armv8_brcm_vulcan";
-    cpu_pmu->map_event = armv8_vulcan_map_event;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
-        &armv8_pmuv3_events_attr_group;
-    cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
-        &armv8_pmuv3_format_attr_group;
-    return 0;
+    return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan",
+                          armv8_vulcan_map_event, NULL, NULL);
 }

 static const struct of_device_id armv8_pmu_of_device_ids[] = {
     {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
+    {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init},
     {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
     {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+    {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init},
     {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+    {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init},
     {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
     {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+    {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
+    {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
+    {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
+    {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
+    {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
     {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
     {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
     {},

View File

@@ -67,3 +67,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_regs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_map_util_freq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_em_pd_energy);

View File

@@ -200,6 +200,7 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
     sch_ep->sch_tt = tt;
     sch_ep->ep = ep;
+    INIT_LIST_HEAD(&sch_ep->tt_endpoint);

     return sch_ep;
 }
@@ -583,6 +584,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
     mtk->sch_array = sch_array;

+    INIT_LIST_HEAD(&mtk->bw_ep_list_new);
+
     return 0;
 }
 EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +604,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
     struct xhci_ep_ctx *ep_ctx;
     struct xhci_slot_ctx *slot_ctx;
     struct xhci_virt_device *virt_dev;
-    struct mu3h_sch_bw_info *sch_bw;
     struct mu3h_sch_ep_info *sch_ep;
-    struct mu3h_sch_bw_info *sch_array;
     unsigned int ep_index;
-    int bw_index;
     int ret = 0;

     xhci = hcd_to_xhci(hcd);
     virt_dev = xhci->devs[udev->slot_id];
     ep_index = xhci_get_endpoint_index(&ep->desc);
     slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
     ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
-    sch_array = mtk->sch_array;

     xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
              __func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,40 +630,35 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
         return 0;
     }

-    bw_index = get_bw_index(xhci, udev, ep);
-    sch_bw = &sch_array[bw_index];
-
     sch_ep = create_sch_ep(udev, ep, ep_ctx);
     if (IS_ERR_OR_NULL(sch_ep))
         return -ENOMEM;

     setup_sch_info(udev, ep_ctx, sch_ep);

-    ret = check_sch_bw(udev, sch_bw, sch_ep);
-    if (ret) {
-        xhci_err(xhci, "Not enough bandwidth!\n");
-        if (is_fs_or_ls(udev->speed))
-            drop_tt(udev);
-
-        kfree(sch_ep);
-        return -ENOSPC;
-    }
-
-    list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
-    ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
-        | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
-    ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
-        | EP_BREPEAT(sch_ep->repeat));
-
-    xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
-             sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
-             sch_ep->offset, sch_ep->repeat);
+    list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_list_new);

     return 0;
 }
 EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk);

+static void xhci_mtk_drop_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
+                             struct mu3h_sch_ep_info *sch_ep)
+{
+    struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
+    int bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+    struct mu3h_sch_bw_info *sch_bw = &mtk->sch_array[bw_index];
+
+    update_bus_bw(sch_bw, sch_ep, 0);
+    list_del(&sch_ep->endpoint);
+
+    if (sch_ep->sch_tt) {
+        list_del(&sch_ep->tt_endpoint);
+        drop_tt(udev);
+    }
+    kfree(sch_ep);
+}
+
 void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                             struct usb_host_endpoint *ep)
 {
@@ -675,7 +668,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
     struct xhci_virt_device *virt_dev;
     struct mu3h_sch_bw_info *sch_array;
     struct mu3h_sch_bw_info *sch_bw;
-    struct mu3h_sch_ep_info *sch_ep;
+    struct mu3h_sch_ep_info *sch_ep, *tmp;
     int bw_index;

     xhci = hcd_to_xhci(hcd);
@@ -694,17 +687,73 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
     bw_index = get_bw_index(xhci, udev, ep);
     sch_bw = &sch_array[bw_index];

-    list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+    list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
         if (sch_ep->ep == ep) {
-            update_bus_bw(sch_bw, sch_ep, 0);
-            list_del(&sch_ep->endpoint);
-            if (is_fs_or_ls(udev->speed)) {
-                list_del(&sch_ep->tt_endpoint);
-                drop_tt(udev);
-            }
-            kfree(sch_ep);
-            break;
+            xhci_mtk_drop_ep(mtk, udev, sch_ep);
         }
     }
 }
 EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);

+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+    struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+    struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+    struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+    struct mu3h_sch_bw_info *sch_bw;
+    struct mu3h_sch_ep_info *sch_ep, *tmp;
+    int bw_index, ret;
+
+    dev_dbg(&udev->dev, "%s\n", __func__);
+
+    list_for_each_entry(sch_ep, &mtk->bw_ep_list_new, endpoint) {
+        bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+        sch_bw = &mtk->sch_array[bw_index];
+
+        ret = check_sch_bw(udev, sch_bw, sch_ep);
+        if (ret) {
+            xhci_err(xhci, "Not enough bandwidth!\n");
+            return -ENOSPC;
+        }
+    }
+
+    list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_list_new, endpoint) {
+        struct xhci_ep_ctx *ep_ctx;
+        struct usb_host_endpoint *ep = sch_ep->ep;
+        unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+        bw_index = get_bw_index(xhci, udev, ep);
+        sch_bw = &mtk->sch_array[bw_index];
+
+        list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+        ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+            | EP_BCSCOUNT(sch_ep->cs_count)
+            | EP_BBM(sch_ep->burst_mode));
+        ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+            | EP_BREPEAT(sch_ep->repeat));
+
+        xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+                 sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+                 sch_ep->offset, sch_ep->repeat);
+    }
+
+    return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+    struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+    struct mu3h_sch_ep_info *sch_ep, *tmp;
+
+    dev_dbg(&udev->dev, "%s\n", __func__);
+
+    list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_list_new, endpoint) {
+        xhci_mtk_drop_ep(mtk, udev, sch_ep);
+    }
+
+    xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
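Editor's note: taken together, these hunks turn bandwidth scheduling into a two-phase commit. xhci_mtk_add_ep_quirk() now only parks a configured endpoint on mtk->bw_ep_list_new; the admission check happens later, for all pending endpoints at once. A sketch of the resulting flow as I read it from the diff (comments only, not driver code):

    /* Phase 1: per-endpoint setup
     *   xhci_mtk_add_ep_quirk(): create_sch_ep() + setup_sch_info(),
     *   then list_add_tail() onto mtk->bw_ep_list_new, no check yet.
     *
     * Phase 2a: commit
     *   xhci_mtk_check_bandwidth(): check_sch_bw() for every pending
     *   endpoint; on success, list_move_tail() each onto its per-bus
     *   sch_bw->bw_ep_list, program ep_ctx->reserved[], then fall
     *   through to the generic xhci_check_bandwidth().
     *
     * Phase 2b: rollback
     *   xhci_mtk_reset_bandwidth(): xhci_mtk_drop_ep() for every
     *   still-pending endpoint, then xhci_reset_bandwidth().
     */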

View File

@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
 static int xhci_mtk_setup(struct usb_hcd *hcd);
 static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
     .reset = xhci_mtk_setup,
+    .check_bandwidth = xhci_mtk_check_bandwidth,
+    .reset_bandwidth = xhci_mtk_reset_bandwidth,
 };

 static struct hc_driver __read_mostly xhci_mtk_hc_driver;

View File

@@ -130,6 +130,7 @@ struct mu3c_ippc_regs {
 struct xhci_hcd_mtk {
     struct device *dev;
     struct usb_hcd *hcd;
+    struct list_head bw_ep_list_new;
     struct mu3h_sch_bw_info *sch_array;
     struct mu3c_ippc_regs __iomem *ippc_regs;
     bool has_ippc;
@@ -166,6 +167,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
     struct usb_host_endpoint *ep);
 void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
     struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);

 #else
 static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
@@ -179,6 +182,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
 {
 }

+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+        struct usb_device *udev)
+{
+    return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+        struct usb_device *udev)
+{
+}
 #endif

 #endif /* _XHCI_MTK_H_ */

View File

@@ -2900,7 +2900,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
  * else should be touching the xhci->devs[slot_id] structure, so we
  * don't need to take the xhci->lock for manipulating that.
  */
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
     int i;
     int ret = 0;
@@ -2997,7 +2997,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     return ret;
 }

-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
     struct xhci_hcd *xhci;
     struct xhci_virt_device *virt_dev;
@@ -5558,6 +5558,10 @@ void xhci_init_driver(struct hc_driver *drv,
             drv->reset = over->reset;
         if (over->start)
             drv->start = over->start;
+        if (over->check_bandwidth)
+            drv->check_bandwidth = over->check_bandwidth;
+        if (over->reset_bandwidth)
+            drv->reset_bandwidth = over->reset_bandwidth;
     }
 }
 EXPORT_SYMBOL_GPL(xhci_init_driver);
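Editor's note: xhci_init_driver() now copies the two new override points into the hc_driver, which is what lets platform glue interpose on bandwidth negotiation. A minimal sketch of a consumer, modeled on the xhci-mtk wiring shown earlier in this commit; the "foo" names are placeholders:

    static const struct xhci_driver_overrides xhci_foo_overrides __initconst = {
        .reset           = xhci_foo_setup,
        .check_bandwidth = xhci_foo_check_bandwidth,
        .reset_bandwidth = xhci_foo_reset_bandwidth,
    };

    static int __init xhci_foo_init(void)
    {
        /* populates drv->check_bandwidth/reset_bandwidth from the overrides */
        xhci_init_driver(&xhci_foo_hc_driver, &xhci_foo_overrides);
        return platform_driver_register(&xhci_foo_driver);
    }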

View File

@@ -1918,6 +1918,8 @@ struct xhci_driver_overrides {
     size_t extra_priv_size;
     int (*reset)(struct usb_hcd *hcd);
     int (*start)(struct usb_hcd *hcd);
+    int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+    void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
 };

 #define XHCI_CFC_DELAY 10
@@ -2072,6 +2074,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_shutdown(struct usb_hcd *hcd);
 void xhci_init_driver(struct hc_driver *drv,
                       const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
 int xhci_ext_cap_init(struct xhci_hcd *xhci);

View File

@@ -29,4 +29,7 @@
 #define ANDROID_VENDOR_DATA(n) u64 android_vendor_data##n
 #define ANDROID_VENDOR_DATA_ARRAY(n, s) u64 android_vendor_data##n[s]

 #define ANDROID_OEM_DATA(n) u64 android_oem_data##n
+#define ANDROID_OEM_DATA_ARRAY(n, s) u64 android_oem_data##n[s]

 #endif /* _ANDROID_VENDOR_H */
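Editor's note: ANDROID_OEM_DATA_ARRAY(n, s) mirrors the existing vendor-data macros, reserving s u64 slots of OEM scratch space inside a kernel struct. A sketch of intended use; the struct here is hypothetical, but the expansions follow literally from the #defines above:

    struct foo_stats {
        u64 count;
        ANDROID_OEM_DATA(1);          /* expands to: u64 android_oem_data1; */
        ANDROID_OEM_DATA_ARRAY(2, 4); /* expands to: u64 android_oem_data2[4]; */
    };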

View File

@@ -2,20 +2,18 @@
 /*
  * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  */
-#if !defined(_TRACE_RESTRICTED_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RESTRICTED_PREEMPTIRQ_H
-
-#ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS
-
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM restricted_preemptirq

 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH trace/hooks

+#if !defined(_TRACE_RESTRICTED_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RESTRICTED_PREEMPTIRQ_H
+
 #include <linux/tracepoint.h>
 #include <trace/hooks/vendor_hooks.h>

+#if defined(CONFIG_PREEMPTIRQ_TRACEPOINTS) && \
+        defined(CONFIG_ANDROID_VENDOR_HOOKS)
 #ifdef CONFIG_TRACE_IRQFLAGS
 DECLARE_RESTRICTED_HOOK(restricted_irq_disable,
     TP_PROTO(unsigned long ip, unsigned long parent_ip),
@@ -28,6 +26,8 @@ DECLARE_RESTRICTED_HOOK(restricted_irq_enable,
 #else
 #define trace_restricted_irq_enable(ip, parent_ip)
 #define trace_restricted_irq_disable(ip, parent_ip)
+#define register_trace_restricted_irq_enable(...)
+#define register_trace_restricted_irq_disable(...)
 #endif /* CONFIG_TRACE_IRQFLAGS */

 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
@@ -42,15 +42,20 @@ DECLARE_RESTRICTED_HOOK(restricted_preempt_enable,
 #else
 #define trace_restricted_preempt_enable(ip, parent_ip)
 #define trace_restricted_preempt_disable(ip, parent_ip)
+#define register_trace_restricted_preempt_enable(...)
+#define register_trace_restricted_preempt_disable(...)
 #endif /* CONFIG_TRACE_PREEMPT_TOGGLE */

-#include <trace/define_trace.h>
-
-#else /* ! CONFIG_PREEMPTIRQ_TRACEPOINTS */
+#else /* CONFIG_PREEMPTIRQ_TRACEPOINTS && CONFIG_ANDROID_VENDOR_HOOKS */
 #define trace_restricted_irq_enable(...)
 #define trace_restricted_irq_disable(...)
+#define register_trace_restricted_irq_enable(...)
+#define register_trace_restricted_irq_disable(...)
 #define trace_restricted_preempt_enable(...)
 #define trace_restricted_preempt_disable(...)
-#endif /* ! CONFIG_PREEMPTIRQ_TRACEPOINTS */
+#define register_trace_restricted_preempt_enable(...)
+#define register_trace_restricted_preempt_disable(...)
+#endif /* CONFIG_PREEMPTIRQ_TRACEPOINTS && CONFIG_ANDROID_VENDOR_HOOKS */

 #endif /* TRACE_RESTRICTED_PREEMPTIRQ_H || TRACE_HEADER_MULTI_READ */
+
+#include <trace/define_trace.h>
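Editor's note: the new register_trace_restricted_*() no-op stubs matter for callers, which can now register probes unconditionally and have the calls compile away when the tracepoints are configured out. A hedged sketch; the probe name is invented, and the signature follows the TP_PROTO above plus the leading void *data argument that tracepoint probes take:

    static void my_irq_off_probe(void *data, unsigned long ip,
                                 unsigned long parent_ip)
    {
        /* record or account the irq-disable site */
    }

    static int __init my_init(void)
    {
        /* expands to nothing when the restricted hooks are configured out */
        register_trace_restricted_irq_disable(my_irq_off_probe, NULL);
        return 0;
    }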

View File

@@ -10,7 +10,6 @@
  * Following tracepoints are not exported in tracefs and provide a
  * mechanism for vendor modules to hook and extend functionality
  */
-#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
 struct task_struct;
 DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
     TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
@@ -70,22 +69,21 @@ struct sched_group;
 DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_group,
     TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
     TP_ARGS(busiest, dst_rq, out_balance), 1);
-#else
-#define trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag, wake_flags, new_cpu)
-#define trace_android_rvh_select_task_rq_rt(p, prev_cpu, sd_flag, wake_flags, new_cpu)
-#define trace_android_rvh_select_fallback_rq(cpu, p, dest_cpu)
-#define trace_android_vh_scheduler_tick(rq)
-#define trace_android_rvh_enqueue_task(rq, p)
-#define trace_android_rvh_dequeue_task(rq, p)
-#define trace_android_rvh_can_migrate_task(p, dst_cpu, can_migrate)
-#define trace_android_rvh_find_lowest_rq(p, local_cpu_mask, lowest_cpu)
-#define trace_android_rvh_prepare_prio_fork(p)
-#define trace_android_rvh_finish_prio_fork(p)
-#define trace_android_rvh_rtmutex_prepare_setprio(p, pi_task)
-#define trace_android_rvh_set_user_nice(p, nice)
-#define trace_android_rvh_setscheduler(p)
-#define trace_android_rvh_find_busiest_group(busiest, dst_rq, out_balance)
-#endif
+
+DECLARE_HOOK(android_vh_map_util_freq,
+    TP_PROTO(unsigned long util, unsigned long freq,
+        unsigned long cap, unsigned long *next_freq),
+    TP_ARGS(util, freq, cap, next_freq));
+
+struct em_perf_domain;
+DECLARE_HOOK(android_vh_em_pd_energy,
+    TP_PROTO(struct em_perf_domain *pd,
+        unsigned long max_util, unsigned long sum_util,
+        unsigned long *energy),
+    TP_ARGS(pd, max_util, sum_util, energy));
+
+/* macro versions of hooks are no longer required */
+
 #endif /* _TRACE_HOOK_SCHED_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>
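Editor's note: android_vh_map_util_freq and android_vh_em_pd_energy are plain vendor hooks. A module attaches a probe, and the probe reports a result through the pointer argument, with zero meaning "no override" at both call sites later in this commit. A sketch of a subscriber; the handler name and its formula are purely illustrative:

    static void foo_map_util_freq(void *data, unsigned long util,
                                  unsigned long freq, unsigned long cap,
                                  unsigned long *next_freq)
    {
        /* a vendor would apply its own util-to-frequency policy here */
        *next_freq = util * freq / cap;
    }

    /* typically from the vendor module's init: */
    register_trace_android_vh_map_util_freq(foo_map_util_freq, NULL);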

View File

@@ -9,6 +9,8 @@
 #include <linux/tracepoint.h>

+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
 #define DECLARE_HOOK DECLARE_TRACE

 #ifdef TRACE_HEADER_MULTI_READ
@@ -74,3 +76,10 @@
         PARAMS(__data, args))

 #endif /* TRACE_HEADER_MULTI_READ */
+
+#else /* !CONFIG_TRACEPOINTS || !CONFIG_ANDROID_VENDOR_HOOKS */
+/* suppress trace hooks */
+#define DECLARE_HOOK DECLARE_EVENT_NOP
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+    DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
+#endif
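Editor's note: this #else branch is what allows the per-hook macro stubs to be deleted from the scheduler hooks header above. When either config is off, DECLARE_HOOK and DECLARE_RESTRICTED_HOOK fall back to DECLARE_EVENT_NOP, so each trace_<name>() call site compiles to an empty stub instead of needing its own #define. A toy illustration with an invented hook name:

    DECLARE_HOOK(android_vh_foo,
        TP_PROTO(int val),
        TP_ARGS(val));

    /* valid in both configurations; a no-op when hooks are disabled */
    trace_android_vh_foo(42);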

View File

@@ -881,8 +881,9 @@ static void put_pi_state(struct futex_pi_state *pi_state)
      */
     if (pi_state->owner) {
         struct task_struct *owner;
+        unsigned long flags;

-        raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+        raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
         owner = pi_state->owner;
         if (owner) {
             raw_spin_lock(&owner->pi_lock);
@@ -890,7 +891,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
             raw_spin_unlock(&owner->pi_lock);
         }
         rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
-        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+        raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
     }

     if (current->pi_state_cache) {
@@ -1595,8 +1596,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
      */
     newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

-    if (unlikely(should_fail_futex(true)))
+    if (unlikely(should_fail_futex(true))) {
         ret = -EFAULT;
+        goto out_unlock;
+    }

     ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
     if (!ret && (curval != uval)) {
@@ -2513,10 +2516,22 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         }

         /*
-         * Since we just failed the trylock; there must be an owner.
+         * The trylock just failed, so either there is an owner or
+         * there is a higher priority waiter than this one.
          */
         newowner = rt_mutex_owner(&pi_state->pi_mutex);
-        BUG_ON(!newowner);
+        /*
+         * If the higher priority waiter has not yet taken over the
+         * rtmutex then newowner is NULL. We can't return here with
+         * that state because it's inconsistent vs. the user space
+         * state. So drop the locks and try again. It's a valid
+         * situation and not any different from the other retry
+         * conditions.
+         */
+        if (unlikely(!newowner)) {
+            err = -EAGAIN;
+            goto handle_err;
+        }
     } else {
         WARN_ON_ONCE(argowner != current);
         if (oldowner == current) {
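Editor's note: both futex fixes remove assumptions the old code could not rely on. The put_pi_state() change swaps _irq for _irqsave so the function stays safe when called with interrupts already disabled, and fixup_pi_state_owner() now treats a transiently ownerless rtmutex as a retry condition rather than a BUG. The locking pattern in isolation, for reference:

    unsigned long flags;

    /* preserves the caller's IRQ state instead of assuming IRQs were on
     * and unconditionally re-enabling them on unlock */
    raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
    /* ... critical section ... */
    raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);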

View File

@@ -13,6 +13,7 @@
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
 #include <linux/sched/sysctl.h>
+#include <trace/hooks/sched.h>

 #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
@@ -295,10 +296,15 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
     struct cpufreq_policy *policy = sg_policy->policy;
     unsigned int freq = arch_scale_freq_invariant() ?
                 policy->cpuinfo.max_freq : policy->cur;
+    unsigned long next_freq = 0;

-    freq = map_util_freq(util, freq, max);
+    trace_android_vh_map_util_freq(util, freq, max, &next_freq);
+    if (next_freq)
+        freq = next_freq;
+    else
+        freq = map_util_freq(util, freq, max);

     trace_sugov_next_freq(policy->cpu, util, max, freq);

     if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
         return sg_policy->next_freq;

View File

@@ -6903,6 +6903,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
     unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
 #endif
     unsigned long max_util = 0, sum_util = 0;
+    unsigned long energy = 0;
     int cpu;
     unsigned long cpu_util;
@@ -6945,7 +6946,11 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
         max_util = max(max_util, cpu_util);
     }

-    return em_pd_energy(pd->em_pd, max_util, sum_util);
+    trace_android_vh_em_pd_energy(pd->em_pd, max_util, sum_util, &energy);
+    if (!energy)
+        energy = em_pd_energy(pd->em_pd, max_util, sum_util);
+
+    return energy;
 }

 #ifdef CONFIG_SCHED_WALT
View File

@@ -29,7 +29,8 @@ static unsigned int one_million = 1000000;
 static DEFINE_PER_CPU(u64, irq_disabled_ts);
 static DEFINE_PER_CPU(u64, preempt_disabled_ts);

-static void note_irq_disable(void *u1, unsigned long u2, unsigned long u3)
+static void __attribute__((__unused__))
+note_irq_disable(void *u1, unsigned long u2, unsigned long u3)
 {
     if (is_idle_task(current))
         return;
@@ -41,7 +42,8 @@ static void note_irq_disable(void *u1, unsigned long u2, unsigned long u3)
     this_cpu_write(irq_disabled_ts, sched_clock());
 }

-static void test_irq_disable_long(void *u1, unsigned long u2, unsigned long u3)
+static void __attribute__((__unused__))
+test_irq_disable_long(void *u1, unsigned long u2, unsigned long u3)
 {
     u64 ts = this_cpu_read(irq_disabled_ts);
@@ -69,12 +71,14 @@ static void test_irq_disable_long(void *u1, unsigned long u2, unsigned long u3)
     }
 }

-static void note_preempt_disable(void *u1, unsigned long u2, unsigned long u3)
+static void __attribute__((__unused__))
+note_preempt_disable(void *u1, unsigned long u2, unsigned long u3)
 {
     this_cpu_write(preempt_disabled_ts, sched_clock());
 }

-static void test_preempt_disable_long(void *u1, unsigned long u2,
+static void __attribute__((__unused__))
+test_preempt_disable_long(void *u1, unsigned long u2,
                                       unsigned long u3)
 {
     u64 ts = this_cpu_read(preempt_disabled_ts);
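Editor's note: a hedged reading of the __attribute__((__unused__)) annotations in this file. These probes are attached through the register_trace_restricted_*() calls, which the restricted_preemptirq header change earlier in this commit turns into empty macros in some configurations, leaving the probe functions entirely unreferenced. The attribute silences -Wunused-function in exactly that case:

    /* expands to nothing when the restricted hooks are configured out,
     * so note_irq_disable() would otherwise trigger -Wunused-function */
    register_trace_restricted_irq_disable(note_irq_disable, NULL);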