kgsl: hwsched: Add support for GMU tracepoints logging
In hardware scheduling, some events that originate on the GMU are not captured on the host side; the preemption tracepoints for preempt trigger and preempt done are examples. Such tracepoints must continue to be supported with HW scheduling, for backwards compatibility and for profiling and debugging. We therefore need a way to log these events on the GMU, with the timestamp of when they occur, and convey them to the host so that kgsl can log them lazily to ftrace.

Change-Id: Ib12e2341f928091ad3918841c267a8f2e92dc766
Signed-off-by: Hareesh Gundu <quic_hareeshg@quicinc.com>
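As a rough sketch of the flow this patch implements (not part of the patch itself): the GMU appends fixed-format packets (a header word carrying a sequence number, a trace id, the GMU ticks timestamp, and a payload) into a shared ring buffer, and the host's f2h daemon drains that buffer and replays each packet to ftrace. The consumer loop below is a minimal illustration only; emit_to_ftrace() and pkt_size_dwords() are hypothetical stand-ins for the stream_trace_data() dispatch and the TRACE_PKT_GET_SIZE() macro added by the diff below.

    /* Illustrative sketch only; the patch's real consumer is gmu_core_process_trace_data() below */
    static void drain_trace(struct gmu_trace_header *hdr, u32 *buf)
    {
        u32 ridx = readl(&hdr->read_index);

        /* Drain until the ring is empty; the patch processes one packet per call instead */
        while (ridx != readl(&hdr->write_index)) {
            struct gmu_trace_packet *pkt = (struct gmu_trace_packet *)
                    &buf[hdr->payload_offset + ridx];

            emit_to_ftrace(pkt);    /* hypothetical: replay with pkt->ticks as the timestamp */

            /* Advance the dword read index, wrapping at the payload size */
            ridx = (ridx + pkt_size_dwords(pkt->hdr)) % hdr->payload_size;
            writel(ridx, &hdr->read_index);     /* publish consumption to the GMU */
        }
    }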
@@ -614,6 +614,7 @@ int a6xx_gmu_device_start(struct adreno_device *adreno_dev)
 	u32 val = 0x00000100;
 	u32 mask = 0x000001FF;
 
+	gmu_core_reset_trace_header(&gmu->trace);
 	gmu_ao_sync_event(adreno_dev);
 
 	/* Check for 0xBABEFACE on legacy targets */
@@ -2945,6 +2946,9 @@ int a6xx_gmu_probe(struct kgsl_device *device,
 
 	set_bit(GMU_ENABLED, &device->gmu_core.flags);
 
+	/* Initialize to zero to detect trace packet loss */
+	gmu->trace.seq_num = 0;
+
 	device->gmu_core.dev_ops = &a6xx_gmudev;
 
 	/* Set default GMU attributes */
@@ -47,6 +47,8 @@ struct a6xx_gmu_device {
 	struct kgsl_memdesc *gmu_log;
 	/** @vrb: GMU virtual register bank memory */
 	struct kgsl_memdesc *vrb;
+	/** @trace: gmu trace container */
+	struct kgsl_gmu_trace trace;
 	struct a6xx_hfi hfi;
 	struct clk_bulk_data *clks;
 	/** @num_clks: Number of entries in the @clks array */
@@ -261,6 +261,8 @@ static void a6xx_gmu_snapshot_memories(struct kgsl_device *device,
 			desc.type = SNAPSHOT_GMU_MEM_DEBUG;
 		else if (md == gmu->vrb)
 			desc.type = SNAPSHOT_GMU_MEM_VRB;
+		else if (md == gmu->trace.md)
+			desc.type = SNAPSHOT_GMU_MEM_TRACE;
 		else
 			desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
 
@@ -654,13 +654,32 @@ static int a6xx_hwsched_gmu_memory_init(struct adreno_device *adreno_dev)
 		gmu->vrb = reserve_gmu_kernel_block(gmu, 0, GMU_VRB_SIZE,
 				GMU_NONCACHED_KERNEL, 0);
 
+		if (IS_ERR(gmu->vrb))
+			return PTR_ERR(gmu->vrb);
+
 		/* Populate size of the virtual register bank */
-		if (!IS_ERR(gmu->vrb))
-			gmu_core_set_vrb_register(gmu->vrb->hostptr,
-				VRB_SIZE_IDX, gmu->vrb->size >> 2);
+		gmu_core_set_vrb_register(gmu->vrb->hostptr, VRB_SIZE_IDX,
+				gmu->vrb->size >> 2);
 	}
 
-	return PTR_ERR_OR_ZERO(gmu->vrb);
+	/* GMU trace log */
+	if (IS_ERR_OR_NULL(gmu->trace.md)) {
+		gmu->trace.md = reserve_gmu_kernel_block(gmu, 0,
+				GMU_TRACE_SIZE, GMU_NONCACHED_KERNEL, 0);
+
+		if (IS_ERR(gmu->trace.md))
+			return PTR_ERR(gmu->trace.md);
+
+		/* Pass trace buffer address to GMU through the VRB */
+		gmu_core_set_vrb_register(gmu->vrb->hostptr,
+				VRB_TRACE_BUFFER_ADDR_IDX,
+				gmu->trace.md->gmuaddr);
+
+		/* Initialize the GMU trace buffer header */
+		gmu_core_trace_header_init(&gmu->trace);
+	}
+
+	return 0;
 }
 
 static int a6xx_hwsched_gmu_init(struct adreno_device *adreno_dev)
@@ -1334,6 +1353,15 @@ int a6xx_hwsched_add_to_minidump(struct adreno_device *adreno_dev)
 			return ret;
 	}
 
+	if (!IS_ERR_OR_NULL(a6xx_dev->gmu.trace.md)) {
+		ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev,
+				KGSL_GMU_TRACE_ENTRY,
+				a6xx_dev->gmu.trace.md->hostptr,
+				a6xx_dev->gmu.trace.md->size);
+		if (ret)
+			return ret;
+	}
+
 	/* Dump HFI hwsched global mem alloc entries */
 	for (i = 0; i < hw_hfi->mem_alloc_entries; i++) {
 		struct hfi_mem_alloc_entry *entry = &hw_hfi->mem_alloc_table[i];
@@ -1433,17 +1433,24 @@ static int hfi_f2h_main(void *arg)
 {
 	struct adreno_device *adreno_dev = arg;
 	struct a6xx_hwsched_hfi *hfi = to_a6xx_hwsched_hfi(adreno_dev);
+	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(hfi->f2h_wq, kthread_should_stop() ||
-			(!(is_queue_empty(adreno_dev, HFI_MSG_ID) &&
-			is_queue_empty(adreno_dev, HFI_DBG_ID)) &&
-			(hfi->irq_mask & HFI_IRQ_MSGQ_MASK)));
+			/* If msgq irq is enabled and msgq has messages to process */
+			(((hfi->irq_mask & HFI_IRQ_MSGQ_MASK) &&
+			!is_queue_empty(adreno_dev, HFI_MSG_ID)) ||
+			/* Trace buffer has messages to process */
+			!gmu_core_is_trace_empty(gmu->trace.md->hostptr) ||
+			/* Dbgq has messages to process */
+			!is_queue_empty(adreno_dev, HFI_DBG_ID)));
 
 		if (kthread_should_stop())
 			break;
 
 		a6xx_hwsched_process_msgq(adreno_dev);
+		gmu_core_process_trace_data(KGSL_DEVICE(adreno_dev),
+				&gmu->pdev->dev, &gmu->trace);
 		a6xx_hwsched_process_dbgq(adreno_dev, true);
 	}
 
@@ -349,6 +349,8 @@ int gen7_gmu_device_start(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
 
+	gmu_core_reset_trace_header(&gmu->trace);
+
 	gmu_ao_sync_event(adreno_dev);
 
 	/* Bring GMU out of reset */
@@ -2575,6 +2577,9 @@ int gen7_gmu_probe(struct kgsl_device *device,
 	gmu->log_stream_enable = false;
 	gmu->log_group_mask = 0x3;
 
+	/* Initialize to zero to detect trace packet loss */
+	gmu->trace.seq_num = 0;
+
 	/* Disabled by default */
 	gmu->stats_enable = false;
 	/* Set default to CM3 busy cycles countable */
@@ -58,6 +58,8 @@ struct gen7_gmu_device {
 	struct kgsl_memdesc *gpu_boot_scratch;
 	/** @vrb: GMU virtual register bank memory */
 	struct kgsl_memdesc *vrb;
+	/** @trace: gmu trace container */
+	struct kgsl_gmu_trace trace;
 	struct gen7_hfi hfi;
 	struct clk_bulk_data *clks;
 	/** @num_clks: Number of entries in the @clks array */
@@ -140,6 +140,8 @@ static void gen7_gmu_snapshot_memories(struct kgsl_device *device,
 			desc.type = SNAPSHOT_GMU_MEM_WARMBOOT;
 		else if (md == gmu->vrb)
 			desc.type = SNAPSHOT_GMU_MEM_VRB;
+		else if (md == gmu->trace.md)
+			desc.type = SNAPSHOT_GMU_MEM_TRACE;
 		else
 			desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
 
@@ -878,13 +878,32 @@ static int gen7_hwsched_gmu_memory_init(struct adreno_device *adreno_dev)
 		gmu->vrb = gen7_reserve_gmu_kernel_block(gmu, 0, GMU_VRB_SIZE,
 				GMU_NONCACHED_KERNEL, 0);
 
+		if (IS_ERR(gmu->vrb))
+			return PTR_ERR(gmu->vrb);
+
 		/* Populate size of the virtual register bank */
-		if (!IS_ERR(gmu->vrb))
-			gmu_core_set_vrb_register(gmu->vrb->hostptr,
-				VRB_SIZE_IDX, gmu->vrb->size >> 2);
+		gmu_core_set_vrb_register(gmu->vrb->hostptr, VRB_SIZE_IDX,
+				gmu->vrb->size >> 2);
 	}
 
-	return PTR_ERR_OR_ZERO(gmu->vrb);
+	/* GMU trace log */
+	if (IS_ERR_OR_NULL(gmu->trace.md)) {
+		gmu->trace.md = gen7_reserve_gmu_kernel_block(gmu, 0,
+				GMU_TRACE_SIZE, GMU_NONCACHED_KERNEL, 0);
+
+		if (IS_ERR(gmu->trace.md))
+			return PTR_ERR(gmu->trace.md);
+
+		/* Pass trace buffer address to GMU through the VRB */
+		gmu_core_set_vrb_register(gmu->vrb->hostptr,
+				VRB_TRACE_BUFFER_ADDR_IDX,
+				gmu->trace.md->gmuaddr);
+
+		/* Initialize the GMU trace buffer header */
+		gmu_core_trace_header_init(&gmu->trace);
+	}
+
+	return 0;
 }
 
 static int gen7_hwsched_gmu_init(struct adreno_device *adreno_dev)
@@ -1863,6 +1882,15 @@ int gen7_hwsched_add_to_minidump(struct adreno_device *adreno_dev)
 			return ret;
 	}
 
+	if (!IS_ERR_OR_NULL(gen7_dev->gmu.trace.md)) {
+		ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev,
+				KGSL_GMU_TRACE_ENTRY,
+				gen7_dev->gmu.trace.md->hostptr,
+				gen7_dev->gmu.trace.md->size);
+		if (ret)
+			return ret;
+	}
+
 	/* Dump HFI hwsched global mem alloc entries */
 	for (i = 0; i < hw_hfi->mem_alloc_entries; i++) {
 		struct hfi_mem_alloc_entry *entry = &hw_hfi->mem_alloc_table[i];
@@ -2636,17 +2636,24 @@ static int hfi_f2h_main(void *arg)
 {
 	struct adreno_device *adreno_dev = arg;
 	struct gen7_hwsched_hfi *hfi = to_gen7_hwsched_hfi(adreno_dev);
+	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(hfi->f2h_wq, kthread_should_stop() ||
-			(!(is_queue_empty(adreno_dev, HFI_MSG_ID) &&
-			is_queue_empty(adreno_dev, HFI_DBG_ID)) &&
-			(hfi->irq_mask & HFI_IRQ_MSGQ_MASK)));
+			/* If msgq irq is enabled and msgq has messages to process */
+			(((hfi->irq_mask & HFI_IRQ_MSGQ_MASK) &&
+			!is_queue_empty(adreno_dev, HFI_MSG_ID)) ||
+			/* Trace buffer has messages to process */
+			!gmu_core_is_trace_empty(gmu->trace.md->hostptr) ||
+			/* Dbgq has messages to process */
+			!is_queue_empty(adreno_dev, HFI_DBG_ID)));
 
 		if (kthread_should_stop())
 			break;
 
 		gen7_hwsched_process_msgq(adreno_dev);
+		gmu_core_process_trace_data(KGSL_DEVICE(adreno_dev),
+				&gmu->pdev->dev, &gmu->trace);
 		gen7_hwsched_process_dbgq(adreno_dev, true);
 	}
 
adreno_hfi.h
@@ -488,6 +488,7 @@ enum hfi_msg_type {
 	H2F_MSG_ISSUE_SYNCOBJ = 152,
 	F2H_MSG_SYNCOBJ_QUERY = 153,
 	H2F_MSG_WARMBOOT_CMD = 154,
+	F2H_MSG_PROCESS_TRACE = 155,
 	HFI_MAX_ID,
 };
 
@@ -761,6 +762,21 @@ struct hfi_debug_cmd {
 	u32 data;
 } __packed;
 
+/* F2H */
+struct hfi_trace_cmd {
+	u32 hdr;
+	u32 version;
+	u64 identifier;
+} __packed;
+
+/* Trace packet definition */
+struct gmu_trace_packet {
+	u32 hdr;
+	u32 trace_id;
+	u64 ticks;
+	u32 payload[];
+} __packed;
+
 /* F2H */
 struct hfi_gmu_cntr_register_cmd {
 	u32 hdr;
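For a sense of scale, and assuming the packet header's size field counts dwords (consistent with the dword-based read/write indices defined in kgsl_gmu_core.h below), a packet carrying the trace_preempt_trigger payload occupies the 16-byte gmu_trace_packet header plus 12 bytes of payload, i.e. 7 dwords. A hedged illustration, where PKT_DWORDS() is a hypothetical helper, not part of this patch:

    /* Illustrative only: sizing a preempt-trigger packet from the structs in this patch */
    #define PKT_DWORDS(payload_bytes) \
            ((sizeof(struct gmu_trace_packet) + (payload_bytes)) / sizeof(u32))

    /* 16-byte header (hdr + trace_id + ticks) + 12-byte payload = 28 bytes = 7 dwords */
    _Static_assert(PKT_DWORDS(sizeof(struct trace_preempt_trigger)) == 7,
                   "4 header dwords + 3 payload dwords");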
kgsl_gmu_core.c
@@ -6,9 +6,11 @@
 
-#include <linux/iopoll.h>
 #include <linux/of.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
 
 #include "adreno.h"
+#include "adreno_trace.h"
 #include "kgsl_device.h"
 #include "kgsl_gmu_core.h"
 #include "kgsl_trace.h"
@@ -225,3 +227,136 @@ void gmu_core_set_vrb_register(void *ptr, u32 index, u32 val)
 	/* Make sure the vrb write is posted before moving ahead */
 	wmb();
 }
+
+static void stream_trace_data(struct gmu_trace_packet *pkt)
+{
+	switch (pkt->trace_id) {
+	case GMU_TRACE_PREEMPT_TRIGGER: {
+		struct trace_preempt_trigger *data =
+			(struct trace_preempt_trigger *)pkt->payload;
+
+		trace_adreno_preempt_trigger(data->cur_rb, data->next_rb,
+			data->ctx_switch_cntl, pkt->ticks);
+		break;
+	}
+	case GMU_TRACE_PREEMPT_DONE: {
+		struct trace_preempt_done *data =
+			(struct trace_preempt_done *)pkt->payload;
+
+		trace_adreno_preempt_done(data->prev_rb, data->next_rb,
+			data->ctx_switch_cntl, pkt->ticks);
+		break;
+	}
+	default: {
+		char str[64];
+
+		snprintf(str, sizeof(str),
+			"Unsupported GMU trace id %d\n", pkt->trace_id);
+		trace_kgsl_msg(str);
+	}
+	}
+}
+
+void gmu_core_process_trace_data(struct kgsl_device *device,
+	struct device *dev, struct kgsl_gmu_trace *trace)
+{
+	struct gmu_trace_header *trace_hdr = trace->md->hostptr;
+	u32 size, *buffer = trace->md->hostptr;
+	struct gmu_trace_packet *pkt;
+	u16 seq_num, num_pkts = 0;
+	u32 ridx = readl(&trace_hdr->read_index);
+	u32 widx = readl(&trace_hdr->write_index);
+
+	if (ridx == widx)
+		return;
+
+	/*
+	 * Don't process any traces, and force-set the read index to the write
+	 * index, if an invalid trace packet was encountered previously.
+	 */
+	if (trace->reset_hdr) {
+		/* Update the read index to let the f2h daemon go to sleep */
+		writel(trace_hdr->write_index, &trace_hdr->read_index);
+		return;
+	}
+
+	/* Start reading trace buffer data */
+	pkt = (struct gmu_trace_packet *)&buffer[trace_hdr->payload_offset + ridx];
+
+	/* Validate packet header */
+	if (TRACE_PKT_GET_VALID_FIELD(pkt->hdr) != TRACE_PKT_VALID) {
+		char str[128];
+
+		snprintf(str, sizeof(str),
+			"Invalid trace packet found at read index: %d, resetting trace header\n",
+			trace_hdr->read_index);
+		/*
+		 * The GMU is not expected to write an invalid trace packet, so
+		 * this condition can only be true in case of memory corruption.
+		 * In such a scenario, fast-forward the read index to the write
+		 * index so that we don't process any trace packets until we
+		 * reset the trace header on the next slumber exit.
+		 */
+		dev_err_ratelimited(device->dev, "%s\n", str);
+		trace_kgsl_msg(str);
+		writel(trace_hdr->write_index, &trace_hdr->read_index);
+		trace->reset_hdr = true;
+		return;
+	}
+
+	size = TRACE_PKT_GET_SIZE(pkt->hdr);
+
+	if (TRACE_PKT_GET_SKIP_FIELD(pkt->hdr))
+		goto done;
+
+	seq_num = TRACE_PKT_GET_SEQNUM(pkt->hdr);
+	num_pkts = seq_num - trace->seq_num;
+
+	/* Detect trace packet loss by tracking any gaps in the sequence number */
+	if (num_pkts > 1) {
+		char str[128];
+
+		snprintf(str, sizeof(str),
+			"%d GMU trace packets dropped from sequence number: %d\n",
+			num_pkts - 1, trace->seq_num);
+		trace_kgsl_msg(str);
+	}
+
+	trace->seq_num = seq_num;
+	stream_trace_data(pkt);
+done:
+	ridx = (ridx + size) % trace_hdr->payload_size;
+	writel(ridx, &trace_hdr->read_index);
+}
+
+bool gmu_core_is_trace_empty(struct gmu_trace_header *hdr)
+{
+	return readl(&hdr->read_index) == readl(&hdr->write_index);
+}
+
+void gmu_core_trace_header_init(struct kgsl_gmu_trace *trace)
+{
+	struct gmu_trace_header *hdr = trace->md->hostptr;
+
+	hdr->threshold = TRACE_BUFFER_THRESHOLD;
+	hdr->timeout = TRACE_TIMEOUT_MSEC;
+	hdr->metadata = FIELD_PREP(GENMASK(31, 30), TRACE_MODE_DROP) |
+			FIELD_PREP(GENMASK(3, 0), TRACE_HEADER_VERSION_1);
+	hdr->cookie = trace->md->gmuaddr;
+	hdr->size = trace->md->size;
+	hdr->log_type = TRACE_LOGTYPE_HWSCHED;
+}
+
+void gmu_core_reset_trace_header(struct kgsl_gmu_trace *trace)
+{
+	struct gmu_trace_header *hdr = trace->md->hostptr;
+
+	if (!trace->reset_hdr)
+		return;
+
+	memset(hdr, 0, sizeof(struct gmu_trace_header));
+	/* Reset the sequence number used to detect trace packet loss */
+	trace->seq_num = 0;
+	gmu_core_trace_header_init(trace);
+	trace->reset_hdr = false;
+}
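Two arithmetic details in gmu_core_process_trace_data() above are easy to miss: the dropped-packet count relies on unsigned 16-bit wraparound of the sequence number, and the read index advances modulo the payload size. A small illustration with hypothetical values, not taken from the patch:

    /* Illustrative only: u16 subtraction keeps the gap count correct across rollover */
    u16 last = 0xfffe, now = 0x0001;
    u16 gap = now - last;   /* 3: packets 0xffff and 0x0000 were dropped (gap - 1 = 2) */

    /* Illustrative only: the dword read index wraps back to the start of the payload area */
    u32 ridx = 4090, size = 7, payload_size = 4096;
    ridx = (ridx + size) % payload_size;    /* (4090 + 7) % 4096 = 1 */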
kgsl_gmu_core.h
@@ -181,6 +181,122 @@ enum gmu_vrb_idx {
 	VRB_TRACE_BUFFER_ADDR_IDX = 2,
 };
 
+/* For GMU Trace */
+#define GMU_TRACE_SIZE SZ_16K
+
+/* Trace header defines */
+/* Log type used to decode the trace packet data */
+#define TRACE_LOGTYPE_HWSCHED 1
+/* Trace buffer fill threshold at which the GMU sends an F2H message */
+#define TRACE_BUFFER_THRESHOLD 80
+/*
+ * GMU trace timer value for checking trace packet consumption. The GMU timer
+ * handler tracks the read index; if it has not moved since the timer last
+ * fired, the GMU sends the F2H message to drain the trace packets. The trace
+ * timer is restarted as long as the read index keeps moving.
+ */
+#define TRACE_TIMEOUT_MSEC 5
+
+/* Trace metadata defines */
+/* Drop-mode hint telling the GMU to drop trace packets when the buffer is full */
+#define TRACE_MODE_DROP 1
+/* Trace buffer header version */
+#define TRACE_HEADER_VERSION_1 1
+
+/* Trace packet defines */
+#define TRACE_PKT_VALID 1
+#define TRACE_PKT_SEQ_MASK GENMASK(15, 0)
+#define TRACE_PKT_SZ_MASK GENMASK(27, 16)
+#define TRACE_PKT_SZ_SHIFT 16
+#define TRACE_PKT_VALID_MASK GENMASK(31, 31)
+#define TRACE_PKT_SKIP_MASK GENMASK(30, 30)
+#define TRACE_PKT_VALID_SHIFT 31
+#define TRACE_PKT_SKIP_SHIFT 30
+
+#define TRACE_PKT_GET_SEQNUM(hdr) ((hdr) & TRACE_PKT_SEQ_MASK)
+#define TRACE_PKT_GET_SIZE(hdr) (((hdr) & TRACE_PKT_SZ_MASK) >> TRACE_PKT_SZ_SHIFT)
+#define TRACE_PKT_GET_VALID_FIELD(hdr) (((hdr) & TRACE_PKT_VALID_MASK) >> TRACE_PKT_VALID_SHIFT)
+#define TRACE_PKT_GET_SKIP_FIELD(hdr) (((hdr) & TRACE_PKT_SKIP_MASK) >> TRACE_PKT_SKIP_SHIFT)
+
+/*
+ * Trace buffer header definition
+ * The header fields are initialized/updated by both KGSL and the GMU:
+ * GMU input: the following fields are initialized by KGSL
+ * - @metadata, @threshold, @size, @cookie, @timeout, @log_type
+ * - @read_index, updated by KGSL as trace messages are consumed
+ * GMU output: the following fields are initialized by the GMU only
+ * - @magic, @payload_offset, @payload_size
+ * - @write_index, updated by the GMU as it fills in trace messages
+ */
+struct gmu_trace_header {
+	/** @magic: Initialized by the GMU so the header can be validated */
+	u32 magic;
+	/**
+	 * @metadata: Trace buffer metadata. Bits [31:30] hold the trace mode
+	 * for logging tracepoint messages, bits [3:0] the version for header
+	 * format changes.
+	 */
+	u32 metadata;
+	/**
+	 * @threshold: Percentage of buffer fill at which the GMU sends an F2H
+	 * message to wake KMD to consume tracepoint data; zero disables
+	 * thresholding. The threshold is a percentage of the buffer-full
+	 * condition, not a trace packet count: if the GMU keeps writing while
+	 * KMD is not consuming, the buffer eventually fills up, so the GMU
+	 * compares the percentage of trace buffer space in use against this
+	 * value and, once it is exceeded, sends the F2H message asking KMD to
+	 * drain the trace messages.
+	 */
+	u32 threshold;
+	/** @size: trace buffer allocation size in bytes */
+	u32 size;
+	/** @read_index: trace buffer read index in dwords */
+	u32 read_index;
+	/** @write_index: trace buffer write index in dwords */
+	u32 write_index;
+	/** @payload_offset: trace buffer payload offset in dwords */
+	u32 payload_offset;
+	/** @payload_size: trace buffer payload size in dwords */
+	u32 payload_size;
+	/** @cookie: cookie data sent through F2H_PROCESS_MESSAGE */
+	u64 cookie;
+	/**
+	 * @timeout: GMU trace timer value in msec; zero disables the trace
+	 * timer, otherwise the GMU trace timer handler uses it when sending
+	 * the HFI message.
+	 */
+	u32 timeout;
+	/** @log_type: Used to decode the trace buffer data */
+	u32 log_type;
+} __packed;
+
+/* Trace ID definition */
+enum gmu_trace_id {
+	GMU_TRACE_PREEMPT_TRIGGER = 1,
+	GMU_TRACE_PREEMPT_DONE = 2,
+	GMU_TRACE_MAX,
+};
+
+struct trace_preempt_trigger {
+	u32 cur_rb;
+	u32 next_rb;
+	u32 ctx_switch_cntl;
+} __packed;
+
+struct trace_preempt_done {
+	u32 prev_rb;
+	u32 next_rb;
+	u32 ctx_switch_cntl;
+} __packed;
+
+/**
+ * struct kgsl_gmu_trace - wrapper for the gmu trace memory object
+ */
+struct kgsl_gmu_trace {
+	/** @md: gmu trace memory descriptor */
+	struct kgsl_memdesc *md;
+	/** @seq_num: GMU trace packet sequence number, used to detect dropped packets */
+	u16 seq_num;
+	/** @reset_hdr: Reset the trace buffer header in case of an invalid packet */
+	bool reset_hdr;
+};
+
 /* GMU memdesc entries */
 #define GMU_KERNEL_ENTRIES 16
 
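As a worked example of the packet header layout defined above (valid in bit 31, skip in bit 30, size in dwords in bits 27:16, sequence number in bits 15:0), take the hypothetical header word 0x80030005:

    u32 hdr = 0x80030005;   /* hypothetical: valid = 1, skip = 0, size = 3, seq = 5 */

    u16 seq = TRACE_PKT_GET_SEQNUM(hdr);            /* 0x0005 */
    u32 size = TRACE_PKT_GET_SIZE(hdr);             /* 3 dwords */
    bool valid = TRACE_PKT_GET_VALID_FIELD(hdr) == TRACE_PKT_VALID; /* true */
    bool skip = TRACE_PKT_GET_SKIP_FIELD(hdr);      /* false */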
@@ -378,4 +494,33 @@ void gmu_core_dev_force_first_boot(struct kgsl_device *device);
  */
 void gmu_core_set_vrb_register(void *ptr, u32 index, u32 val);
 
+/**
+ * gmu_core_process_trace_data - Process GMU trace buffer data and write it to
+ * the default Linux trace buffer
+ * @device: Pointer to KGSL device
+ * @dev: GMU device instance
+ * @trace: GMU trace memory pointer
+ */
+void gmu_core_process_trace_data(struct kgsl_device *device,
+	struct device *dev, struct kgsl_gmu_trace *trace);
+
+/**
+ * gmu_core_is_trace_empty - Check whether the trace buffer is empty
+ * @hdr: Pointer to gmu trace header
+ *
+ * Return: true if the read index equals the write index, false otherwise
+ */
+bool gmu_core_is_trace_empty(struct gmu_trace_header *hdr);
+
+/**
+ * gmu_core_trace_header_init - Initialize the GMU trace buffer header
+ * @trace: Pointer to kgsl gmu trace
+ */
+void gmu_core_trace_header_init(struct kgsl_gmu_trace *trace);
+
+/**
+ * gmu_core_reset_trace_header - Reset the GMU trace buffer header
+ * @trace: Pointer to kgsl gmu trace
+ */
+void gmu_core_reset_trace_header(struct kgsl_gmu_trace *trace);
+
 #endif /* __KGSL_GMU_CORE_H */
@@ -217,6 +217,7 @@ struct kgsl_snapshot_ib_v2 {
 #define SNAPSHOT_GMU_MEM_HW_FENCE 0x07
 #define SNAPSHOT_GMU_MEM_WARMBOOT 0x08
 #define SNAPSHOT_GMU_MEM_VRB 0x09
+#define SNAPSHOT_GMU_MEM_TRACE 0x0a
 
 /* GMU memory section data */
 struct kgsl_snapshot_gmu_mem {
@@ -18,6 +18,7 @@
 #define KGSL_SCRATCH_ENTRY "kgsl_scratch"
 #define KGSL_MEMSTORE_ENTRY "kgsl_memstore"
 #define KGSL_GMU_LOG_ENTRY "kgsl_gmu_log"
+#define KGSL_GMU_TRACE_ENTRY "kgsl_gmu_trace"
 #define KGSL_HFIMEM_ENTRY "kgsl_hfi_mem"
 #define KGSL_GMU_DUMPMEM_ENTRY "kgsl_gmu_dump_mem"
 #define KGSL_GMU_RB_ENTRY "kgsl_gmu_rb"