Merge 8bdffe141e on remote branch

Change-Id: I692f9f775261a7dd0250e4f5ef41706ced7be3ab
Linux Build Service Account 2024-06-19 03:24:49 -07:00
commit b6b35a89f6
14 changed files with 258 additions and 142 deletions

View File

@ -99,6 +99,14 @@ config DOMAIN_ID_SECURE_CAMERA
domain ID based security architecture.
VC based security can be achieved with this.
config CSF_2_5_SECURE_CAMERA
bool "enable CSF2.5 feature flow"
help
This enables the call flow for CSF 2.5
enabled platforms. This config differentiates
between CSF 2.0 and CSF 2.5 compliant
SCM calls.
config DYNAMIC_FD_PORT_CONFIG
bool "enable dynamic FD port config feature"
help
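The CSF_2_5_SECURE_CAMERA symbol above is consumed as a compile-time switch in the driver sources. A minimal sketch of that gating, based only on the #ifdef changes later in this commit (the helper below is illustrative, not part of the change):

/* The Kconfig symbol becomes a -DCONFIG_CSF_2_5_SECURE_CAMERA=1 define via the
 * Makefile hunks below and selects the CSF 2.5 SCM call path at compile time.
 */
static inline bool cam_csf_2_5_enabled(void)
{
#ifdef CONFIG_CSF_2_5_SECURE_CAMERA
	return true;   /* CSF 2.5 compliant SCM calls (e.g. cam_csiphy_notify_secure_mode) */
#else
	return false;  /* legacy CSF 2.0 flow (e.g. cam_csiphy_update_secure_info) */
#endif
}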

View File

@ -9,6 +9,7 @@ CONFIG_SPECTRA_LLCC_STALING := y
CONFIG_SPECTRA_USE_RPMH_DRV_API := y
CONFIG_SPECTRA_USE_CLK_CRM_API := y
CONFIG_DOMAIN_ID_SECURE_CAMERA := y
CONFIG_CSF_2_5_SECURE_CAMERA := y
# Flags to pass into C preprocessor
ccflags-y += -DCONFIG_SPECTRA_ISP=1
@ -19,6 +20,7 @@ ccflags-y += -DCONFIG_SPECTRA_LLCC_STALING=1
ccflags-y += -DCONFIG_SPECTRA_USE_RPMH_DRV_API=1
ccflags-y += -DCONFIG_SPECTRA_USE_CLK_CRM_API=1
ccflags-y += -DCONFIG_DOMAIN_ID_SECURE_CAMERA=1
ccflags-y += -DCONFIG_CSF_2_5_SECURE_CAMERA=1
# External Dependencies
KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1

View File

@ -11,6 +11,7 @@ CONFIG_SPECTRA_USE_RPMH_DRV_API := y
CONFIG_SPECTRA_USE_CLK_CRM_API := y
CONFIG_DOMAIN_ID_SECURE_CAMERA := y
CONFIG_DYNAMIC_FD_PORT_CONFIG := y
CONFIG_CSF_2_5_SECURE_CAMERA := y
# Flags to pass into C preprocessor
ccflags-y += -DCONFIG_SPECTRA_ISP=1
@ -23,6 +24,7 @@ ccflags-y += -DCONFIG_SPECTRA_USE_RPMH_DRV_API=1
ccflags-y += -DCONFIG_SPECTRA_USE_CLK_CRM_API=1
ccflags-y += -DCONFIG_DOMAIN_ID_SECURE_CAMERA=1
ccflags-y += -DCONFIG_DYNAMIC_FD_PORT_CONFIG=1
ccflags-y += -DCONFIG_CSF_2_5_SECURE_CAMERA=1
# External Dependencies
KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1

View File

@ -6,6 +6,7 @@ CONFIG_SPECTRA_ICP := y
CONFIG_SPECTRA_TFE := y
CONFIG_SPECTRA_CRE := y
CONFIG_SPECTRA_SENSOR := y
CONFIG_CSF_2_5_SECURE_CAMERA := y
# Flags to pass into C preprocessor
ccflags-y += -DCONFIG_SPECTRA_ISP=1
@ -13,3 +14,4 @@ ccflags-y += -DCONFIG_SPECTRA_ICP=1
ccflags-y += -DCONFIG_SPECTRA_TFE=1
ccflags-y += -DCONFIG_SPECTRA_CRE=1
ccflags-y += -DCONFIG_SPECTRA_SENSOR=1
ccflags-y += -DCONFIG_CSF_2_5_SECURE_CAMERA=1

View File

@ -2442,10 +2442,6 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
if (is_shdr_en && !is_shdr_master)
tfe_ctx->is_shdr_slave = true;
CAM_INFO(CAM_ISP, "ctx %d TFE index %d is_dual=%d is_shdr=%d shdr_master=%d",
tfe_ctx->ctx_index, tfe_ctx->base[0].idx, tfe_ctx->is_dual,
is_shdr_en, is_shdr_master);
for (i = 0; i < acquire_hw_info->num_inputs; i++) {
cam_tfe_hw_mgr_preprocess_port(tfe_ctx, &in_port[i], &num_pix_port_per_in,
&num_rdi_port_per_in, &num_pd_port_per_in, &pdaf_enable, &lcr_enable);
@ -3505,6 +3501,8 @@ static int cam_tfe_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
cam_tfe_mgr_csid_change_halt_mode(ctx,
CAM_TFE_CSID_HALT_MODE_INTERNAL);
CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);
/* Stop the master CSID path first */
cam_tfe_mgr_csid_stop_hw(ctx, &ctx->res_list_tfe_csid,
master_base_idx, csid_halt_type);

View File

@ -997,8 +997,6 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
path_data->bayer_bin = reserve->in_port->bayer_bin;
path_data->qcfa_bin = reserve->in_port->qcfa_bin;
path_data->crop_enable = reserve->crop_enable;
path_data->is_shdr_master = reserve->in_port->is_shdr_master;
path_data->is_shdr = reserve->in_port->shdr_en;
csid_hw->event_cb = reserve->event_cb;
csid_hw->event_cb_priv = reserve->event_cb_prv;
@ -1067,6 +1065,38 @@ end:
return rc;
}
static void cam_tfe_csid_send_secure_info(
struct cam_tfe_csid_hw *csid_hw)
{
struct cam_tfe_csid_secure_info secure_info;
const struct cam_tfe_csid_reg_offset *csid_reg;
int phy_sel = 0;
csid_reg = csid_hw->csid_info->csid_reg;
secure_info.lane_cfg = csid_hw->csi2_rx_cfg.lane_cfg;
secure_info.cdm_hw_idx_mask = 0;
secure_info.vc_mask = 0;
secure_info.csid_hw_idx_mask = BIT(csid_hw->hw_intf->hw_idx);
CAM_DBG(CAM_ISP,
"PHY secure info for CSID[%u], lane_cfg: 0x%x, tfe: 0x%x, cdm: 0x%x, vc_mask: 0x%llx",
csid_hw->hw_intf->hw_idx, secure_info.lane_cfg, secure_info.csid_hw_idx_mask,
secure_info.cdm_hw_idx_mask, secure_info.vc_mask);
phy_sel = (int)(csid_hw->csi2_rx_cfg.phy_sel - csid_reg->csi2_reg->phy_sel_base);
if (phy_sel < 0) {
CAM_WARN(CAM_ISP, "Can't notify csiphy, incorrect phy selected=%d",
phy_sel);
} else {
secure_info.phy_sel = (uint32_t)phy_sel;
CAM_DBG(CAM_ISP, "Notify CSIPHY: %d", phy_sel);
cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
CAM_SUBDEV_MESSAGE_DOMAIN_ID_SECURE_PARAMS, (void *)&secure_info);
}
}
static int cam_tfe_csid_enable_csi2(
struct cam_tfe_csid_hw *csid_hw)
{
@ -1248,8 +1278,8 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
return rc;
}
CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
csid_hw->hw_intf->hw_idx);
CAM_DBG(CAM_ISP, "CSID:%d init CSID HW is_secure: %d",
csid_hw->hw_intf->hw_idx, csid_hw->is_secure);
rc = cam_soc_util_get_clk_level(soc_info, csid_hw->clk_rate,
soc_info->src_clk_idx, &clk_lvl);
@ -1331,6 +1361,9 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
path_data->res_sof_cnt = 0;
}
if (csid_hw->is_secure)
cam_tfe_csid_send_secure_info(csid_hw);
return rc;
@ -1669,31 +1702,33 @@ static int cam_tfe_csid_enable_pxl_path(
CAM_DBG(CAM_ISP, "Enable IPP path");
if ((path_data->is_shdr && path_data->is_shdr_master) ||
(path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
/* Set halt mode for master */
val = (TFE_CSID_HALT_MODE_MASTER << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
(path_data->is_shdr))
/* Set halt mode for slave */
val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_EXTERNAL << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
/* Set master or slave path */
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
/* Set halt mode as master */
val = (TFE_CSID_HALT_MODE_MASTER <<
pxl_reg->halt_mode_shift) |
(pxl_reg->halt_master_sel_master_val <<
pxl_reg->halt_master_sel_shift);
else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
/* Set halt mode as slave and set master idx */
val = (TFE_CSID_HALT_MODE_SLAVE << pxl_reg->halt_mode_shift);
else
/* Set halt mode for default */
val = (TFE_CSID_HALT_MODE_INTERNAL << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
/* Default is internal halt mode */
val = 1 << pxl_reg->halt_master_sel_shift;
/*
* Resume at frame boundary if Master or No Sync.
* Slave will get resume command from Master.
*/
if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
CAM_DBG(CAM_ISP, "CSID:%d sync_mode=%d IPP_Ctrl:0x%x is_shdr=%d shdr_master=%d",
csid_hw->hw_intf->hw_idx, path_data->sync_mode,
cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr),
path_data->is_shdr, path_data->is_shdr_master);
CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
csid_hw->hw_intf->hw_idx, val);
/* Enable the required pxl path interrupts */
val = TFE_CSID_PATH_INFO_RST_DONE |
@ -1769,8 +1804,7 @@ static int cam_tfe_csid_change_pxl_halt_mode(
static int cam_tfe_csid_disable_pxl_path(
struct cam_tfe_csid_hw *csid_hw,
struct cam_isp_resource_node *res,
enum cam_tfe_csid_halt_cmd stop_cmd,
bool csid_with_ppp_en)
enum cam_tfe_csid_halt_cmd stop_cmd)
{
int rc = 0;
uint32_t val = 0;
@ -1824,46 +1858,32 @@ static int cam_tfe_csid_disable_pxl_path(
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_irq_mask_addr);
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
/* configure Halt */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
val &= ~0x3;
val |= stop_cmd;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
}
if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
/* configure Halt for slave */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
val &= ~0xF;
val |= stop_cmd;
val |= (TFE_CSID_HALT_MODE_MASTER << 2);
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
}
path_data->init_frame_drop = 0;
path_data->res_sof_cnt = 0;
if (((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) ||
(path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)) && !csid_with_ppp_en)
/* Set halt mode for dual master/slave without pdaf */
val = (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_EXTERNAL << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_HALT_IMMEDIATELY << pxl_reg->halt_cmd_shift);
else if ((path_data->is_shdr && path_data->is_shdr_master) ||
(path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
/* Set halt mode for shdr/dual master */
val = (TFE_CSID_HALT_MODE_MASTER << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
else if ((path_data->is_shdr && !path_data->is_shdr_master) ||
(path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)) {
/* Set halt mode for shdr/dual slave */
CAM_DBG(CAM_ISP, "skip programming halt mode for slave in %s case",
(path_data->is_shdr) ? "SHDR" : "Dual TFE");
goto end;
} else if (csid_with_ppp_en)
/* Set halt mode for single tfe + pdaf */
val = (TFE_CSID_HALT_MODE_INTERNAL << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_NONE << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
else
/* Set halt mode for default */
val = (TFE_CSID_HALT_MODE_GLOBAL << pxl_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_EXTERNAL << pxl_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_HALT_AT_FRAME_BOUNDARY << pxl_reg->halt_cmd_shift);
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
pxl_reg->csid_pxl_ctrl_addr);
CAM_DBG(CAM_ISP, "halt CSID:%d sync_mode:%d res_id:%d IPP path pxl_ctrl=0x%x",
csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_ctrl_addr));
end:
return rc;
}
@ -1898,29 +1918,34 @@ static int cam_tfe_csid_enable_ppp_path(
CAM_DBG(CAM_ISP, "CSID:%d Enable PPP path", csid_hw->hw_intf->hw_idx);
if ((path_data->is_shdr && path_data->is_shdr_master) ||
(path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
/* Set halt mode for master shdr/dual */
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
else if ((path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE) ||
(path_data->is_shdr))
/* Set halt mode for slave shdr/dual*/
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_INTERNAL2 << ppp_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
/* Set master or slave path */
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
/* Set halt mode as master */
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(ppp_reg->halt_master_sel_master_val <<
ppp_reg->halt_master_sel_shift);
else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
/* Set halt mode as slave and set master idx */
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(ppp_reg->halt_master_sel_slave_val <<
ppp_reg->halt_master_sel_shift);
else
/* Set halt mode for default */
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(TFE_CSID_HALT_CMD_SOURCE_INTERNAL1 << ppp_reg->halt_master_sel_shift) |
(CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY << ppp_reg->halt_cmd_shift);
/* Default is internal halt mode */
val = (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift) |
(ppp_reg->halt_master_sel_master_val <<
ppp_reg->halt_master_sel_shift);
/*
* Resume at frame boundary if Master or No Sync.
* Slave will get resume command from Master.
*/
if ((path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) && !path_data->init_frame_drop)
val |= CAM_TFE_CSID_RESUME_AT_FRAME_BOUNDARY;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr);
CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d PPP Ctrl val: 0x%x",
csid_hw->hw_intf->hw_idx, path_data->sync_mode,
cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));
CAM_DBG(CAM_ISP, "CSID:%d PPP Ctrl val: 0x%x", csid_hw->hw_intf->hw_idx, val);
/* Enable the required ppp path interrupts */
val = TFE_CSID_PATH_INFO_RST_DONE | TFE_CSID_PATH_ERROR_FIFO_OVERFLOW |
@ -1949,6 +1974,7 @@ static int cam_tfe_csid_disable_ppp_path(
enum cam_tfe_csid_halt_cmd stop_cmd)
{
int rc = 0;
uint32_t val = 0;
const struct cam_tfe_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
struct cam_tfe_csid_path_cfg *path_data;
@ -1998,12 +2024,33 @@ static int cam_tfe_csid_disable_ppp_path(
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_irq_mask_addr);
CAM_DBG(CAM_ISP, "CSID:%d res_id:%d Skip programming halt mode for PPP path",
csid_hw->hw_intf->hw_idx, res->res_id);
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
/* configure Halt */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_ctrl_addr);
val &= ~0x3F;
val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
val |= (ppp_reg->halt_master_sel_master_val <<
ppp_reg->halt_master_sel_shift);
val |= stop_cmd;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_ctrl_addr);
}
CAM_DBG(CAM_ISP, "CSID:%d sync_mode:%d res_id:%d PPP path halt_ctrl_reg=0x%x",
csid_hw->hw_intf->hw_idx, path_data->sync_mode, res->res_id,
cam_io_r_mb(soc_info->reg_map[0].mem_base + ppp_reg->csid_pxl_ctrl_addr));
if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
stop_cmd == CAM_TFE_CSID_HALT_IMMEDIATELY) {
/* configure Halt for slave */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_ctrl_addr);
val &= ~0x3F;
val |= (TFE_CSID_HALT_MODE_SLAVE << ppp_reg->halt_mode_shift);
val |= (ppp_reg->halt_master_sel_slave_val <<
ppp_reg->halt_master_sel_shift);
val |= stop_cmd;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
ppp_reg->csid_pxl_ctrl_addr);
}
path_data->init_frame_drop = 0;
path_data->res_sof_cnt = 0;
@ -2307,9 +2354,6 @@ static int cam_tfe_csid_poll_stop_status(
uint32_t csid_status_addr = 0, val = 0, res_id = 0;
const struct cam_tfe_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
uint32_t csid_ctrl_reg = 0;
uint32_t csid_cfg0_reg = 0;
uint32_t csid_cfg1_reg = 0;
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
@ -2323,10 +2367,6 @@ static int cam_tfe_csid_poll_stop_status(
csid_status_addr =
csid_reg->ipp_reg->csid_pxl_status_addr;
csid_ctrl_reg = csid_reg->ipp_reg->csid_pxl_ctrl_addr;
csid_cfg0_reg = csid_reg->ipp_reg->csid_pxl_cfg0_addr;
csid_cfg1_reg = csid_reg->ipp_reg->csid_pxl_cfg1_addr;
if (csid_hw->ipp_res.res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
continue;
@ -2334,9 +2374,6 @@ static int cam_tfe_csid_poll_stop_status(
} else if (res_id == CAM_TFE_CSID_PATH_RES_PPP) {
csid_status_addr =
csid_reg->ppp_reg->csid_pxl_status_addr;
csid_ctrl_reg = csid_reg->ppp_reg->csid_pxl_ctrl_addr;
csid_cfg0_reg = csid_reg->ppp_reg->csid_pxl_cfg0_addr;
csid_cfg1_reg = csid_reg->ppp_reg->csid_pxl_cfg1_addr;
if (csid_hw->ppp_res.res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
@ -2345,9 +2382,6 @@ static int cam_tfe_csid_poll_stop_status(
} else {
csid_status_addr =
csid_reg->rdi_reg[res_id]->csid_rdi_status_addr;
csid_ctrl_reg = csid_reg->rdi_reg[res_id]->csid_rdi_ctrl_addr;
csid_cfg0_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg0_addr;
csid_cfg1_reg = csid_reg->rdi_reg[res_id]->csid_rdi_cfg1_addr;
if (csid_hw->rdi_res[res_id].res_state !=
CAM_ISP_RESOURCE_STATE_STREAMING)
@ -2368,13 +2402,6 @@ static int cam_tfe_csid_poll_stop_status(
if (rc < 0) {
CAM_ERR(CAM_ISP, "CSID:%d res:%d halt failed rc %d",
csid_hw->hw_intf->hw_idx, res_id, rc);
CAM_ERR(CAM_ISP, "CSID:%d status:0x%x ctrl_reg:0x%x cfg0:0x%x cfg1:0x%x",
csid_hw->hw_intf->hw_idx,
cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_status_addr),
cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_ctrl_reg),
cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg0_reg),
cam_io_r_mb(soc_info->reg_map[0].mem_base + csid_cfg1_reg));
rc = -ETIMEDOUT;
break;
}
@ -2699,8 +2726,10 @@ static int cam_tfe_csid_reserve(void *hw_priv,
return -EINVAL;
}
CAM_DBG(CAM_ISP, "res_type %d, CSID: %u",
reserv->res_type, csid_hw->hw_intf->hw_idx);
csid_hw->is_secure = reserv->out_port->secure_mode;
CAM_DBG(CAM_ISP, "res_type %d, CSID: %u is_secure: %d",
reserv->res_type, csid_hw->hw_intf->hw_idx, csid_hw->is_secure);
mutex_lock(&csid_hw->hw_info->hw_mutex);
rc = cam_tfe_csid_path_reserve(csid_hw, reserv);
@ -2746,6 +2775,7 @@ static int cam_tfe_csid_release(void *hw_priv,
csid_hw->event_cb = NULL;
csid_hw->event_cb_priv = NULL;
csid_hw->is_secure = false;
if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
(res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
@ -3065,7 +3095,6 @@ static int cam_tfe_csid_stop(void *hw_priv,
struct cam_tfe_csid_hw_stop_args *csid_stop;
uint32_t i;
uint32_t res_mask = 0;
bool csid_with_ppp_en = false;
if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_tfe_csid_hw_stop_args))) {
@ -3081,13 +3110,9 @@ static int cam_tfe_csid_stop(void *hw_priv,
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_tfe_csid_hw *)csid_hw_info->core_info;
if (csid_hw->ppp_res.res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
csid_with_ppp_en = true;
CAM_DBG(CAM_ISP, "CSID:%d num_res %d csid_with_ppp_en:%d",
csid_hw->hw_intf->hw_idx, csid_stop->num_res,
csid_with_ppp_en);
CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
csid_hw->hw_intf->hw_idx,
csid_stop->num_res);
/* Stop the resource first */
for (i = 0; i < csid_stop->num_res; i++) {
@ -3100,7 +3125,7 @@ static int cam_tfe_csid_stop(void *hw_priv,
res_mask |= (1 << res->res_id);
if (res->res_id == CAM_TFE_CSID_PATH_RES_IPP)
rc = cam_tfe_csid_disable_pxl_path(csid_hw,
res, csid_stop->stop_cmd, csid_with_ppp_en);
res, csid_stop->stop_cmd);
else if (res->res_id == CAM_TFE_CSID_PATH_RES_PPP)
rc = cam_tfe_csid_disable_ppp_path(csid_hw,
res, csid_stop->stop_cmd);

View File

@ -160,7 +160,6 @@ struct cam_tfe_csid_pxl_reg_offset {
uint32_t early_eof_en_shift_val;
uint32_t halt_master_sel_shift;
uint32_t halt_mode_shift;
uint32_t halt_cmd_shift;
uint32_t halt_master_sel_master_val;
uint32_t halt_master_sel_slave_val;
uint32_t binning_supported;
@ -332,6 +331,33 @@ struct cam_tfe_csid_reg_offset {
const struct cam_tfe_csid_rdi_reg_offset *rdi_reg[CAM_TFE_CSID_RDI_MAX];
};
/**
* struct cam_tfe_csid_secure_info: Contains all relevant info to be
* programmed for targets supporting
* this feature
* @phy_sel: Intermediate value for this mask. CSID passes
* phy_sel. This field must remain at the top of the
* struct so that it is interpreted correctly in the
* cam_subdev_notify_message callback for csiphy
* @lane_cfg: This value is similar to lane_assign in the PHY
* driver, and is used to identify the particular
* PHY instance to which this session is
* connected.
* @vc_mask: Virtual channel masks (Unused for mobile usecase)
* @csid_hw_idx_mask: Bit position denoting CSID(s) in use for secure
* session
* @cdm_hw_idx_mask: Bit position denoting CDM in use for secure
* session
*/
struct cam_tfe_csid_secure_info {
uint32_t phy_sel;
uint32_t lane_cfg;
uint64_t vc_mask;
uint32_t csid_hw_idx_mask;
uint32_t cdm_hw_idx_mask;
};
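Because cam_subdev_notify_message() hands this struct to the PHY driver as an opaque pointer, the requirement that phy_sel stay at the top can be illustrated with a small receiver-side sketch; the handler name below is hypothetical, only the struct and the notify call come from this change:

/* Hypothetical receiver sketch: the payload arrives as a void pointer, so the
 * CSIPHY side relies on phy_sel being the first member when interpreting it.
 */
static void csiphy_domain_id_params_sketch(void *data)
{
	struct cam_tfe_csid_secure_info *info = data;

	/* phy_sel routes the update to the right PHY instance */
	CAM_DBG(CAM_CSIPHY, "phy_sel %u lane_cfg 0x%x csid_mask 0x%x",
		info->phy_sel, info->lane_cfg, info->csid_hw_idx_mask);
}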
/**
* struct cam_tfe_csid_hw_info- CSID HW info
*
@ -425,8 +451,7 @@ struct cam_tfe_csid_cid_data {
* one more frame than pix.
* @res_sof_cnt path resource sof count value. It is used for initial
* frame drop
* @is_shdr_master flag to indicate path to be shdr master
* @is_shdr flag to indicate if shdr mode is enabled
*
*/
struct cam_tfe_csid_path_cfg {
struct vc_dt_data vc_dt[CAM_ISP_TFE_VC_DT_CFG];
@ -454,8 +479,6 @@ struct cam_tfe_csid_path_cfg {
uint32_t usage_type;
uint32_t init_frame_drop;
uint32_t res_sof_cnt;
bool is_shdr_master;
bool is_shdr;
};
/**
@ -515,6 +538,7 @@ struct cam_csid_evt_payload {
* @prev_boot_timestamp previous frame boot timestamp
* @prev_qtimer_ts previous frame qtimer csid timestamp
* @sync_clk sync clocks such that freq(TFE)>freq(CSID)>freq(CSIPHY)
* @is_secure Flag to denote secure operation
*
*/
struct cam_tfe_csid_hw {
@ -552,6 +576,7 @@ struct cam_tfe_csid_hw {
uint64_t prev_boot_timestamp;
uint64_t prev_qtimer_ts;
bool sync_clk;
bool is_secure;
};
int cam_tfe_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,

View File

@ -2494,8 +2494,8 @@ static int cam_tfe_camif_resource_start(
cam_io_w_mb(val, rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0);
CAM_DBG(CAM_ISP, "TFE:%d core_cfg_0 val:0x%x", core_info->core_index,
cam_io_r_mb(rsrc_data->mem_base + rsrc_data->common_reg->core_cfg_0));
CAM_DBG(CAM_ISP, "TFE:%d core_cfg 0 val:0x%x", core_info->core_index,
val);
if (cam_cpas_get_cpas_hw_version(&camera_hw_version))
CAM_ERR(CAM_ISP, "Failed to get HW version");

View File

@ -594,6 +594,7 @@ static void cam_csiphy_program_common_registers(
}
}
#ifndef CONFIG_CSF_2_5_SECURE_CAMERA
static int cam_csiphy_update_secure_info(struct csiphy_device *csiphy_dev, int32_t index)
{
uint64_t lane_assign_bitmask = 0;
@ -664,6 +665,7 @@ static int cam_csiphy_update_secure_info(struct csiphy_device *csiphy_dev, int32
return 0;
}
#endif
static int cam_csiphy_get_lane_enable(
struct csiphy_device *csiphy, int index,
@ -913,7 +915,7 @@ static int __cam_csiphy_parse_lane_info_cmd_buf(
csiphy_dev->csiphy_info[index].lane_enable |= lane_enable;
lane_assign >>= 4;
}
#ifndef CONFIG_CSF_2_5_SECURE_CAMERA
if (csiphy_dev->csiphy_info[index].secure_mode == 1) {
rc = cam_csiphy_update_secure_info(csiphy_dev, index);
if (rc) {
@ -922,7 +924,7 @@ static int __cam_csiphy_parse_lane_info_cmd_buf(
goto reset_settings;
}
}
#endif
CAM_DBG(CAM_CSIPHY,
"phy version:%d, phy_idx: %d, preamble_en: %u",
csiphy_dev->hw_version,

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "cam_csiphy_dev.h"
@ -11,8 +11,10 @@
#include <media/cam_sensor.h>
#include "camera_main.h"
#include <dt-bindings/msm-camera.h>
#include "cam_cpas_api.h"
#define CSIPHY_DEBUGFS_NAME_MAX_SIZE 10
#define CAM_MAX_PHYS_PER_CP_CTRL_REG 4
static struct dentry *root_dentry;
static inline void cam_csiphy_trigger_reg_dump(struct csiphy_device *csiphy_dev)
@ -33,7 +35,21 @@ static int cam_csiphy_format_secure_phy_lane_info(
{
struct cam_csiphy_param *param;
uint64_t phy_lane_sel_mask = 0;
uint32_t cpas_version;
uint32_t bit_offset_bet_phys_in_cp_ctrl;
int rc;
if (csiphy_dev->soc_info.index > MAX_SUPPORTED_PHY_IDX) {
CAM_ERR(CAM_CSIPHY, "Invalid PHY index: %u",
csiphy_dev->soc_info.index);
return -EINVAL;
}
rc = cam_cpas_get_cpas_hw_version(&cpas_version);
if (rc) {
CAM_ERR(CAM_CPAS, "Failed while getting CPAS Version");
return rc;
}
param = &csiphy_dev->csiphy_info[offset];
if (param->csiphy_3phase) {
@ -43,7 +59,6 @@ static int cam_csiphy_format_secure_phy_lane_info(
phy_lane_sel_mask |= LANE_1_SEL;
if (param->lane_enable & CPHY_LANE_2)
phy_lane_sel_mask |= LANE_2_SEL;
phy_lane_sel_mask <<= CPHY_LANE_SELECTION_SHIFT;
} else {
if (param->lane_enable & DPHY_LANE_0)
phy_lane_sel_mask |= LANE_0_SEL;
@ -53,16 +68,51 @@ static int cam_csiphy_format_secure_phy_lane_info(
phy_lane_sel_mask |= LANE_2_SEL;
if (param->lane_enable & DPHY_LANE_3)
phy_lane_sel_mask |= LANE_3_SEL;
phy_lane_sel_mask <<= DPHY_LANE_SELECTION_SHIFT;
}
if (csiphy_dev->soc_info.index > MAX_SUPPORTED_PHY_IDX) {
CAM_ERR(CAM_CSIPHY, "Invalid PHY index: %u",
csiphy_dev->soc_info.index);
return -EINVAL;
switch(cpas_version)
{
case CAM_CPAS_TITAN_665_V100:
bit_offset_bet_phys_in_cp_ctrl =
CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES + 1;
break;
default:
bit_offset_bet_phys_in_cp_ctrl =
CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES;
}
if (CAM_CPAS_TITAN_665_V100 == cpas_version)
{
if (csiphy_dev->soc_info.index < CAM_MAX_PHYS_PER_CP_CTRL_REG)
{
phy_lane_sel_mask = phy_lane_sel_mask <<
((csiphy_dev->soc_info.index * bit_offset_bet_phys_in_cp_ctrl) +
(!param->csiphy_3phase) *
(CAM_CSIPHY_MAX_CPHY_LANES));
}
else
{
phy_lane_sel_mask = phy_lane_sel_mask <<
((csiphy_dev->soc_info.index - CAM_MAX_PHYS_PER_CP_CTRL_REG) *
bit_offset_bet_phys_in_cp_ctrl +
(!param->csiphy_3phase) *
(CAM_CSIPHY_MAX_CPHY_LANES));
}
*mask = phy_lane_sel_mask;
}
else
{
if (param->csiphy_3phase)
{
phy_lane_sel_mask = phy_lane_sel_mask << CPHY_LANE_SELECTION_SHIFT;
}
else
{
phy_lane_sel_mask = phy_lane_sel_mask << DPHY_LANE_SELECTION_SHIFT;
}
phy_lane_sel_mask |= BIT(csiphy_dev->soc_info.index);
*mask = phy_lane_sel_mask;
}
phy_lane_sel_mask |= BIT(csiphy_dev->soc_info.index);
*mask = phy_lane_sel_mask;
CAM_DBG(CAM_CSIPHY, "Formatted PHY[%u] phy_lane_sel_mask: 0x%llx",
csiphy_dev->soc_info.index, *mask);
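As a worked illustration of the Titan 665 layout computed above: each PHY gets its own bit field inside the CP control mask, and a D-PHY's lane-select bits sit after the C-PHY lane bits within that field. The lane-count constants and LANE_n_SEL bit values below are assumptions for illustration only, not taken from this change.

#include <stdint.h>
#include <stdio.h>

#define MAX_DPHY_LANES  4  /* assumed value of CAM_CSIPHY_MAX_DPHY_LANES */
#define MAX_CPHY_LANES  3  /* assumed value of CAM_CSIPHY_MAX_CPHY_LANES */

int main(void)
{
	uint64_t lane_sel = 0x3;  /* lanes 0 and 1 selected, assuming LANE_n_SEL are consecutive bits */
	uint32_t phy_idx = 2;     /* hypothetical D-PHY instance, below CAM_MAX_PHYS_PER_CP_CTRL_REG */
	uint32_t per_phy_bits = MAX_DPHY_LANES + MAX_CPHY_LANES + 1;  /* 8 bits per PHY on Titan 665 */
	uint64_t mask;

	/* D-PHY lane bits are offset past the C-PHY lane bits of the same PHY */
	mask = lane_sel << (phy_idx * per_phy_bits + MAX_CPHY_LANES);
	printf("CP ctrl mask for PHY%u: 0x%llx\n", phy_idx,
	       (unsigned long long)mask);  /* prints 0x180000 (0x3 << 19) */
	return 0;
}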

View File

@ -318,7 +318,7 @@ void cam_free_clear(const void * ptr)
}
#endif
#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA
#ifdef CONFIG_CSF_2_5_SECURE_CAMERA
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
bool protect, int32_t offset, bool is_shutdown)
{

View File

@ -44,7 +44,7 @@
MODULE_IMPORT_NS(DMA_BUF);
#endif
#ifdef CONFIG_DOMAIN_ID_SECURE_CAMERA
#ifdef CONFIG_CSF_2_5_SECURE_CAMERA
#include <linux/IClientEnv.h>
#include <linux/ITrustedCameraDriver.h>
#include <linux/CTrustedCameraDriver.h>

View File

@ -11,3 +11,4 @@ CONFIG_MSM_MMRM=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_DOMAIN_ID_SECURE_CAMERA=y
CONFIG_DYNAMIC_FD_PORT_CONFIG=y
CONFIG_CSF_2_5_SECURE_CAMERA=y

View File

@ -4,3 +4,4 @@ CONFIG_SPECTRA_TFE=y
CONFIG_SPECTRA_CRE=y
CONFIG_SPECTRA_SENSOR=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_CSF_2_5_SECURE_CAMERA=y