Add 'qcom/opensource/datarmnet/' from commit 'c5d70f15d54c8489bd452d52729bcfcd301ab985'
git-subtree-dir: qcom/opensource/datarmnet
git-subtree-mainline: 235599e5f6
git-subtree-split: c5d70f15d5
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/qcom/opensource/datarmnet
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
This commit is contained in: commit 16648ad2df
qcom/opensource/datarmnet/BUILD.bazel (Normal file, 36 lines)
@@ -0,0 +1,36 @@
load(":define_modules.bzl", "define_modules")
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")

define_modules("pineapple", "consolidate")

define_modules("pineapple", "gki")

define_modules("blair", "consolidate")

define_modules("blair", "gki")

define_modules("monaco", "consolidate")

define_modules("monaco", "gki")

define_modules("pitti", "consolidate")

define_modules("pitti", "gki")

define_modules("volcano", "consolidate")

define_modules("volcano", "gki")

package(
    default_visibility = [
        "//visibility:public",
    ],
)

ddk_headers(
    name = "rmnet_core_headers",
    hdrs = glob([
        "core/*.h",
    ]),
    includes = ["core"],
)
qcom/opensource/datarmnet/core/Android.mk (Normal file, 44 lines)
@@ -0,0 +1,44 @@
ifeq ($(TARGET_DATARMNET_ENABLE), true)
ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_CORE_DLKM_PLATFORMS_LIST := pineapple
RMNET_CORE_DLKM_PLATFORMS_LIST += blair
RMNET_CORE_DLKM_PLATFORMS_LIST += monaco
RMNET_CORE_DLKM_PLATFORMS_LIST += pitti
RMNET_CORE_DLKM_PLATFORMS_LIST += volcano

ifeq ($(call is-board-platform-in-list, $(RMNET_CORE_DLKM_PLATFORMS_LIST)),true)
# Makefile to create the RMNET_CORE DLKM
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

BOARD_COMMON_DIR ?= device/qcom/common

# Enabling BAZEL
LOCAL_MODULE_DDK_BUILD := true

LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
LOCAL_CLANG := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_core.ko
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
KBUILD_REQUIRED_KOS := ipam.ko
DLKM_DIR := $(TOP)/$(BOARD_COMMON_DIR)/dlkm
$(warning $(DLKM_DIR))
include $(DLKM_DIR)/Build_external_kernelmodule.mk

######## Create the RMNET_CTL DLKM ########
include $(CLEAR_VARS)

LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
LOCAL_CLANG := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_ctl.ko
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
KBUILD_REQUIRED_KOS := ipam.ko
DLKM_DIR := $(TOP)/$(BOARD_COMMON_DIR)/dlkm
$(warning $(DLKM_DIR))
include $(DLKM_DIR)/Build_external_kernelmodule.mk

endif # End of check for target platform
endif # End of check for qssi target
endif # End of check for datarmnet
qcom/opensource/datarmnet/core/Kbuild (Normal file, 36 lines)
@@ -0,0 +1,36 @@
ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_MONACO) $(CONFIG_ARCH_PITTI)))
ccflags-y += -DRMNET_LA_PLATFORM
endif

obj-m += rmnet_core.o

# core sources
rmnet_core-y := \
        rmnet_config.o \
        rmnet_handlers.o \
        rmnet_descriptor.o \
        rmnet_genl.o \
        rmnet_map_command.o \
        rmnet_map_data.o \
        rmnet_module.o \
        rmnet_vnd.o

rmnet_core-y += \
        rmnet_ll.o \
        rmnet_ll_ipa.o

# DFC sources
rmnet_core-y += \
        qmi_rmnet.o \
        wda_qmi.o \
        dfc_qmi.o \
        dfc_qmap.o \
        rmnet_qmap.o \
        rmnet_ll_qmap.o

ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_PITTI)))
obj-m += rmnet_ctl.o
rmnet_ctl-y := \
        rmnet_ctl_client.o \
        rmnet_ctl_ipa.o
endif
qcom/opensource/datarmnet/core/Kconfig (Normal file, 29 lines)
@@ -0,0 +1,29 @@
#
# RMNET MAP driver
#

config RMNET_CORE
        default m
        depends on RMNET_MEM
        select GRO_CELLS
        help
          If you select this, you will enable the RMNET module which is used
          for handling data in the multiplexing and aggregation protocol (MAP)
          format in the embedded data path. RMNET devices can be attached to
          any IP-mode physical device.

config RMNET_CTL
        default m
        help
          Enable the RMNET CTL module which is used for handling QMAP commands
          for flow control purposes.

config RMNET_LA_PLATFORM
        default y
        bool "RMNET platform support"
        depends on ARCH_PINEAPPLE || ARCH_BLAIR || ARCH_MONACO || ARCH_PITTI
        help
          Enable the functionality gated by the RMNET_LA_PLATFORM configuration
          in the rmnet driver.
          This should be set automatically based on the target used for
          compilation.
qcom/opensource/datarmnet/core/Makefile (Normal file, 50 lines)
@@ -0,0 +1,50 @@
ifeq ($(RELEASE_PACKAGE),1)
EXTRA_CFLAGS += -DRELEASE_PACKAGE
endif
LBITS ?= $(shell getconf LONG_BIT)
ifeq ($(LBITS),64)
CCFLAGS += -m64
EXTRA_CFLAGS += -DSYSTEM_IS_64
else
CCFLAGS += -m32
endif

M ?= $(shell pwd)
#obj-m := rmnet_core.o rmnet_ctl.o

rmnet_core-y += rmnet_config.o \
        rmnet_descriptor.o \
        rmnet_genl.o \
        rmnet_handlers.o \
        rmnet_map_command.o \
        rmnet_map_data.o \
        rmnet_module.o \
        rmnet_vnd.o \
        dfc_qmap.o \
        dfc_qmi.o \
        qmi_rmnet.o \
        wda_qmi.o \
        rmnet_ll.o \
        rmnet_ll_ipa.o \
        rmnet_qmap.o \
        rmnet_ll_qmap.o

ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE)))
rmnet_ctl-y += rmnet_ctl_client.o \
        rmnet_ctl_ipa.o \
        rmnet_ctl_mhi.o
endif

KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build

KBUILD_OPTIONS := RMNET_CORE_ROOT=$(PWD)
KBUILD_OPTIONS += MODNAME?=rmnet_core

all:
        $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)

modules_install:
        $(MAKE) -C $(KERNEL_SRC) M=$(M) modules_install

clean:
        $(MAKE) -C $(KERNEL_SRC) M=$(M) clean
qcom/opensource/datarmnet/core/Makefile.am (Normal file, 34 lines)
@@ -0,0 +1,34 @@
ifeq ($(RELEASE_PACKAGE),1)
EXTRA_CFLAGS += -DRELEASE_PACKAGE
endif
LBITS := $(shell getconf LONG_BIT)
ifeq ($(LBITS),64)
CCFLAGS += -m64
EXTRA_CFLAGS += -DSYSTEM_IS_64
else
CCFLAGS += -m32
endif

rmnet_core_dir = $(prefix)/rmnet_core
rmnet_core_CFLAGS = -Werror

KERNEL_FLAGS ?= ARCH=arm

module = rmnet_core.ko
kmake = $(MAKE) $(KERNEL_FLAGS) -C $(KERNEL_DIR) M=$(CURDIR)

$(module):
        $(kmake) modules

all-local: $(module)

install-exec-local: $(module)
        $(kmake) INSTALL_MOD_PATH=$(DESTDIR)$(prefix)/modules modules_install

# "make distclean" will always run clean-local in this directory,
# regardless of the KERNELMODULES conditional. Therefore, ensure
# KERNEL_DIR exists before running clean. Further, don't fail even
# if there is a problem.
clean-local:
        -test ! -d "$(KERNEL_DIR)" || $(kmake) clean
qcom/opensource/datarmnet/core/dfc.h (Normal file, 385 lines)
@@ -0,0 +1,385 @@
/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/version.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dfc
#undef TRACE_INCLUDE_PATH

#ifndef RMNET_TRACE_INCLUDE_PATH
#if defined(CONFIG_RMNET_LA_PLATFORM)
#ifdef CONFIG_ARCH_KHAJE
#define RMNET_TRACE_INCLUDE_PATH ../../../../../vendor/qcom/opensource/datarmnet/core
#else
#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#endif /* CONFIG_ARCH_KHAJE */
#elif defined(__arch_um__)
#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
#else
#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
#endif /* RMNET_TRACE_INCLUDE_PATH */

#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_FILE dfc

#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DFC_H

#include <linux/tracepoint.h>

TRACE_EVENT(dfc_qmi_tc,

    TP_PROTO(const char *name, u32 txq, int enable),

    TP_ARGS(name, txq, enable),

    TP_STRUCT__entry(
        __string(dev_name, name)
        __field(u32, txq)
        __field(int, enable)
    ),

    TP_fast_assign(
        __assign_str(dev_name, name);
        __entry->txq = txq;
        __entry->enable = enable;
    ),

    TP_printk("dev=%s txq=%u %s",
        __get_str(dev_name),
        __entry->txq,
        __entry->enable ? "enable" : "disable")
);

TRACE_EVENT(dfc_flow_ind,

    TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
             u16 seq_num, u8 ack_req, u32 ancillary),

    TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req,
            ancillary),

    TP_STRUCT__entry(
        __field(int, src)
        __field(int, idx)
        __field(u8, mid)
        __field(u8, bid)
        __field(u32, grant)
        __field(u16, seq)
        __field(u8, ack_req)
        __field(u32, ancillary)
    ),

    TP_fast_assign(
        __entry->src = src;
        __entry->idx = idx;
        __entry->mid = mux_id;
        __entry->bid = bearer_id;
        __entry->grant = grant;
        __entry->seq = seq_num;
        __entry->ack_req = ack_req;
        __entry->ancillary = ancillary;
    ),

    TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u",
        __entry->src, __entry->idx, __entry->mid, __entry->bid,
        __entry->grant, __entry->seq, __entry->ack_req,
        __entry->ancillary)
);

TRACE_EVENT(dfc_flow_check,

    TP_PROTO(const char *name, u8 bearer_id, unsigned int len,
             u32 mark, u32 grant),

    TP_ARGS(name, bearer_id, len, mark, grant),

    TP_STRUCT__entry(
        __string(dev_name, name)
        __field(u8, bearer_id)
        __field(unsigned int, len)
        __field(u32, mark)
        __field(u32, grant)
    ),

    TP_fast_assign(
        __assign_str(dev_name, name)
        __entry->bearer_id = bearer_id;
        __entry->len = len;
        __entry->mark = mark;
        __entry->grant = grant;
    ),

    TP_printk("dev=%s bearer_id=%u skb_len=%u mark=%u current_grant=%u",
        __get_str(dev_name), __entry->bearer_id,
        __entry->len, __entry->mark, __entry->grant)
);

TRACE_EVENT(dfc_flow_info,

    TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type,
             u32 handle, int add),

    TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add),

    TP_STRUCT__entry(
        __string(dev_name, name)
        __field(u8, bid)
        __field(u32, fid)
        __field(int, ip)
        __field(u32, handle)
        __field(int, action)
    ),

    TP_fast_assign(
        __assign_str(dev_name, name)
        __entry->bid = bearer_id;
        __entry->fid = flow_id;
        __entry->ip = ip_type;
        __entry->handle = handle;
        __entry->action = add;
    ),

    TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d txq=%d",
        __entry->action ? "add flow" : "delete flow",
        __get_str(dev_name),
        __entry->bid, __entry->fid, __entry->ip, __entry->handle)
);

TRACE_EVENT(dfc_client_state_up,

    TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface),

    TP_ARGS(idx, instance, ep_type, iface),

    TP_STRUCT__entry(
        __field(int, idx)
        __field(u32, instance)
        __field(u32, ep_type)
        __field(u32, iface)
    ),

    TP_fast_assign(
        __entry->idx = idx;
        __entry->instance = instance;
        __entry->ep_type = ep_type;
        __entry->iface = iface;
    ),

    TP_printk("DFC Client[%d] connect: instance=%u ep_type=%u iface_id=%u",
        __entry->idx, __entry->instance,
        __entry->ep_type, __entry->iface)
);

TRACE_EVENT(dfc_client_state_down,

    TP_PROTO(int idx, int from_cb),

    TP_ARGS(idx, from_cb),

    TP_STRUCT__entry(
        __field(int, idx)
        __field(int, from_cb)
    ),

    TP_fast_assign(
        __entry->idx = idx;
        __entry->from_cb = from_cb;
    ),

    TP_printk("DFC Client[%d] exit: callback %d",
        __entry->idx, __entry->from_cb)
);

TRACE_EVENT(dfc_qmap_cmd,

    TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran),

    TP_ARGS(mux_id, bearer_id, seq_num, type, tran),

    TP_STRUCT__entry(
        __field(u8, mid)
        __field(u8, bid)
        __field(u16, seq)
        __field(u8, type)
        __field(u32, tran)
    ),

    TP_fast_assign(
        __entry->mid = mux_id;
        __entry->bid = bearer_id;
        __entry->seq = seq_num;
        __entry->type = type;
        __entry->tran = tran;
    ),

    TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u",
        __entry->mid, __entry->bid, __entry->seq,
        __entry->type, __entry->tran)
);

TRACE_EVENT(dfc_tx_link_status_ind,

    TP_PROTO(int src, int idx, u8 status, u8 mux_id, u8 bearer_id),

    TP_ARGS(src, idx, status, mux_id, bearer_id),

    TP_STRUCT__entry(
        __field(int, src)
        __field(int, idx)
        __field(u8, status)
        __field(u8, mid)
        __field(u8, bid)
    ),

    TP_fast_assign(
        __entry->src = src;
        __entry->idx = idx;
        __entry->status = status;
        __entry->mid = mux_id;
        __entry->bid = bearer_id;
    ),

    TP_printk("src=%d [%d]: status=%u mux_id=%u bearer_id=%u",
        __entry->src, __entry->idx, __entry->status,
        __entry->mid, __entry->bid)
);

TRACE_EVENT(dfc_qmap,

    TP_PROTO(const void *data, size_t len, bool in, u8 chn),

    TP_ARGS(data, len, in, chn),

    TP_STRUCT__entry(
        __field(bool, in)
        __field(size_t, len)
        __dynamic_array(u8, data, len)
        __field(u8, chn)
    ),

    TP_fast_assign(
        __entry->in = in;
        __entry->len = len;
        memcpy(__get_dynamic_array(data), data, len);
        __entry->chn = chn;
    ),

    TP_printk("[0x%02x] %s %s", __entry->chn,
        __entry->in ? "<--" : "-->",
        __print_array(__get_dynamic_array(data), __entry->len,
                      sizeof(u8)))
);

TRACE_EVENT(dfc_adjust_grant,

    TP_PROTO(u8 mux_id, u8 bearer_id, u32 grant, u32 rx_bytes,
             u32 inflight, u32 a_grant),

    TP_ARGS(mux_id, bearer_id, grant, rx_bytes, inflight, a_grant),

    TP_STRUCT__entry(
        __field(u8, mux_id)
        __field(u8, bearer_id)
        __field(u32, grant)
        __field(u32, rx_bytes)
        __field(u32, inflight)
        __field(u32, a_grant)
    ),

    TP_fast_assign(
        __entry->mux_id = mux_id;
        __entry->bearer_id = bearer_id;
        __entry->grant = grant;
        __entry->rx_bytes = rx_bytes;
        __entry->inflight = inflight;
        __entry->a_grant = a_grant;
    ),

    TP_printk("mid=%u bid=%u grant=%u rx=%u inflight=%u adjusted_grant=%u",
        __entry->mux_id, __entry->bearer_id, __entry->grant,
        __entry->rx_bytes, __entry->inflight, __entry->a_grant)
);

TRACE_EVENT(dfc_watchdog,

    TP_PROTO(u8 mux_id, u8 bearer_id, u8 event),

    TP_ARGS(mux_id, bearer_id, event),

    TP_STRUCT__entry(
        __field(u8, mux_id)
        __field(u8, bearer_id)
        __field(u8, event)
    ),

    TP_fast_assign(
        __entry->mux_id = mux_id;
        __entry->bearer_id = bearer_id;
        __entry->event = event;
    ),

    TP_printk("mid=%u bid=%u event=%u",
        __entry->mux_id, __entry->bearer_id, __entry->event)
);

TRACE_EVENT(dfc_ll_switch,

    TP_PROTO(const char *cmd, u8 type, u8 num_bearer, void *bearers),

    TP_ARGS(cmd, type, num_bearer, bearers),

    TP_STRUCT__entry(
        __string(cmd_str, cmd)
        __field(u8, type)
        __field(u8, num_bearer)
        __dynamic_array(u8, bearers, num_bearer)
    ),

    TP_fast_assign(
        __assign_str(cmd_str, cmd)
        __entry->type = type;
        __entry->num_bearer = num_bearer;
        memcpy(__get_dynamic_array(bearers), bearers, num_bearer);
    ),

    TP_printk("%s type=%u num_bearer=%u bearers={%s}",
        __get_str(cmd_str),
        __entry->type,
        __entry->num_bearer,
        __print_array(__get_dynamic_array(bearers),
                      __entry->num_bearer, 1))
);

TRACE_EVENT(dfc_set_powersave_mode,

    TP_PROTO(int enable),

    TP_ARGS(enable),

    TP_STRUCT__entry(
        __field(int, enable)
    ),

    TP_fast_assign(
        __entry->enable = enable;
    ),

    TP_printk("set powersave mode to %s",
        __entry->enable ? "enable" : "disable")
);

#endif /* _TRACE_DFC_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
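For reference: each TRACE_EVENT(name, ...) above generates a trace_name() inline that driver code calls at the instrumentation point, and exactly one translation unit in the module must define CREATE_TRACE_POINTS before including this header so the tracepoints are instantiated. A minimal sketch of the caller side (the function name below is illustrative, not part of this commit):

#include <linux/netdevice.h>
#include "dfc.h"    /* pulls in the generated trace_dfc_*() hooks */

/* Illustrative only: emit dfc_qmi_tc when toggling flow control on a TX queue. */
static void sketch_txq_toggle(struct net_device *dev, u32 txq, int enable)
{
    /* Compiles to a near no-op unless the "dfc" trace system is enabled. */
    trace_dfc_qmi_tc(dev->name, txq, enable);
}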
qcom/opensource/datarmnet/core/dfc_defs.h (Normal file, 101 lines)
@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _DFC_DEFS_H
#define _DFC_DEFS_H

#include <linux/soc/qcom/qmi.h>
#include "qmi_rmnet_i.h"

#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2

#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

#define DFC_MAX_QOS_ID_V01 2

struct dfc_qmi_data {
    void *rmnet_port;
    struct workqueue_struct *dfc_wq;
    struct work_struct svc_arrive;
    struct qmi_handle handle;
    struct sockaddr_qrtr ssctl;
    struct svc_info svc;
    struct work_struct qmi_ind_work;
    struct list_head qmi_ind_q;
    spinlock_t qmi_ind_lock;
    int index;
    int restart_state;
};

enum dfc_ip_type_enum_v01 {
    DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
    DFC_IPV4_TYPE_V01 = 0x4,
    DFC_IPV6_TYPE_V01 = 0x6,
    DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct dfc_qos_id_type_v01 {
    u32 qos_id;
    enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_flow_status_info_type_v01 {
    u8 subs_id;
    u8 mux_id;
    u8 bearer_id;
    u32 num_bytes;
    u16 seq_num;
    u8 qos_ids_len;
    struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
    u8 rx_bytes_valid;
    u32 rx_bytes;
    u8 ll_status;
};

struct dfc_ancillary_info_type_v01 {
    u8 subs_id;
    u8 mux_id;
    u8 bearer_id;
    u32 reserved;
};

struct dfc_flow_status_ind_msg_v01 {
    u8 flow_status_valid;
    u8 flow_status_len;
    struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
    u8 eod_ack_reqd_valid;
    u8 eod_ack_reqd;
    u8 ancillary_info_valid;
    u8 ancillary_info_len;
    struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};

struct dfc_bearer_info_type_v01 {
    u8 subs_id;
    u8 mux_id;
    u8 bearer_id;
    enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_tx_link_status_ind_msg_v01 {
    u8 tx_status;
    u8 bearer_info_valid;
    u8 bearer_info_len;
    struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};

void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
                               struct dfc_flow_status_ind_msg_v01 *ind,
                               bool is_query);

void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
                                   struct dfc_tx_link_status_ind_msg_v01 *ind);

#endif /* _DFC_DEFS_H */
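The DFC_MASK_*/DFC_IS_* helpers above pack ancillary flags into the reserved field of dfc_ancillary_info_type_v01. A small sketch of decoding them, mirroring how dfc_qmap.c fills that field in (the function name is illustrative, not from this commit):

#include <linux/printk.h>
#include "dfc_defs.h"

/* Illustrative only: decode the ancillary bits carried in "reserved". */
static void sketch_decode_ancillary(const struct dfc_ancillary_info_type_v01 *ai)
{
    if (DFC_IS_TCP_BIDIR(ai->reserved))
        pr_debug("bearer %u: bidirectional TCP traffic\n", ai->bearer_id);
    if (DFC_IS_RAT_SWITCH(ai->reserved))
        pr_debug("bearer %u: RAT switch indicated\n", ai->bearer_id);
}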
qcom/opensource/datarmnet/core/dfc_qmap.c (Normal file, 564 lines)
@@ -0,0 +1,564 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <linux/module.h>
#include "rmnet_qmap.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"
#include "rmnet_map.h"

#define QMAP_DFC_VER 1

struct qmap_dfc_config {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 cmd_id;
    u8 reserved;
    u8 tx_info:1;
    u8 reserved2:7;
    __be32 ep_type;
    __be32 iface_id;
    u32 reserved3;
} __aligned(1);

struct qmap_dfc_ind {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 reserved;
    __be16 seq_num;
    u8 reserved2;
    u8 tx_info_valid:1;
    u8 tx_info:1;
    u8 rx_bytes_valid:1;
    u8 reserved3:5;
    u8 bearer_id;
    u8 tcp_bidir:1;
    u8 bearer_status:3;
    u8 ll_status:1;
    u8 reserved4:3;
    __be32 grant;
    __be32 rx_bytes;
    u32 reserved6;
} __aligned(1);

struct qmap_dfc_query {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 reserved;
    u8 bearer_id;
    u8 reserved2;
    u32 reserved3;
} __aligned(1);

struct qmap_dfc_query_resp {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 bearer_id;
    u8 tcp_bidir:1;
    u8 rx_bytes_valid:1;
    u8 reserved:6;
    u8 invalid:1;
    u8 reserved2:7;
    __be32 grant;
    __be32 rx_bytes;
    u32 reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_req {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 reserved;
    u8 bearer_id;
    u8 reserved2;
    u16 reserved3;
    __be16 seq_num;
    u32 reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 reserved;
    u8 bearer_id;
    u8 reserved2;
    u16 reserved3;
    __be16 seq_num;
    u32 reserved4;
} __aligned(1);

struct qmapv5_cmd_hdr {
    u8 pad_len:6;
    u8 next_hdr:1;
    u8 cd_bit:1;
    u8 mux_id;
    __be16 pkt_len;
    struct rmnet_map_v5_csum_header csum_hdr;
    u8 cmd_name;
    u8 cmd_type:2;
    u8 reserved:6;
    u16 reserved2;
    __be32 tx_id;
} __aligned(1);

struct qmapv5_dfc_end_marker_cnf {
    struct qmapv5_cmd_hdr hdr;
    u8 cmd_ver;
    u8 reserved;
    u8 bearer_id;
    u8 reserved2;
    u16 reserved3;
    __be16 seq_num;
    u32 reserved4;
} __aligned(1);

struct qmap_dfc_powersave_req {
    struct qmap_cmd_hdr hdr;
    u8 cmd_ver;
    u8 allow:1;
    u8 autoshut:1;
    u8 reserved:6;
    u8 reserved2;
    u8 mode:1;
    u8 reserved3:7;
    __be32 ep_type;
    __be32 iface_id;
    u8 num_bearers;
    u8 bearer_id[PS_MAX_BEARERS];
    u8 reserved4[3];
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static bool dfc_config_acked;

static void dfc_qmap_send_config(struct dfc_qmi_data *data);
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         struct rmnet_bearer_map *bearer,
                                         u16 seq, u32 tx_id);

static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
                               struct sk_buff *skb)
{
    struct qmap_dfc_ind *cmd;

    if (skb->len < sizeof(struct qmap_dfc_ind))
        return QMAP_CMD_INVALID;

    cmd = (struct qmap_dfc_ind *)skb->data;

    if (cmd->tx_info_valid) {
        memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
        qmap_tx_ind.tx_status = cmd->tx_info;
        qmap_tx_ind.bearer_info_valid = 1;
        qmap_tx_ind.bearer_info_len = 1;
        qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
        qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

        dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

        /* Ignore grant since it is always 0 */
        goto done;
    }

    memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
    qmap_flow_ind.flow_status_valid = 1;
    qmap_flow_ind.flow_status_len = 1;
    qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
    qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
    qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
    qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
    qmap_flow_ind.flow_status[0].ll_status = cmd->ll_status;

    if (cmd->rx_bytes_valid) {
        qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
        qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
    }

    if (cmd->tcp_bidir) {
        qmap_flow_ind.ancillary_info_valid = 1;
        qmap_flow_ind.ancillary_info_len = 1;
        qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
    }

    dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);

done:
    return QMAP_CMD_ACK;
}

static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
                                      struct sk_buff *skb)
{
    struct qmap_dfc_query_resp *cmd;

    if (skb->len < sizeof(struct qmap_dfc_query_resp))
        return QMAP_CMD_DONE;

    cmd = (struct qmap_dfc_query_resp *)skb->data;

    if (cmd->invalid)
        return QMAP_CMD_DONE;

    memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
    qmap_flow_ind.flow_status_valid = 1;
    qmap_flow_ind.flow_status_len = 1;

    qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
    qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
    qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
    qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

    if (cmd->rx_bytes_valid) {
        qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
        qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
    }

    if (cmd->tcp_bidir) {
        qmap_flow_ind.ancillary_info_valid = 1;
        qmap_flow_ind.ancillary_info_len = 1;
        qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
    }

    dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);

    return QMAP_CMD_DONE;
}

static int dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
                                   u8 bearer_id, u16 seq_num, u32 tx_id)
{
    struct net_device *dev;
    struct qos_info *qos;
    struct rmnet_bearer_map *bearer;
    int rc = QMAP_CMD_ACK;

    dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
    if (!dev)
        return rc;

    qos = (struct qos_info *)rmnet_get_qos_pt(dev);
    if (!qos)
        return rc;

    spin_lock_bh(&qos->qos_lock);

    bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
    if (!bearer) {
        spin_unlock_bh(&qos->qos_lock);
        return rc;
    }

    if (bearer->last_seq == seq_num && bearer->grant_size) {
        bearer->ack_req = 1;
        bearer->ack_txid = tx_id;
    } else {
        dfc_qmap_send_end_marker_cnf(qos, bearer, seq_num, tx_id);
    }

    spin_unlock_bh(&qos->qos_lock);

    return QMAP_CMD_DONE;
}

static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
                                          struct sk_buff *skb)
{
    struct qmap_dfc_end_marker_req *cmd;

    if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
        return QMAP_CMD_INVALID;

    cmd = (struct qmap_dfc_end_marker_req *)skb->data;
    return dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
                                   ntohs(cmd->seq_num),
                                   ntohl(cmd->hdr.tx_id));
}

int dfc_qmap_cmd_handler(struct sk_buff *skb)
{
    struct qmap_cmd_hdr *cmd;
    struct dfc_qmi_data *dfc;
    int rc = QMAP_CMD_DONE;

    cmd = (struct qmap_cmd_hdr *)skb->data;

    if (cmd->cmd_name == QMAP_DFC_QUERY) {
        if (cmd->cmd_type != QMAP_CMD_ACK)
            return rc;
    } else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
        if (cmd->cmd_type == QMAP_CMD_ACK &&
            cmd->cmd_name == QMAP_DFC_CONFIG)
            dfc_config_acked = true;
        return rc;
    }

    dfc = rcu_dereference(qmap_dfc_data);
    if (!dfc || READ_ONCE(dfc->restart_state))
        return rc;

    /* Re-send DFC config once if needed */
    if (unlikely(!dfc_config_acked)) {
        dfc_qmap_send_config(dfc);
        dfc_config_acked = true;
    }

    switch (cmd->cmd_name) {
    case QMAP_DFC_IND:
        rc = dfc_qmap_handle_ind(dfc, skb);
        qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
        break;

    case QMAP_DFC_QUERY:
        rc = dfc_qmap_handle_query_resp(dfc, skb);
        break;

    case QMAP_DFC_END_MARKER:
        rc = dfc_qmap_handle_end_marker_req(dfc, skb);
        break;

    default:
        if (cmd->cmd_type == QMAP_CMD_REQUEST)
            rc = QMAP_CMD_UNSUPPORTED;
    }

    return rc;
}

static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
    struct sk_buff *skb;
    struct qmap_dfc_config *dfc_config;
    unsigned int len = sizeof(struct qmap_dfc_config);

    skb = alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return;

    skb->protocol = htons(ETH_P_MAP);
    dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
    memset(dfc_config, 0, len);

    dfc_config->hdr.cd_bit = 1;
    dfc_config->hdr.mux_id = 0;
    dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
    dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
    dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
    dfc_config->hdr.tx_id = htonl(rmnet_qmap_next_txid());

    dfc_config->cmd_ver = QMAP_DFC_VER;
    dfc_config->cmd_id = QMAP_DFC_IND;
    dfc_config->tx_info = 1;
    dfc_config->ep_type = htonl(data->svc.ep_type);
    dfc_config->iface_id = htonl(data->svc.iface_id);

    rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
    struct sk_buff *skb;
    struct qmap_dfc_query *dfc_query;
    unsigned int len = sizeof(struct qmap_dfc_query);

    skb = alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return;

    skb->protocol = htons(ETH_P_MAP);
    dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
    memset(dfc_query, 0, len);

    dfc_query->hdr.cd_bit = 1;
    dfc_query->hdr.mux_id = mux_id;
    dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
    dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
    dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
    dfc_query->hdr.tx_id = htonl(rmnet_qmap_next_txid());

    dfc_query->cmd_ver = QMAP_DFC_VER;
    dfc_query->bearer_id = bearer_id;

    rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         struct rmnet_bearer_map *bearer,
                                         u16 seq, u32 tx_id)
{
    struct sk_buff *skb;
    struct qmapv5_dfc_end_marker_cnf *em_cnf;
    unsigned int len = sizeof(struct qmapv5_dfc_end_marker_cnf);

    skb = alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return;

    em_cnf = (struct qmapv5_dfc_end_marker_cnf *)skb_put(skb, len);
    memset(em_cnf, 0, len);

    em_cnf->hdr.cd_bit = 1;
    em_cnf->hdr.next_hdr = 1;
    em_cnf->hdr.mux_id = qos->mux_id;
    em_cnf->hdr.pkt_len = htons(len -
                                (QMAP_HDR_LEN +
                                 sizeof(struct rmnet_map_v5_csum_header)));
    em_cnf->hdr.csum_hdr.header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
    em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
    em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
    em_cnf->hdr.tx_id = htonl(tx_id);

    em_cnf->cmd_ver = QMAP_DFC_VER;
    em_cnf->bearer_id = bearer->bearer_id;
    em_cnf->seq_num = htons(seq);

    rmnet_qmap_send(skb, bearer->ch_switch.current_ch, true);
}

static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
    struct sk_buff *skb;
    struct qmap_dfc_powersave_req *dfc_powersave;
    unsigned int len = sizeof(struct qmap_dfc_powersave_req);
    struct dfc_qmi_data *dfc;
    u32 ep_type = 0;
    u32 iface_id = 0;

    rcu_read_lock();
    dfc = rcu_dereference(qmap_dfc_data);
    if (dfc) {
        ep_type = dfc->svc.ep_type;
        iface_id = dfc->svc.iface_id;
    } else {
        rcu_read_unlock();
        return -EINVAL;
    }
    rcu_read_unlock();

    skb = alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    skb->protocol = htons(ETH_P_MAP);
    dfc_powersave = (struct qmap_dfc_powersave_req *)skb_put(skb, len);
    memset(dfc_powersave, 0, len);

    dfc_powersave->hdr.cd_bit = 1;
    dfc_powersave->hdr.mux_id = 0;
    dfc_powersave->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
    dfc_powersave->hdr.cmd_name = QMAP_DFC_POWERSAVE;
    dfc_powersave->hdr.cmd_type = QMAP_CMD_REQUEST;
    dfc_powersave->hdr.tx_id = htonl(rmnet_qmap_next_txid());

    dfc_powersave->cmd_ver = 3;
    dfc_powersave->mode = enable ? 1 : 0;

    if (enable && num_bearers) {
        if (unlikely(num_bearers > PS_MAX_BEARERS))
            num_bearers = PS_MAX_BEARERS;
        dfc_powersave->allow = 1;
        dfc_powersave->autoshut = 1;
        dfc_powersave->num_bearers = num_bearers;
        memcpy(dfc_powersave->bearer_id, bearer_id, num_bearers);
    }

    dfc_powersave->ep_type = htonl(ep_type);
    dfc_powersave->iface_id = htonl(iface_id);

    return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
    trace_dfc_set_powersave_mode(enable);
    return dfc_qmap_send_powersave(enable, num_bearers, bearer_id);
}

void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
    struct rmnet_bearer_map *bearer;

    if (type == DFC_ACK_TYPE_DISABLE) {
        bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
        if (bearer)
            dfc_qmap_send_end_marker_cnf(qos, bearer,
                                         seq, bearer->ack_txid);
    } else if (type == DFC_ACK_TYPE_THRESHOLD) {
        dfc_qmap_send_query(qos->mux_id, bearer_id);
    }
}

int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
                         struct qmi_info *qmi)
{
    struct dfc_qmi_data *data;

    if (!port || !qmi)
        return -EINVAL;

    /* Prevent double init */
    data = rcu_dereference(qmap_dfc_data);
    if (data)
        return -EINVAL;

    data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    data->rmnet_port = port;
    data->index = index;
    memcpy(&data->svc, psvc, sizeof(data->svc));

    qmi->dfc_clients[index] = (void *)data;
    rcu_assign_pointer(qmap_dfc_data, data);

    rmnet_qmap_init(port);

    trace_dfc_client_state_up(data->index, data->svc.instance,
                              data->svc.ep_type, data->svc.iface_id);

    pr_info("DFC QMAP init\n");

    /* Currently if powersave ext is enabled, no need to do dfc config
     * which only enables tx_info
     */
    if (qmi->ps_ext) {
        dfc_config_acked = true;
    } else {
        dfc_config_acked = false;
        dfc_qmap_send_config(data);
    }

    return 0;
}

void dfc_qmap_client_exit(void *dfc_data)
{
    struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

    if (!data) {
        pr_err("%s() data is null\n", __func__);
        return;
    }

    trace_dfc_client_state_down(data->index, 0);

    rmnet_qmap_exit();

    WRITE_ONCE(data->restart_state, 1);
    RCU_INIT_POINTER(qmap_dfc_data, NULL);
    synchronize_rcu();

    kfree(data);

    pr_info("DFC QMAP exit\n");
}
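The init/exit pair above manages the qmap_dfc_data singleton with a standard RCU publish-and-retire pattern: publish with rcu_assign_pointer(), read without locks inside an RCU section, then hide the pointer and synchronize_rcu() before freeing. Reduced to its essentials (the "client" type and helper names below are illustrative, not the driver's exact code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct client;                          /* stands in for struct dfc_qmi_data */
static struct client __rcu *active_client;

/* Publish: make the new client visible to lock-free readers. */
static void client_publish(struct client *c)
{
    rcu_assign_pointer(active_client, c);
}

/* Reader side: caller must hold rcu_read_lock() across any use of c. */
static struct client *client_get(void)
{
    return rcu_dereference(active_client);
}

/* Retire: hide the pointer, wait for in-flight readers, then free. */
static void client_retire(struct client *c)
{
    RCU_INIT_POINTER(active_client, NULL);
    synchronize_rcu();
    kfree(c);
}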
qcom/opensource/datarmnet/core/dfc_qmi.c (Normal file, 1592 lines)
File diff suppressed because it is too large.
qcom/opensource/datarmnet/core/qmi_rmnet.c (Normal file, 1512 lines)
File diff suppressed because it is too large.
qcom/opensource/datarmnet/core/qmi_rmnet.h (Normal file, 180 lines)
@@ -0,0 +1,180 @@
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _QMI_RMNET_H
#define _QMI_RMNET_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1

struct qmi_rmnet_ps_ind {
    void (*ps_on_handler)(void *port);
    void (*ps_off_handler)(void *port);
    struct list_head list;
};


#ifdef CONFIG_QTI_QMI_RMNET
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
                          int attr_len);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
                                  u8 *bearer_id);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
}

static inline int
qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
                      int attr_len)
{
    return 0;
}

static inline void
qmi_rmnet_enable_all_flows(struct net_device *dev)
{
}

static inline bool
qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
    return true;
}

static inline void qmi_rmnet_prepare_ps_bearers(struct net_device *dev,
                                                u8 *num_bearers, u8 *bearer_id)
{
    if (num_bearers)
        *num_bearers = 0;
}

#endif

#ifdef CONFIG_QTI_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev,
                         struct net_device *vnd_dev, u8 mux_id);
void qmi_rmnet_qos_exit_pre(void *qos);
void qmi_rmnet_qos_exit_post(void);
bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
                              bool *drop, bool *is_low_latency);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
                              int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev,
                   struct net_device *vnd_dev, u8 mux_id)
{
    return NULL;
}

static inline void qmi_rmnet_qos_exit_pre(void *qos)
{
}

static inline void qmi_rmnet_qos_exit_post(void)
{
}

static inline bool qmi_rmnet_get_flow_state(struct net_device *dev,
                                            struct sk_buff *skb,
                                            bool *drop,
                                            bool *is_low_latency)
{
    return false;
}

static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev,
                         int ip_type, u32 mark, unsigned int len)
{
}

static inline int qmi_rmnet_get_queue(struct net_device *dev,
                                      struct sk_buff *skb)
{
    return 0;
}
#endif

#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
                                 u8 *bearer_id);
void qmi_rmnet_work_init(void *port);
void qmi_rmnet_work_exit(void *port);
void qmi_rmnet_work_maybe_restart(void *port, void *desc, struct sk_buff *skb);
void qmi_rmnet_set_dl_msg_active(void *port);
bool qmi_rmnet_ignore_grant(void *port);

int qmi_rmnet_ps_ind_register(void *port,
                              struct qmi_rmnet_ps_ind *ps_ind);
int qmi_rmnet_ps_ind_deregister(void *port,
                                struct qmi_rmnet_ps_ind *ps_ind);
void qmi_rmnet_ps_off_notify(void *port);
void qmi_rmnet_ps_on_notify(void *port);

#else
static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable,
                                               u8 num_bearers, u8 *bearer_id)
{
    return 0;
}
static inline void qmi_rmnet_work_init(void *port)
{
}
static inline void qmi_rmnet_work_exit(void *port)
{
}
static inline void qmi_rmnet_work_maybe_restart(void *port, void *desc,
                                                struct sk_buff *skb)
{

}
static inline void qmi_rmnet_set_dl_msg_active(void *port)
{
}
static inline bool qmi_rmnet_ignore_grant(void *port)
{
    return false;
}

static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
                                            struct qmi_rmnet_ps_ind *ps_ind)
{
    return 0;
}
static inline int qmi_rmnet_ps_ind_deregister(struct rmnet_port *port,
                                              struct qmi_rmnet_ps_ind *ps_ind)
{
    return 0;
}

static inline void qmi_rmnet_ps_off_notify(struct rmnet_port *port)
{

}

static inline void qmi_rmnet_ps_on_notify(struct rmnet_port *port)
{

}
#endif
#endif /*_QMI_RMNET_H*/
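The #ifdef/stub layout above means callers never need their own conditionals: when a feature config is off, the inline stubs keep the call sites compiling and return neutral values. A hypothetical caller (not part of this commit) that works unchanged either way:

#include "qmi_rmnet.h"

/* Hypothetical caller: with CONFIG_QTI_QMI_DFC unset, qmi_rmnet_qos_init()
 * is the inline stub returning NULL, so this simply runs without QoS state.
 */
static int sketch_setup_qos(struct net_device *real_dev,
                            struct net_device *vnd_dev, u8 mux_id)
{
    void *qos = qmi_rmnet_qos_init(real_dev, vnd_dev, mux_id);

    if (!qos)
        return 0;       /* DFC disabled or init failed: no QoS bookkeeping */
    /* ... stash qos in the virtual device's private data ... */
    return 0;
}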
qcom/opensource/datarmnet/core/qmi_rmnet_i.h (Normal file, 300 lines)
@@ -0,0 +1,300 @@
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _RMNET_QMI_I_H
#define _RMNET_QMI_I_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <uapi/linux/rtnetlink.h>
#include <linux/soc/qcom/qmi.h>

#define MAX_MQ_NUM 16
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
#define DEFAULT_GRANT 1
#define DEFAULT_CALL_GRANT 20480
#define DFC_MAX_BEARERS_V01 16
#define DEFAULT_MQ_NUM 0
#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
#define INVALID_MQ 0xFF

#define DFC_MODE_SA 4
#define PS_MAX_BEARERS 32

#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1

extern int dfc_mode;
extern int dfc_qmap;

struct qos_info;

enum {
    RMNET_CH_DEFAULT,
    RMNET_CH_LL,
    RMNET_CH_MAX,
    RMNET_CH_CTL = 0xFF
};

enum rmnet_ch_switch_state {
    CH_SWITCH_NONE,
    CH_SWITCH_STARTED,
    CH_SWITCH_ACKED,
    CH_SWITCH_FAILED_RETRY
};

struct rmnet_ch_switch {
    u8 current_ch;
    u8 switch_to_ch;
    u8 retry_left;
    u8 status_code;
    bool auto_switched;
    enum rmnet_ch_switch_state state;
    __be32 switch_txid;
    u32 flags;
    bool timer_quit;
    struct timer_list guard_timer;
    u32 nl_pid;
    u32 nl_seq;
};

struct rmnet_bearer_map {
    struct list_head list;
    u8 bearer_id;
    int flow_ref;
    u32 grant_size;
    u32 grant_thresh;
    u16 seq;
    u8 ack_req;
    u32 last_grant;
    u16 last_seq;
    u32 bytes_in_flight;
    u32 last_adjusted_grant;
    bool tcp_bidir;
    bool rat_switch;
    bool tx_off;
    u32 ack_txid;
    u32 mq_idx;
    u32 ack_mq_idx;
    struct qos_info *qos;
    struct timer_list watchdog;
    bool watchdog_started;
    bool watchdog_quit;
    u32 watchdog_expire_cnt;
    struct rmnet_ch_switch ch_switch;
};

struct rmnet_flow_map {
    struct list_head list;
    u8 bearer_id;
    u32 flow_id;
    int ip_type;
    u32 mq_idx;
    struct rmnet_bearer_map *bearer;
};

struct svc_info {
    u32 instance;
    u32 ep_type;
    u32 iface_id;
};

struct mq_map {
    struct rmnet_bearer_map *bearer;
    bool is_ll_ch;
    bool drop_on_remove;
};

struct qos_info {
    struct list_head list;
    u8 mux_id;
    struct net_device *real_dev;
    struct net_device *vnd_dev;
    struct list_head flow_head;
    struct list_head bearer_head;
    struct mq_map mq[MAX_MQ_NUM];
    u32 tran_num;
    spinlock_t qos_lock;
    struct rmnet_bearer_map *removed_bearer;
};

struct qmi_info {
    int flag;
    void *wda_client;
    void *wda_pending;
    void *dfc_clients[MAX_CLIENT_NUM];
    void *dfc_pending[MAX_CLIENT_NUM];
    bool dfc_client_exiting[MAX_CLIENT_NUM];
    unsigned long ps_work_active;
    bool ps_enabled;
    bool dl_msg_active;
    bool ps_ignore_grant;
    int ps_ext;
};

enum data_ep_type_enum_v01 {
    DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN,
    DATA_EP_TYPE_RESERVED_V01 = 0x00,
    DATA_EP_TYPE_HSIC_V01 = 0x01,
    DATA_EP_TYPE_HSUSB_V01 = 0x02,
    DATA_EP_TYPE_PCIE_V01 = 0x03,
    DATA_EP_TYPE_EMBEDDED_V01 = 0x04,
    DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX
};

struct data_ep_id_type_v01 {

    enum data_ep_type_enum_v01 ep_type;
    u32 iface_id;
};

extern struct qmi_elem_info data_ep_id_type_v01_ei[];

void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);

#ifdef CONFIG_QTI_QMI_DFC
struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
                       u32 flow_id, int ip_type);

struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);

unsigned int qmi_rmnet_grant_per(unsigned int grant);

int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
                        struct qmi_info *qmi);

void dfc_qmi_client_exit(void *dfc_data);

void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
                         int ip_type, u32 mark, unsigned int len);

int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable);

void dfc_qmi_query_flow(void *dfc_data);

int dfc_bearer_flow_ctl(struct net_device *dev,
                        struct rmnet_bearer_map *bearer,
                        struct qos_info *qos);

int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
                         struct qmi_info *qmi);

void dfc_qmap_client_exit(void *dfc_data);

void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);

struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
                                                    u8 bearer_id);

void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer);

void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer);

int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen);
void rmnet_ll_guard_fn(struct timer_list *t);
void rmnet_ll_wq_init(void);
void rmnet_ll_wq_exit(void);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
                       uint32_t flow_id, int ip_type)
{
    return NULL;
}

static inline struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
{
    return NULL;
}

static inline int
dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
                    struct qmi_info *qmi)
{
    return -EINVAL;
}

static inline void dfc_qmi_client_exit(void *dfc_data)
{
}

static inline int
dfc_bearer_flow_ctl(struct net_device *dev,
                    struct rmnet_bearer_map *bearer,
                    struct qos_info *qos)
{
    return 0;
}

static inline int
dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
                     struct qmi_info *qmi)
{
    return -EINVAL;
}

static inline void dfc_qmap_client_exit(void *dfc_data)
{
}

static inline void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
}

static inline int rmnet_ll_switch(struct net_device *dev,
                                  struct tcmsg *tcm, int attrlen)
{
    return -EINVAL;
}
#endif

#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
void wda_qmi_client_exit(void *wda_data);
int wda_set_powersave_mode(void *wda_data, u8 enable, u8 num_bearers,
                           u8 *bearer_id);
void qmi_rmnet_flush_ps_wq(void);
void wda_qmi_client_release(void *wda_data);
int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id);
#else
static inline int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
    return -EINVAL;
}

static inline void wda_qmi_client_exit(void *wda_data)
{
}

static inline int wda_set_powersave_mode(void *wda_data, u8 enable,
                                         u8 num_bearers, u8 *bearer_id)
{
    return -EINVAL;
}
static inline void qmi_rmnet_flush_ps_wq(void)
{
}
static inline void wda_qmi_client_release(void *wda_data)
{
}
#endif
#endif /*_RMNET_QMI_I_H*/
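As dfc_qmap.c above illustrates, bearer lookups via qmi_rmnet_get_bearer_map() must happen under qos_lock, since bearers can be removed concurrently. A minimal sketch of that pattern (the function name is illustrative, not from this commit):

#include "qmi_rmnet_i.h"

/* Sketch only: look up a bearer and adjust its grant under qos_lock. */
static void sketch_touch_bearer(struct qos_info *qos, u8 bearer_id, u32 grant)
{
    struct rmnet_bearer_map *bearer;

    spin_lock_bh(&qos->qos_lock);
    bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
    if (bearer)
        bearer->grant_size = grant;
    spin_unlock_bh(&qos->qos_lock);
}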
qcom/opensource/datarmnet/core/rmnet_config.c (Normal file, 891 lines)
@@ -0,0 +1,891 @@
|
||||
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 *
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
#include "rmnet_ll.h"
#include "rmnet_genl.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1

#define QMAP_SHS_MASK 0xFF
#define QMAP_SHS_PKT_LIMIT 200

/* Locking scheme -
 * The shared resource which needs to be protected is realdev->rx_handler_data.
 * For the writer path, this is using rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired. There is also an
 * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
 * dereference here, we will need to use rtnl_dereference(). Dev list writing
 * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
 * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
 * path. We only need rcu_read_lock() for these scenarios. In these cases,
 * the rcu_read_lock() is held in __dev_queue_xmit() and
 * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
 * to get the relevant information. For dev list reading, we again acquire
 * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
 * in the same context.
 */

/* Local Definitions and Declarations */

enum {
	IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,
	IFLA_RMNET_UL_AGG_PARAMS,
	IFLA_RMNET_UL_AGG_STATE_ID,
	__IFLA_RMNET_EXT_MAX,
};

static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = {
	[IFLA_RMNET_MUX_ID] = {
		.type = NLA_U16
	},
	[IFLA_RMNET_FLAGS] = {
		.len = sizeof(struct ifla_rmnet_flags)
	},
	[IFLA_RMNET_DFC_QOS] = {
		.len = sizeof(struct tcmsg)
	},
	[IFLA_RMNET_UL_AGG_PARAMS] = {
		.len = sizeof(struct rmnet_egress_agg_params)
	},
	[IFLA_RMNET_UL_AGG_STATE_ID] = {
		.type = NLA_U8
	},
};

int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
EXPORT_SYMBOL(rmnet_is_real_dev_registered);

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}

static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	netdev_rx_handler_unregister(real_dev);

	rmnet_map_cmd_exit(port);
	rmnet_map_tx_aggregate_exit(port);

	rmnet_descriptor_deinit(port);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}

static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc, entry;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	port->phy_shs_cfg.config = RMNET_SHS_NO_DLMKR | RMNET_SHS_NO_PSH |
				   RMNET_SHS_STMP_ALL;
	port->phy_shs_cfg.map_mask = QMAP_SHS_MASK;
	port->phy_shs_cfg.max_pkts = QMAP_SHS_PKT_LIMIT;


	rmnet_map_tx_aggregate_init(port);
	rmnet_map_cmd_init(port);


	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	rc = rmnet_descriptor_init(port);
	if (rc) {
		goto err;
	}

	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		rc = -EBUSY;
		goto err;
	}
	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
err:
	rmnet_descriptor_deinit(port);
	kfree(port);
	return rc;
}

static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct rmnet_port *bridge_port;
	struct net_device *bridge_dev;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}

static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u32 data_format;
	int err = 0;
	u16 mux_id;

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return -ENOMEM;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
	if (err)
		goto err1;

	port->rmnet_mode = mode;

	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
		netdev_dbg(dev, "data format [0x%08X]\n", data_format);
		if (port->data_format & RMNET_INGRESS_FORMAT_PS)
			data_format |= RMNET_INGRESS_FORMAT_PS;
		port->data_format = data_format;
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		struct rmnet_egress_agg_params *agg_params;
		u8 state = RMNET_DEFAULT_AGG_STATE;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		if (data[IFLA_RMNET_UL_AGG_STATE_ID])
			state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);

		rmnet_map_update_ul_agg_config(&port->agg_state[state],
					       agg_params->agg_size,
					       agg_params->agg_count,
					       agg_params->agg_features,
					       agg_params->agg_time);
	}

	return 0;

err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	kfree(ep);
	return err;
}

static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		synchronize_rcu();
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}

	if (!port->nr_rmnet_devs)
		qmi_rmnet_qmi_exit(port->qmi_info, port);

	unregister_netdevice(dev);

	qmi_rmnet_qos_exit_post();

	rmnet_unregister_real_device(real_dev, port);
}

static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);
	HLIST_HEAD(cleanup_list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);
	qmi_rmnet_qmi_exit(port->qmi_info, port);

	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head(&ep->hlnode, &cleanup_list);
	}

	synchronize_rcu();

	hlist_for_each_entry_safe(ep, tmp_ep, &cleanup_list, hlnode) {
		hlist_del(&ep->hlnode);
		kfree(ep);
	}

	/* Unregistering devices in context before freeing port.
	 * If this API becomes non-context their order should switch.
	 */
	unregister_netdevice_many(&list);

	qmi_rmnet_qos_exit_post();

	rmnet_unregister_real_device(real_dev, port);
}

static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);
	int rc;

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		if (dev->rtnl_link_ops == &rmnet_link_ops) {
			rc = netdev_rx_handler_register(dev,
							rmnet_rx_priv_handler,
							NULL);

			if (rc)
				return NOTIFY_BAD;
		}

		break;
	case NETDEV_UNREGISTER:
		if (dev->rtnl_link_ops == &rmnet_link_ops) {
			netdev_rx_handler_unregister(dev);
			break;
		}

		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;
	case NETDEV_DOWN:
		rmnet_vnd_reset_mac_addr(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};

static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct rmnet_egress_agg_params *agg_params;
	u16 mux_id;

	if (!data)
		return -EINVAL;

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
			return -ERANGE;
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		if (agg_params->agg_time < 1000000)
			return -EINVAL;

		if (data[IFLA_RMNET_UL_AGG_STATE_ID]) {
			u8 state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);

			if (state >= RMNET_MAX_AGG_STATE)
				return -ERANGE;
		}
	}

	return 0;
}

static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;
	u32 data_format;
	int rc = 0;

	real_dev = __dev_get_by_index(dev_net(dev),
				      nla_get_u32(tb[IFLA_LINK]));

	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;

	port = rmnet_get_port_rtnl(real_dev);

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		ep = rmnet_get_endpoint(port, priv->mux_id);
		if (!ep)
			return -ENODEV;

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

		ep->mux_id = mux_id;
		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
		if (port->data_format & RMNET_INGRESS_FORMAT_PS)
			data_format |= RMNET_INGRESS_FORMAT_PS;
		port->data_format = data_format;
	}

	if (data[IFLA_RMNET_DFC_QOS]) {
		struct nlattr *qos = data[IFLA_RMNET_DFC_QOS];
		struct tcmsg *tcm;

		tcm = nla_data(qos);
		rc = qmi_rmnet_change_link(dev, port, tcm, nla_len(qos));
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		struct rmnet_egress_agg_params *agg_params;
		u8 state = RMNET_DEFAULT_AGG_STATE;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		if (data[IFLA_RMNET_UL_AGG_STATE_ID])
			state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);

		rmnet_map_update_ul_agg_config(&port->agg_state[state],
					       agg_params->agg_size,
					       agg_params->agg_count,
					       agg_params->agg_features,
					       agg_params->agg_time);
	}

	return rc;
}

static size_t rmnet_get_size(const struct net_device *dev)
{
	return
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
		/* IFLA_RMNET_DFC_QOS */
		nla_total_size(sizeof(struct tcmsg)) +
		/* IFLA_RMNET_UL_AGG_PARAMS */
		nla_total_size(sizeof(struct rmnet_egress_agg_params));
}

static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port = NULL;

	real_dev = priv->real_dev;

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
		goto nla_put_failure;

	if (rmnet_is_real_dev_registered(real_dev)) {
		port = rmnet_get_port_rtnl(real_dev);
		f.flags = port->data_format;
	} else {
		f.flags = 0;
	}

	f.mask = ~0;

	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	if (port) {
		struct rmnet_aggregation_state *state;

		/* Only report default for now. The entire message type
		 * would need to change to get both states in there (nested
		 * messages, etc), since they both have the same NLA type...
		 */
		state = &port->agg_state[RMNET_DEFAULT_AGG_STATE];
		if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS,
			    sizeof(state->params),
			    &state->params))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind = "rmnet",
	.maxtype = __IFLA_RMNET_EXT_MAX - 1,
	.priv_size = sizeof(struct rmnet_priv),
	.setup = rmnet_vnd_setup,
	.validate = rmnet_rtnl_validate,
	.newlink = rmnet_newlink,
	.dellink = rmnet_dellink,
	.get_size = rmnet_get_size,
	.changelink = rmnet_changelink,
	.policy = rmnet_policy,
	.fill_info = rmnet_fill_info,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}
EXPORT_SYMBOL(rmnet_get_port);

struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_endpoint);

int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;
	int err;

	port = rmnet_get_port(real_dev);

	/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case.
	 */
	if (port->nr_rmnet_devs > 1)
		return -EINVAL;

	if (rmnet_is_real_dev_registered(slave_dev))
		return -EBUSY;

	err = rmnet_register_real_device(slave_dev);
	if (err)
		return -EBUSY;

	slave_port = rmnet_get_port(slave_dev);
	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	slave_port->bridge_ep = real_dev;

	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	port->bridge_ep = slave_dev;

	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
	return 0;
}

int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;

	port = rmnet_get_port(real_dev);
	port->rmnet_mode = RMNET_EPMODE_VND;
	port->bridge_ep = NULL;

	slave_port = rmnet_get_port(slave_dev);
	rmnet_unregister_real_device(slave_dev, slave_port);

	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
	return 0;
}

void *rmnet_get_qmi_pt(void *port)
{
	if (port)
		return ((struct rmnet_port *)port)->qmi_info;

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qmi_pt);

void *rmnet_get_qos_pt(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return rcu_dereference(priv->qos_info);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qos_pt);

void *rmnet_get_rmnet_port(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return (void *)rmnet_get_port(priv->real_dev);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_port);

struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	if (port) {
		ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
		if (ep)
			return ep->egress_dev;
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_dev);

void rmnet_reset_qmi_pt(void *port)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = NULL;
}
EXPORT_SYMBOL(rmnet_reset_qmi_pt);

void rmnet_init_qmi_pt(void *port, void *qmi)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = qmi;
}
EXPORT_SYMBOL(rmnet_init_qmi_pt);

void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
	struct net_device *dev;
	struct rmnet_priv *priv;
	struct rmnet_pcpu_stats *ps;
	unsigned int cpu, start;

	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (!port || !tx || !rx)
		return;

	*tx = 0;
	*rx = 0;
	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep, bkt, ep,
			  hlnode) {
		dev = ep->egress_dev;
		if (!dev)
			continue;
		priv = netdev_priv(dev);
		for_each_possible_cpu(cpu) {
			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&ps->syncp);
				*tx += ps->stats.tx_pkts;
				*rx += ps->stats.rx_pkts;
			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_get_packets);

void rmnet_set_powersave_format(void *port)
{
	if (!port)
		return;
	((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_set_powersave_format);

void rmnet_clear_powersave_format(void *port)
{
	if (!port)
		return;
	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_clear_powersave_format);

void rmnet_enable_all_flows(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_enable_all_flows(ep->egress_dev);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_enable_all_flows);

bool rmnet_all_flows_enabled(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	bool ret = true;

	if (unlikely(!port))
		return true;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
			ret = false;
			goto out;
		}
	}
out:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);

void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	u8 current_num_bearers = 0;
	u8 number_bearers_left = 0;
	u8 num_bearers_in_out;

	if (unlikely(!port || !num_bearers))
		return;

	number_bearers_left = *num_bearers;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		num_bearers_in_out = number_bearers_left;
		qmi_rmnet_prepare_ps_bearers(ep->egress_dev,
					     &num_bearers_in_out,
					     bearer_id ? bearer_id +
					     current_num_bearers : NULL);
		current_num_bearers += num_bearers_in_out;
		number_bearers_left -= num_bearers_in_out;
	}
	rcu_read_unlock();

	*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(rmnet_prepare_ps_bearers);

int rmnet_get_powersave_notif(void *port)
{
	if (!port)
		return 0;
	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);

struct net_device *rmnet_get_real_dev(void *port)
{
	if (port)
		return ((struct rmnet_port *)port)->dev;

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_real_dev);

int rmnet_get_dlmarker_info(void *port)
{
	if (!port)
		return 0;

	return ((struct rmnet_port *)port)->data_format &
		(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |
		 RMNET_INGRESS_FORMAT_DL_MARKER_V2);
}
EXPORT_SYMBOL(rmnet_get_dlmarker_info);

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}

	rc = rmnet_ll_init();
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		rtnl_link_unregister(&rmnet_link_ops);
		return rc;
	}

	rmnet_core_genl_init();

	try_module_get(THIS_MODULE);
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
	rmnet_ll_exit();
	rmnet_core_genl_deinit();

	module_put(THIS_MODULE);
}

module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");
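A minimal sketch of how another module might use the exported lookup helpers above; demo_find_egress_dev() is a hypothetical name, not part of this commit, and the RCU usage follows the locking scheme documented at the top of rmnet_config.c.

/* Hypothetical example, not part of this commit: resolving the virtual
 * device behind a mux ID from another rmnet module.
 */
static struct net_device *demo_find_egress_dev(struct net_device *real_dev,
					       u8 mux_id)
{
	struct net_device *egress = NULL;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;

	/* Reader path: rcu_read_lock() is enough; rmnet_get_port()
	 * returns NULL when real_dev is not registered with rmnet.
	 */
	rcu_read_lock();
	port = rmnet_get_port(real_dev);
	if (port) {
		ep = rmnet_get_endpoint(port, mux_id);
		if (ep)
			egress = ep->egress_dev;
	}
	rcu_read_unlock();

	return egress;
}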
257
qcom/opensource/datarmnet/core/rmnet_config.h
Executable file
@@ -0,0 +1,257 @@
/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data configuration engine
 *
 */

#include <linux/skbuff.h>
#include <net/gro_cells.h>

#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_

#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 16

#define RMNET_SHS_STMP_ALL BIT(0)
#define RMNET_SHS_NO_PSH BIT(1)
#define RMNET_SHS_NO_DLMKR BIT(2)

#define RMNET_LLM(prio) ((prio) == 0xDA001A) /* qmipriod */

#define RMNET_APS_MAJOR 0x9B6D
#define RMNET_APS_LLC_MASK 0x0100
#define RMNET_APS_LLB_MASK 0x0200

#define RMNET_APS_LLC(prio) \
	(((prio) >> 16 == RMNET_APS_MAJOR) && ((prio) & RMNET_APS_LLC_MASK))

#define RMNET_APS_LLB(prio) \
	(((prio) >> 16 == RMNET_APS_MAJOR) && ((prio) & RMNET_APS_LLB_MASK))

struct rmnet_shs_clnt_s {
	u16 config;
	u16 map_mask;
	u16 max_pkts;
	union {
		struct rmnet_port *port;
	} info;
};

struct rmnet_endpoint {
	u8 mux_id;
	struct net_device *egress_dev;
	struct hlist_node hlnode;
};

struct rmnet_agg_stats {
	u64 ul_agg_reuse;
	u64 ul_agg_alloc;
};

struct rmnet_port_priv_stats {
	u64 dl_hdr_last_qmap_vers;
	u64 dl_hdr_last_ep_id;
	u64 dl_hdr_last_trans_id;
	u64 dl_hdr_last_seq;
	u64 dl_hdr_last_bytes;
	u64 dl_hdr_last_pkts;
	u64 dl_hdr_last_flows;
	u64 dl_hdr_count;
	u64 dl_hdr_total_bytes;
	u64 dl_hdr_total_pkts;
	u64 dl_trl_last_seq;
	u64 dl_trl_count;
	struct rmnet_agg_stats agg;
	u64 dl_chain_stat[7];
	u64 dl_frag_stat_1;
	u64 dl_frag_stat[5];
	u64 pb_marker_count;
	u64 pb_marker_seq;
};

struct rmnet_egress_agg_params {
	u16 agg_size;
	u8 agg_count;
	u8 agg_features;
	u32 agg_time;
};

enum {
	RMNET_DEFAULT_AGG_STATE,
	RMNET_LL_AGG_STATE,
	RMNET_MAX_AGG_STATE,
};

struct rmnet_aggregation_state {
	struct rmnet_egress_agg_params params;
	struct timespec64 agg_time;
	struct timespec64 agg_last;
	struct hrtimer hrtimer;
	struct work_struct agg_wq;
	/* Protect aggregation related elements */
	spinlock_t agg_lock;
	struct sk_buff *agg_skb;
	int (*send_agg_skb)(struct sk_buff *skb);
	int agg_state;
	u8 agg_count;
	u8 agg_size_order;
	struct list_head agg_list;
	struct rmnet_agg_page *agg_head;
	struct rmnet_agg_stats *stats;
};


struct rmnet_agg_page {
	struct list_head list;
	struct page *page;
};


/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */
struct rmnet_port {
	struct net_device *dev;
	u32 data_format;
	u8 nr_rmnet_devs;
	u8 rmnet_mode;
	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
	struct net_device *bridge_ep;
	void *rmnet_perf;

	struct rmnet_aggregation_state agg_state[RMNET_MAX_AGG_STATE];

	void *qmi_info;

	/* dl marker elements */
	struct list_head dl_list;
	struct rmnet_port_priv_stats stats;
	int dl_marker_flush;
	/* Pending Byte Marker */
	struct list_head pb_list;
	/* Port Config for shs */
	struct rmnet_shs_clnt_s shs_cfg;
	struct rmnet_shs_clnt_s phy_shs_cfg;

	/* Descriptor pool */
	spinlock_t desc_pool_lock;
	struct rmnet_frag_descriptor_pool *frag_desc_pool;
};

extern struct rtnl_link_ops rmnet_link_ops;

struct rmnet_vnd_stats {
	u64 rx_pkts;
	u64 rx_bytes;
	u64 tx_pkts;
	u64 tx_bytes;
	u32 tx_drops;
};

struct rmnet_pcpu_stats {
	struct rmnet_vnd_stats stats;
	struct u64_stats_sync syncp;
};

struct rmnet_coal_close_stats {
	u64 non_coal;
	u64 ip_miss;
	u64 trans_miss;
	u64 hw_nl;
	u64 hw_pkt;
	u64 hw_byte;
	u64 hw_time;
	u64 hw_evict;
	u64 coal;
};

struct rmnet_coal_stats {
	u64 coal_rx;
	u64 coal_pkts;
	u64 coal_hdr_nlo_err;
	u64 coal_hdr_pkt_err;
	u64 coal_csum_err;
	u64 coal_reconstruct;
	u64 coal_ip_invalid;
	u64 coal_trans_invalid;
	struct rmnet_coal_close_stats close;
	u64 coal_veid[RMNET_MAX_VEID];
	u64 coal_tcp;
	u64 coal_tcp_bytes;
	u64 coal_udp;
	u64 coal_udp_bytes;
};

struct rmnet_priv_stats {
	u64 csum_ok;
	u64 csum_valid_unset;
	u64 csum_validation_failed;
	u64 csum_err_bad_buffer;
	u64 csum_err_invalid_ip_version;
	u64 csum_err_invalid_transport;
	u64 csum_fragmented_pkt;
	u64 csum_skipped;
	u64 csum_sw;
	u64 csum_hw;
	struct rmnet_coal_stats coal;
	u64 ul_prio;
	u64 tso_pkts;
	u64 tso_arriv_errs;
	u64 tso_segment_success;
	u64 tso_segment_fail;
	u64 tso_segment_skip;
	u64 ll_tso_segs;
	u64 ll_tso_errs;
	u64 aps_prio;
};

struct rmnet_priv {
	u8 mux_id;
	struct net_device *real_dev;
	struct rmnet_pcpu_stats __percpu *pcpu_stats;
	struct gro_cells gro_cells;
	struct rmnet_priv_stats stats;
	void __rcu *qos_info;
	char aps_cb[16];
};

enum rmnet_dl_marker_prio {
	RMNET_PERF,
	RMNET_SHS,
};

enum rmnet_trace_func {
	RMNET_MODULE,
	NW_STACK_MODULE,
};

enum rmnet_trace_evt {
	RMNET_DLVR_SKB,
	RMNET_RCV_FROM_PND,
	RMNET_TX_UL_PKT,
	NW_STACK_DEV_Q_XMIT,
	NW_STACK_NAPI_GRO_FLUSH,
	NW_STACK_RX,
	NW_STACK_TX,
};

int rmnet_is_real_dev_registered(const struct net_device *real_dev);
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack);
int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev);
#endif /* _RMNET_CONFIG_H_ */
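The per-CPU counters declared above (struct rmnet_pcpu_stats) are read with a u64_stats_sync retry loop. A hedged sketch of a consumer totalling one counter follows; demo_sum_tx_pkts() is an illustrative name, and the loop mirrors rmnet_get_packets() in rmnet_config.c.

/* Hypothetical example, not part of this commit: totalling tx_pkts
 * across all possible CPUs using the syncp sequence counter.
 */
static u64 demo_sum_tx_pkts(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *ps;
	unsigned int cpu, start;
	u64 total = 0;

	for_each_possible_cpu(cpu) {
		u64 pkts;

		ps = per_cpu_ptr(priv->pcpu_stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&ps->syncp);
			pkts = ps->stats.tx_pkts;
		} while (u64_stats_fetch_retry_irq(&ps->syncp, start));

		total += pkts;
	}

	return total;
}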
42
qcom/opensource/datarmnet/core/rmnet_ctl.h
Normal file
@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * RMNET_CTL header
 *
 */

#ifndef _RMNET_CTL_H_
#define _RMNET_CTL_H_


#include <linux/skbuff.h>

enum rmnet_ctl_log_lvl {
	RMNET_CTL_LOG_CRIT,
	RMNET_CTL_LOG_ERR,
	RMNET_CTL_LOG_INFO,
	RMNET_CTL_LOG_DEBUG,
};

struct rmnet_ctl_client_hooks {
	void (*ctl_dl_client_hook)(struct sk_buff *skb);
};

struct rmnet_ctl_client_if {
	void * (*reg)(struct rmnet_ctl_client_hooks *hook);
	int (*dereg)(void *handle);
	int (*send)(void *handle, struct sk_buff *skb);
	void (*log)(enum rmnet_ctl_log_lvl lvl, const char *msg, int rc,
		    const void *data, unsigned int len);
};

#ifdef CONFIG_RMNET_LA_PLATFORM
struct rmnet_ctl_client_if *rmnet_ctl_if(void);
int rmnet_ctl_get_stats(u64 *s, int n);
#else
static inline struct rmnet_ctl_client_if *rmnet_ctl_if(void) { return NULL; }
static inline int rmnet_ctl_get_stats(u64 *s, int n) { return 0; }
#endif /* CONFIG_RMNET_LA_PLATFORM */

#endif /* _RMNET_CTL_H_ */
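A hedged sketch of the intended consumer flow for the interface above: fetch the ops table with rmnet_ctl_if(), register a downlink hook, and keep the returned handle for send()/dereg(). The demo_ names are illustrative only.

/* Hypothetical example, not part of this commit. */
static void demo_dl_hook(struct sk_buff *skb)
{
	/* Consume the downlink QMAP control packet. */
	kfree_skb(skb);
}

static void *demo_register_ctl_client(void)
{
	static struct rmnet_ctl_client_hooks demo_hooks = {
		.ctl_dl_client_hook = demo_dl_hook,
	};
	struct rmnet_ctl_client_if *ctl = rmnet_ctl_if();

	if (!ctl || !ctl->reg)
		return NULL;

	/* The returned handle is later passed to ctl->send()/ctl->dereg(). */
	return ctl->reg(&demo_hooks);
}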
242
qcom/opensource/datarmnet/core/rmnet_ctl_client.c
Normal file
@@ -0,0 +1,242 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * RMNET_CTL client handlers
 *
 */

#include <linux/debugfs.h>
#include <linux/ipc_logging.h>
#include <linux/version.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"

#define RMNET_CTL_LOG_PAGE 10
#define RMNET_CTL_LOG_NAME "rmnet_ctl"
#define RMNET_CTL_LOG_LVL "ipc_log_lvl"

struct rmnet_ctl_client {
	struct rmnet_ctl_client_hooks hooks;
};

struct rmnet_ctl_endpoint {
	struct rmnet_ctl_dev __rcu *dev;
	struct rmnet_ctl_client __rcu *client;
	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_loglvl;
	void *ipc_log;
};

#if defined(CONFIG_IPA_DEBUG) || defined(CONFIG_MHI_DEBUG)
#define CONFIG_RMNET_CTL_DEBUG 1
#endif

#ifdef CONFIG_RMNET_CTL_DEBUG
static u8 ipc_log_lvl = RMNET_CTL_LOG_DEBUG;
#else
static u8 ipc_log_lvl = RMNET_CTL_LOG_ERR;
#endif

static DEFINE_SPINLOCK(client_lock);
static struct rmnet_ctl_endpoint ctl_ep;

void rmnet_ctl_set_dbgfs(bool enable)
{
	if (enable) {
		if (IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
			ctl_ep.dbgfs_dir = debugfs_create_dir(
					RMNET_CTL_LOG_NAME, NULL);

		if (!IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
			debugfs_create_u8((const char *) RMNET_CTL_LOG_LVL,
					  (umode_t) 0644,
					  (struct dentry *) ctl_ep.dbgfs_dir,
					  (u8 *) &ipc_log_lvl);

		if (!ctl_ep.ipc_log)
			ctl_ep.ipc_log = ipc_log_context_create(
				RMNET_CTL_LOG_PAGE, RMNET_CTL_LOG_NAME, 0);
	} else {
		debugfs_remove_recursive(ctl_ep.dbgfs_dir);
		ipc_log_context_destroy(ctl_ep.ipc_log);
		ctl_ep.dbgfs_dir = NULL;
		ctl_ep.dbgfs_loglvl = NULL;
		ctl_ep.ipc_log = NULL;
	}
}

void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
{
	rcu_assign_pointer(ctl_ep.dev, dev);
}

void rmnet_ctl_endpoint_post(const void *data, size_t len)
{
	struct rmnet_ctl_client *client;
	struct sk_buff *skb;

	if (unlikely(!data || !len))
		return;

	if (len == 0xFFFFFFFF) {
		skb = (struct sk_buff *)data;
		rmnet_ctl_log_info("RX", skb->data, skb->len);

		rcu_read_lock();

		client = rcu_dereference(ctl_ep.client);
		if (client && client->hooks.ctl_dl_client_hook) {
			skb->protocol = htons(ETH_P_MAP);
			client->hooks.ctl_dl_client_hook(skb);
		} else {
			kfree_skb(skb);
		}

		rcu_read_unlock();
	} else {
		rmnet_ctl_log_info("RX", data, len);

		rcu_read_lock();

		client = rcu_dereference(ctl_ep.client);
		if (client && client->hooks.ctl_dl_client_hook) {
			skb = alloc_skb(len, GFP_ATOMIC);
			if (skb) {
				skb_put_data(skb, data, len);
				skb->protocol = htons(ETH_P_MAP);
				client->hooks.ctl_dl_client_hook(skb);
			}
		}

		rcu_read_unlock();
	}
}

void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
{
	struct rmnet_ctl_client *client;

	if (!hook)
		return NULL;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;
	client->hooks = *hook;

	spin_lock(&client_lock);

	/* Only support one client for now */
	if (rcu_dereference(ctl_ep.client)) {
		spin_unlock(&client_lock);
		kfree(client);
		return NULL;
	}

	rcu_assign_pointer(ctl_ep.client, client);

	spin_unlock(&client_lock);

	rmnet_ctl_set_dbgfs(true);

	return client;
}
EXPORT_SYMBOL(rmnet_ctl_register_client);

int rmnet_ctl_unregister_client(void *handle)
{
	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;

	spin_lock(&client_lock);

	if (rcu_dereference(ctl_ep.client) != client) {
		spin_unlock(&client_lock);
		return -EINVAL;
	}

	RCU_INIT_POINTER(ctl_ep.client, NULL);

	spin_unlock(&client_lock);

	synchronize_rcu();
	kfree(client);

	rmnet_ctl_set_dbgfs(false);

	return 0;
}
EXPORT_SYMBOL(rmnet_ctl_unregister_client);

int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
{
	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
	struct rmnet_ctl_dev *dev;
	int rc = -EINVAL;

	if (client != rcu_dereference(ctl_ep.client)) {
		kfree_skb(skb);
		return rc;
	}

	rmnet_ctl_log_info("TX", skb->data, skb->len);

	rcu_read_lock();

	dev = rcu_dereference(ctl_ep.dev);
	if (dev && dev->xmit)
		rc = dev->xmit(dev, skb);
	else
		kfree_skb(skb);

	rcu_read_unlock();

	if (rc)
		rmnet_ctl_log_err("TXE", rc, NULL, 0);

	return rc;
}
EXPORT_SYMBOL(rmnet_ctl_send_client);

void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
		   int rc, const void *data, unsigned int len)
{
	if (lvl <= ipc_log_lvl && ctl_ep.ipc_log) {
		if (data == NULL || len == 0)
			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): (null)\n",
				       msg, rc);
		else
			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): %*ph\n",
				       msg, rc, len > 32 ? 32 : len, data);
	}
}
EXPORT_SYMBOL(rmnet_ctl_log);

static struct rmnet_ctl_client_if client_if = {
	.reg = rmnet_ctl_register_client,
	.dereg = rmnet_ctl_unregister_client,
	.send = rmnet_ctl_send_client,
	.log = rmnet_ctl_log,
};

struct rmnet_ctl_client_if *rmnet_ctl_if(void)
{
	return &client_if;
}
EXPORT_SYMBOL(rmnet_ctl_if);

int rmnet_ctl_get_stats(u64 *s, int n)
{
	struct rmnet_ctl_dev *dev;

	rcu_read_lock();
	dev = rcu_dereference(ctl_ep.dev);
	if (dev && n > 0) {
		n = min(n, (int)(sizeof(dev->stats) / sizeof(u64)));
		memcpy(s, &dev->stats, n * sizeof(u64));
	}
	rcu_read_unlock();

	return n;
}
EXPORT_SYMBOL(rmnet_ctl_get_stats);
43
qcom/opensource/datarmnet/core/rmnet_ctl_client.h
Normal file
@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * RMNET_CTL client handlers
 *
 */

#ifndef _RMNET_CTL_CLIENT_H_
#define _RMNET_CTL_CLIENT_H_

#include <linux/skbuff.h>
#include "rmnet_ctl.h"

void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
		   int rc, const void *data, unsigned int len);

#define rmnet_ctl_log_err(msg, rc, data, len) \
	rmnet_ctl_log(RMNET_CTL_LOG_ERR, msg, rc, data, len)

#define rmnet_ctl_log_info(msg, data, len) \
	rmnet_ctl_log(RMNET_CTL_LOG_INFO, msg, 0, data, len)

#define rmnet_ctl_log_debug(msg, data, len) \
	rmnet_ctl_log(RMNET_CTL_LOG_DEBUG, msg, 0, data, len)

struct rmnet_ctl_stats {
	u64 rx_pkts;
	u64 rx_err;
	u64 tx_pkts;
	u64 tx_err;
	u64 tx_complete;
};

struct rmnet_ctl_dev {
	int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
	struct rmnet_ctl_stats stats;
};

void rmnet_ctl_endpoint_post(const void *data, size_t len);
void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
void rmnet_ctl_set_dbgfs(bool enable);

#endif /* _RMNET_CTL_CLIENT_H_ */
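The struct rmnet_ctl_dev declared above is the contract a transport backend fills in, as the IPA and MHI drivers later in this commit do. A hedged sketch with illustrative demo_ names:

/* Hypothetical example, not part of this commit: a no-op backend that
 * publishes itself the same way rmnet_ctl_ipa.c / rmnet_ctl_mhi.c do.
 */
static int demo_ctl_xmit(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
{
	dev->stats.tx_pkts++;	/* a real backend queues skb to hardware */
	kfree_skb(skb);
	return 0;
}

static struct rmnet_ctl_dev demo_ctl_dev = {
	.xmit = demo_ctl_xmit,
};

static void demo_ctl_attach(void)
{
	/* Makes demo_ctl_dev the target of rmnet_ctl_send_client(). */
	rmnet_ctl_endpoint_setdev(&demo_ctl_dev);
}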
113
qcom/opensource/datarmnet/core/rmnet_ctl_ipa.c
Normal file
@@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * RMNET_CTL ipa handler
 *
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipa.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"

struct rmnet_ctl_ipa_dev {
	struct rmnet_ctl_dev dev;
	spinlock_t rx_lock; /* rx lock */
	spinlock_t tx_lock; /* tx lock */
};

static struct rmnet_ctl_ipa_dev ctl_ipa_dev;
static bool rmnet_ctl_ipa_registered;

static int rmnet_ctl_send_ipa(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
{
	struct rmnet_ctl_ipa_dev *ctl_dev = container_of(
		dev, struct rmnet_ctl_ipa_dev, dev);
	int rc;

	spin_lock_bh(&ctl_dev->tx_lock);

	rc = ipa_rmnet_ctl_xmit(skb);
	if (rc)
		dev->stats.tx_err++;
	else
		dev->stats.tx_pkts++;

	spin_unlock_bh(&ctl_dev->tx_lock);

	return rc;
}

static void rmnet_ctl_dl_callback(void *user_data, void *rx_data)
{
	struct rmnet_ctl_ipa_dev *ctl_dev = user_data;

	ctl_dev->dev.stats.rx_pkts++;
	rmnet_ctl_endpoint_post(rx_data, 0xFFFFFFFF);
}

static void rmnet_ctl_probe(void *user_data)
{
	memset(&ctl_ipa_dev, 0, sizeof(ctl_ipa_dev));

	spin_lock_init(&ctl_ipa_dev.rx_lock);
	spin_lock_init(&ctl_ipa_dev.tx_lock);

	ctl_ipa_dev.dev.xmit = rmnet_ctl_send_ipa;
	rmnet_ctl_endpoint_setdev(&ctl_ipa_dev.dev);

	pr_info("rmnet_ctl driver probed\n");
}

static void rmnet_ctl_remove(void *user_data)
{
	rmnet_ctl_endpoint_setdev(NULL);

	pr_info("rmnet_ctl driver removed\n");
}

static void rmnet_ctl_ipa_ready(void *user_data)
{
	int rc;

	rc = ipa_register_rmnet_ctl_cb(
		rmnet_ctl_probe,
		&ctl_ipa_dev,
		rmnet_ctl_remove,
		&ctl_ipa_dev,
		rmnet_ctl_dl_callback,
		&ctl_ipa_dev);

	if (rc)
		pr_err("%s: %d\n", __func__, rc);
	else
		rmnet_ctl_ipa_registered = true;
}

static int __init rmnet_ctl_init(void)
{
	int rc;

	rc = ipa_register_ipa_ready_cb(rmnet_ctl_ipa_ready, NULL);
	if (rc == -EEXIST)
		rmnet_ctl_ipa_ready(NULL);
	else if (rc)
		pr_err("%s: %d\n", __func__, rc);

	return 0;
}

static void __exit rmnet_ctl_exit(void)
{
	if (rmnet_ctl_ipa_registered) {
		ipa_unregister_rmnet_ctl_cb();
		rmnet_ctl_ipa_registered = false;
	}
}

module_init(rmnet_ctl_init)
module_exit(rmnet_ctl_exit)

MODULE_DESCRIPTION("RmNet control IPA Driver");
MODULE_LICENSE("GPL v2");
222
qcom/opensource/datarmnet/core/rmnet_ctl_mhi.c
Normal file
@@ -0,0 +1,222 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * RMNET_CTL mhi handler
 *
 */

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"

#define RMNET_CTL_DEFAULT_MRU 256

struct rmnet_ctl_mhi_dev {
	struct mhi_device *mhi_dev;
	struct rmnet_ctl_dev dev;
	u32 mru;
	spinlock_t rx_lock; /* rx lock */
	spinlock_t tx_lock; /* tx lock */
	atomic_t in_reset;
};

static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
{
	struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
		dev, struct rmnet_ctl_mhi_dev, dev);
	int rc;

	spin_lock_bh(&ctl_dev->tx_lock);

	rc = mhi_queue_transfer(ctl_dev->mhi_dev,
				DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (rc)
		dev->stats.tx_err++;
	else
		dev->stats.tx_pkts++;

	spin_unlock_bh(&ctl_dev->tx_lock);

	return rc;
}

static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
				    gfp_t gfp, void *free_buf)
{
	struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
	void *buf;
	int no_tre, i, rc;

	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);

	if (!no_tre && free_buf) {
		kfree(free_buf);
		return;
	}

	for (i = 0; i < no_tre; i++) {
		if (free_buf) {
			buf = free_buf;
			free_buf = NULL;
		} else {
			buf = kmalloc(ctl_dev->mru, gfp);
		}

		if (!buf)
			return;

		spin_lock_bh(&ctl_dev->rx_lock);
		rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
					buf, ctl_dev->mru, MHI_EOT);
		spin_unlock_bh(&ctl_dev->rx_lock);

		if (rc) {
			kfree(buf);
			return;
		}
	}
}

static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
				  struct mhi_result *mhi_res)
{
	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);

	if (mhi_res->transaction_status == -ENOTCONN) {
		kfree(mhi_res->buf_addr);
		return;
	} else if (mhi_res->transaction_status ||
		   !mhi_res->buf_addr || !mhi_res->bytes_xferd) {
		rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
		ctl_dev->dev.stats.rx_err++;
	} else {
		ctl_dev->dev.stats.rx_pkts++;
		rmnet_ctl_endpoint_post(mhi_res->buf_addr,
					mhi_res->bytes_xferd);
	}

	/* Re-supply receive buffers */
	rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
}

static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
				  struct mhi_result *mhi_res)
{
	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;

	if (skb) {
		if (mhi_res->transaction_status) {
			rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
					  skb->data, skb->len);
			ctl_dev->dev.stats.tx_err++;
		} else {
			rmnet_ctl_log_debug("TXC", skb->data, skb->len);
			ctl_dev->dev.stats.tx_complete++;
		}
		kfree_skb(skb);
	}
}

static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
				      enum MHI_CB mhi_cb)
{
	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);

	if (mhi_cb != MHI_CB_FATAL_ERROR)
		return;

	atomic_inc(&ctl_dev->in_reset);
}

static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
			   const struct mhi_device_id *id)
{
	struct rmnet_ctl_mhi_dev *ctl_dev;
	struct device_node *of_node = mhi_dev->dev.of_node;
	int rc;

	ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
	if (!ctl_dev)
		return -ENOMEM;

	ctl_dev->mhi_dev = mhi_dev;
	ctl_dev->dev.xmit = rmnet_ctl_send_mhi;

	spin_lock_init(&ctl_dev->rx_lock);
	spin_lock_init(&ctl_dev->tx_lock);
	atomic_set(&ctl_dev->in_reset, 0);
	dev_set_drvdata(&mhi_dev->dev, ctl_dev);

	rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
	if (rc || !ctl_dev->mru)
		ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;

	rc = mhi_prepare_for_transfer(mhi_dev);
	if (rc) {
		pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
		return -EINVAL;
	}

	/* Post receive buffers */
	rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);

	rmnet_ctl_endpoint_setdev(&ctl_dev->dev);

	pr_info("rmnet_ctl driver probed\n");

	return 0;
}

static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
{
	rmnet_ctl_endpoint_setdev(NULL);
	synchronize_rcu();
	dev_set_drvdata(&mhi_dev->dev, NULL);

	pr_info("rmnet_ctl driver removed\n");
}

static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
	{ .chan = "RMNET_CTL" },
	{}
};

static struct mhi_driver rmnet_ctl_driver = {
	.probe = rmnet_ctl_probe,
	.remove = rmnet_ctl_remove,
	.dl_xfer_cb = rmnet_ctl_dl_callback,
	.ul_xfer_cb = rmnet_ctl_ul_callback,
	.status_cb = rmnet_ctl_status_callback,
	.id_table = rmnet_ctl_mhi_match,
	.driver = {
		.name = "rmnet_ctl",
		.owner = THIS_MODULE,
	},
};

static int __init rmnet_ctl_init(void)
{
	int rc;

	rc = mhi_driver_register(&rmnet_ctl_driver);
	rmnet_ctl_set_dbgfs(true);

	return rc;
}

static void __exit rmnet_ctl_exit(void)
{
	mhi_driver_unregister(&rmnet_ctl_driver);
	rmnet_ctl_set_dbgfs(false);
}

module_init(rmnet_ctl_init)
module_exit(rmnet_ctl_exit)

MODULE_DESCRIPTION("RmNet Control MHI Driver");
MODULE_LICENSE("GPL v2");
2061
qcom/opensource/datarmnet/core/rmnet_descriptor.c
Normal file
File diff suppressed because it is too large
119
qcom/opensource/datarmnet/core/rmnet_descriptor.h
Normal file
@@ -0,0 +1,119 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_

#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"

struct rmnet_frag_descriptor_pool {
	struct list_head free_list;
	u32 pool_size;
};

struct rmnet_fragment {
	struct list_head list;
	skb_frag_t frag;
};

struct rmnet_frag_descriptor {
	struct list_head list;
	struct list_head frags;
	struct net_device *dev;
	u32 coal_bufsize;
	u32 coal_bytes;
	u32 len;
	u32 hash;
	u32 priority;
	__be32 tcp_seq;
	__be16 ip_id;
	__be16 tcp_flags;
	u16 data_offset;
	u16 gso_size;
	u16 gso_segs;
	u16 ip_len;
	u16 trans_len;
	u8 ip_proto;
	u8 trans_proto;
	u8 pkt_id;
	u8 csum_valid:1,
	   hdrs_valid:1,
	   ip_id_set:1,
	   tcp_seq_set:1,
	   flush_shs:1,
	   tcp_flags_set:1,
	   reserved:2;
};

/* Descriptor management */
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port);
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size);
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size);
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf);
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len);
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len);
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *frag_offp,
				bool *frag_hdrp);

/* QMAP command packets */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port);
int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len);

/* Ingress data handlers */
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list, u32 priority);
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port);
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len);
void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port);

int rmnet_descriptor_init(struct rmnet_port *port);
void rmnet_descriptor_deinit(struct rmnet_port *port);

static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
	struct rmnet_fragment *frag;

	frag = list_first_entry_or_null(&frag_desc->frags,
					struct rmnet_fragment, list);

	if (!frag)
		return NULL;

	return skb_frag_address(&frag->frag);
}

#endif /* _RMNET_DESCRIPTOR_H_ */
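Descriptors built by this framework carry their payload as a list of struct rmnet_fragment entries rather than a linear buffer. A hedged sketch of walking that list; demo_desc_len() is illustrative, and real consumers normally read frag_desc->len directly.

/* Hypothetical example, not part of this commit: summing the mapped
 * length of every fragment attached to a descriptor.
 */
static u32 demo_desc_len(struct rmnet_frag_descriptor *frag_desc)
{
	struct rmnet_fragment *frag;
	u32 len = 0;

	list_for_each_entry(frag, &frag_desc->frags, list)
		len += skb_frag_size(&frag->frag);

	return len;
}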
463
qcom/opensource/datarmnet/core/rmnet_genl.c
Normal file
@@ -0,0 +1,463 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 *
 */

#include "rmnet_genl.h"
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ktime.h>

#define RMNET_CORE_GENL_MAX_STR_LEN 255

/* Static Functions and Definitions */
static struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
						1] = {
	[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
	[RMNET_CORE_GENL_ATTR_PID_BPS] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_bps_resp)),
	[RMNET_CORE_GENL_ATTR_PID_BOOST] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_boost_req)),
	[RMNET_CORE_GENL_ATTR_TETHER_INFO] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_tether_info_req)),
	[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING, .len =
				       RMNET_CORE_GENL_MAX_STR_LEN },
};

#define RMNET_CORE_GENL_OP(_cmd, _func) \
	{ \
		.cmd = _cmd, \
		.doit = _func, \
		.dumpit = NULL, \
		.flags = 0, \
	}

static const struct genl_ops rmnet_core_genl_ops[] = {
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
			   rmnet_core_genl_pid_bps_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
			   rmnet_core_genl_pid_boost_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_TETHER_INFO_REQ,
			   rmnet_core_genl_tether_info_req_hdlr),
};

struct genl_family rmnet_core_genl_family = {
	.hdrsize = 0,
	.name = RMNET_CORE_GENL_FAMILY_NAME,
	.version = RMNET_CORE_GENL_VERSION,
	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
	.policy = rmnet_genl_attr_policy,
	.ops = rmnet_core_genl_ops,
	.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};

#define RMNET_PID_STATS_HT_SIZE (8)
#define RMNET_PID_STATS_HT rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);

/* Spinlock definition for pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);

#define RMNET_GENL_SEC_TO_MSEC(x) ((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x) ((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
#define RMNET_GENL_NSEC_TO_SEC(x) ({\
	u64 __quotient = (x); \
	do_div(__quotient, 1000000000); \
	__quotient; \
})

int rmnet_core_userspace_connected;
#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */

struct rmnet_pid_node_s {
	struct hlist_node list;
	ktime_t timstamp_last_query;
	u64 tx_bytes;
	u64 tx_bytes_last_query;
	u64 tx_bps;
	u64 sched_boost_period_ms;
	int sched_boost_remaining_ms;
	int sched_boost_enable;
	pid_t pid;
};

typedef void (*rmnet_perf_tether_cmd_hook_t)(u8 message, u64 val);
rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_cmd_hook);

void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u8 is_match_found = 0;
	u64 tx_bytes = 0;

	*boost_enable = 0;
	*boost_period = 0;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID Match found */
			is_match_found = 1;
			node_p->tx_bytes += len;
			tx_bytes = node_p->tx_bytes;

			if (node_p->sched_boost_enable) {
				rm_err("boost triggered for pid %d",
				       pid);
				/* Just triggered boost, don't re-trigger */
				node_p->sched_boost_enable = 0;
				*boost_enable = 1;
				*boost_period = node_p->sched_boost_period_ms;
				node_p->sched_boost_remaining_ms =
							(int)*boost_period;
			}

			break;
		}

		if (is_match_found)
			break;

		/* No PID match */
		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
		if (!node_p)
			break;

		node_p->pid = pid;
		node_p->tx_bytes = len;
		node_p->sched_boost_enable = 0;
		node_p->sched_boost_period_ms = 0;
		node_p->sched_boost_remaining_ms = 0;
		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}

void rmnet_boost_for_pid(pid_t pid, int boost_enable,
			 u64 boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
|
||||
list, pid) {
|
||||
if (pid != node_p->pid)
|
||||
continue;
|
||||
|
||||
/* PID Match found */
|
||||
rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
|
||||
pid, boost_period);
|
||||
node_p->sched_boost_enable = boost_enable;
|
||||
node_p->sched_boost_period_ms = boost_period;
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
} while (0);
|
||||
spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
|
||||
}
|
||||
|
||||
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
|
||||
*pid_bps_resp_ptr)
|
||||
{
|
||||
struct timespec64 time;
|
||||
struct hlist_node *tmp;
|
||||
struct rmnet_pid_node_s *node_p;
|
||||
unsigned long ht_flags;
|
||||
u64 tx_bytes_cur, byte_diff, time_diff_ns, tmp_bits;
|
||||
int i;
|
||||
u16 bkt;
|
||||
|
||||
ktime_get_real_ts64(&time);
|
||||
pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
|
||||
time.tv_nsec;
|
||||
|
||||
/* Using do while to spin lock and unlock only once */
|
||||
spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
|
||||
do {
|
||||
i = 0;
|
||||
|
||||
hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
|
||||
node_p, list) {
|
||||
tx_bytes_cur = node_p->tx_bytes;
|
||||
if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
|
||||
/* Dont send inactive pids to userspace */
|
||||
hash_del(&node_p->list);
|
||||
kfree(node_p);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Compute bits per second */
|
||||
byte_diff = (node_p->tx_bytes -
|
||||
node_p->tx_bytes_last_query);
|
||||
time_diff_ns = (pid_bps_resp_ptr->timestamp -
|
||||
node_p->timstamp_last_query);
|
||||
|
||||
tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
|
||||
/* Note that do_div returns remainder and the */
|
||||
/* numerator gets assigned the quotient */
|
||||
/* Since do_div takes the numerator as a reference, */
|
||||
/* a tmp_bits is used*/
|
||||
do_div(tmp_bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));
|
||||
node_p->tx_bps = tmp_bits;
|
||||
|
||||
if (node_p->sched_boost_remaining_ms >=
|
||||
RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
|
||||
node_p->sched_boost_remaining_ms -=
|
||||
RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
|
||||
|
||||
rm_err("CORE_BOOST: enabling boost for pid %d\n"
|
||||
"sched boost remaining = %d ms",
|
||||
node_p->pid,
|
||||
node_p->sched_boost_remaining_ms);
|
||||
} else {
|
||||
node_p->sched_boost_remaining_ms = 0;
|
||||
}
|
||||
|
||||
pid_bps_resp_ptr->list[i].pid = node_p->pid;
|
||||
pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
|
||||
pid_bps_resp_ptr->list[i].boost_remaining_ms =
|
||||
node_p->sched_boost_remaining_ms;
|
||||
|
||||
node_p->timstamp_last_query =
|
||||
pid_bps_resp_ptr->timestamp;
|
||||
node_p->tx_bytes_last_query = tx_bytes_cur;
|
||||
i++;
|
||||
|
||||
/* Support copying up to 32 active pids */
|
||||
if (i >= RMNET_CORE_GENL_MAX_PIDS)
|
||||
break;
|
||||
}
|
||||
break;
|
||||
} while (0);
|
||||
spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
|
||||
|
||||
pid_bps_resp_ptr->list_len = i;
|
||||
}
|
||||
|
||||
int rmnet_core_genl_send_resp(struct genl_info *info,
|
||||
struct rmnet_core_pid_bps_resp *pid_bps_resp)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
void *msg_head;
|
||||
int rc;
|
||||
|
||||
if (!info || !pid_bps_resp) {
|
||||
rm_err("%s", "SHS_GNL: Invalid params\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
|
||||
if (!skb)
|
||||
goto out;
|
||||
|
||||
msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
|
||||
&rmnet_core_genl_family,
|
||||
0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
|
||||
if (!msg_head) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
|
||||
sizeof(struct rmnet_core_pid_bps_resp),
|
||||
pid_bps_resp);
|
||||
if (rc != 0)
|
||||
goto out;
|
||||
|
||||
genlmsg_end(skb, msg_head);
|
||||
|
||||
rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
|
||||
if (rc != 0)
|
||||
goto out;
|
||||
|
||||
rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
|
||||
return RMNET_GENL_SUCCESS;
|
||||
|
||||
out:
|
||||
/* TODO: Need to free skb?? */
|
||||
rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
|
||||
rmnet_core_userspace_connected = 0;
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
|
||||
struct genl_info *info)
|
||||
{
|
||||
struct nlattr *na;
|
||||
struct rmnet_core_pid_bps_req pid_bps_req;
|
||||
struct rmnet_core_pid_bps_resp pid_bps_resp;
|
||||
int is_req_valid = 0;
|
||||
|
||||
rm_err("CORE_GNL: %s connected = %d", __func__,
|
||||
rmnet_core_userspace_connected);
|
||||
|
||||
if (!info) {
|
||||
rm_err("%s", "CORE_GNL: error - info is null");
|
||||
pid_bps_resp.valid = 0;
|
||||
} else {
|
||||
na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
|
||||
if (na) {
|
||||
if (nla_memcpy(&pid_bps_req, na,
|
||||
sizeof(pid_bps_req)) > 0) {
|
||||
is_req_valid = 1;
|
||||
} else {
|
||||
rm_err("CORE_GNL: nla_memcpy failed %d\n",
|
||||
RMNET_CORE_GENL_ATTR_PID_BPS);
|
||||
}
|
||||
} else {
|
||||
rm_err("CORE_GNL: no info->attrs %d\n",
|
||||
RMNET_CORE_GENL_ATTR_PID_BPS);
|
||||
}
|
||||
}
|
||||
|
||||
if (!rmnet_core_userspace_connected)
|
||||
rmnet_core_userspace_connected = 1;
|
||||
|
||||
/* Copy to pid/byte list to the payload */
|
||||
memset(&pid_bps_resp, 0x0,
|
||||
sizeof(pid_bps_resp));
|
||||
if (is_req_valid) {
|
||||
rmnet_create_pid_bps_resp(&pid_bps_resp);
|
||||
}
|
||||
pid_bps_resp.valid = 1;
|
||||
|
||||
rmnet_core_genl_send_resp(info, &pid_bps_resp);
|
||||
|
||||
return RMNET_GENL_SUCCESS;
|
||||
}
|
||||
|
||||
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
|
||||
struct genl_info *info)
|
||||
{
|
||||
struct nlattr *na;
|
||||
struct rmnet_core_pid_boost_req pid_boost_req;
|
||||
int is_req_valid = 0;
|
||||
u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
|
||||
u16 i = 0;
|
||||
|
||||
rm_err("CORE_GNL: %s", __func__);
|
||||
|
||||
if (!info) {
|
||||
rm_err("%s", "CORE_GNL: error - info is null");
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
|
||||
if (na) {
|
||||
if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) > 0) {
|
||||
is_req_valid = 1;
|
||||
} else {
|
||||
rm_err("CORE_GNL: nla_memcpy failed %d\n",
|
||||
RMNET_CORE_GENL_ATTR_PID_BOOST);
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
} else {
|
||||
rm_err("CORE_GNL: no info->attrs %d\n",
|
||||
RMNET_CORE_GENL_ATTR_PID_BOOST);
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
|
||||
boost_pid_cnt = pid_boost_req.list_len;
|
||||
|
||||
if (!pid_boost_req.valid)
|
||||
boost_pid_cnt = 0;
|
||||
|
||||
for (i = 0; i < boost_pid_cnt; i++) {
|
||||
if (pid_boost_req.list[i].boost_enabled) {
|
||||
rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
|
||||
pid_boost_req.list[i].boost_period);
|
||||
}
|
||||
}
|
||||
|
||||
return RMNET_GENL_SUCCESS;
|
||||
}
|
||||
|
||||
int rmnet_core_genl_tether_info_req_hdlr(struct sk_buff *skb_2,
|
||||
struct genl_info *info)
|
||||
{
|
||||
struct nlattr *na;
|
||||
struct rmnet_core_tether_info_req tether_info_req;
|
||||
int is_req_valid = 0;
|
||||
rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd;
|
||||
|
||||
rm_err("CORE_GNL: %s", __func__);
|
||||
|
||||
if (!info) {
|
||||
rm_err("%s", "CORE_GNL: error - info is null");
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
na = info->attrs[RMNET_CORE_GENL_ATTR_TETHER_INFO];
|
||||
if (na) {
|
||||
if (nla_memcpy(&tether_info_req, na, sizeof(tether_info_req)) > 0) {
|
||||
is_req_valid = 1;
|
||||
} else {
|
||||
rm_err("CORE_GNL: nla_memcpy failed %d\n",
|
||||
RMNET_CORE_GENL_ATTR_TETHER_INFO);
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
} else {
|
||||
rm_err("CORE_GNL: no info->attrs %d\n",
|
||||
RMNET_CORE_GENL_ATTR_TETHER_INFO);
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
if (!tether_info_req.valid) {
|
||||
rm_err("%s", "CORE_GNL: tether info req is invalid");
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
rmnet_perf_tether_cmd = rcu_dereference(rmnet_perf_tether_cmd_hook);
|
||||
if (rmnet_perf_tether_cmd)
|
||||
rmnet_perf_tether_cmd(1, tether_info_req.tether_filters_en);
|
||||
|
||||
rm_err("CORE_GNL: tether filters %s",
|
||||
tether_info_req.tether_filters_en ? "enabled" : "disabled");
|
||||
|
||||
return RMNET_GENL_SUCCESS;
|
||||
}
|
||||
|
||||
/* register new rmnet core driver generic netlink family */
|
||||
int rmnet_core_genl_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = genl_register_family(&rmnet_core_genl_family);
|
||||
if (ret != 0) {
|
||||
rm_err("CORE_GNL: register family failed: %i", ret);
|
||||
genl_unregister_family(&rmnet_core_genl_family);
|
||||
return RMNET_GENL_FAILURE;
|
||||
}
|
||||
|
||||
rm_err("CORE_GNL: successfully registered generic netlink family: %s",
|
||||
RMNET_CORE_GENL_FAMILY_NAME);
|
||||
|
||||
return RMNET_GENL_SUCCESS;
|
||||
}
|
||||
|
||||
/* Unregister the generic netlink family */
|
||||
int rmnet_core_genl_deinit(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = genl_unregister_family(&rmnet_core_genl_family);
|
||||
if (ret != 0)
|
||||
rm_err("CORE_GNL: unregister family failed: %i\n", ret);
|
||||
|
||||
return RMNET_GENL_SUCCESS;
|
||||
}
|
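rmnet_perf_tether_cmd_hook is exported and read under RCU by rmnet_core_genl_tether_info_req_hdlr() above. As a hedged sketch (not part of this commit), a consumer module would attach and detach roughly like this; my_tether_cmd and the init/exit names are hypothetical:

/* Hypothetical consumer module attaching to the tether command hook. */
static void my_tether_cmd(u8 message, u64 val)
{
	pr_info("tether cmd %u val %llu\n", message, val);
}

static int __init my_hook_init(void)
{
	rcu_assign_pointer(rmnet_perf_tether_cmd_hook, my_tether_cmd);
	return 0;
}

static void __exit my_hook_exit(void)
{
	rcu_assign_pointer(rmnet_perf_tether_cmd_hook, NULL);
	synchronize_rcu(); /* wait out in-flight readers before unload */
}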
107
qcom/opensource/datarmnet/core/rmnet_genl.h
Normal file
@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 *
 */

#ifndef _RMNET_GENL_H_
#define _RMNET_GENL_H_

#include <net/genetlink.h>

#define RMNET_CORE_DEBUG 0

#define rm_err(fmt, ...) \
	do { if (RMNET_CORE_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)

/* Generic Netlink Definitions */
#define RMNET_CORE_GENL_VERSION 1
#define RMNET_CORE_GENL_FAMILY_NAME "RMNET_CORE"

#define RMNET_CORE_GENL_MAX_PIDS 32

#define RMNET_GENL_SUCCESS (0)
#define RMNET_GENL_FAILURE (-1)

extern int rmnet_core_userspace_connected;

enum {
	RMNET_CORE_GENL_CMD_UNSPEC,
	RMNET_CORE_GENL_CMD_PID_BPS_REQ,
	RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
	RMNET_CORE_GENL_CMD_TETHER_INFO_REQ,
	__RMNET_CORE_GENL_CMD_MAX,
};

enum {
	RMNET_CORE_GENL_ATTR_UNSPEC,
	RMNET_CORE_GENL_ATTR_STR,
	RMNET_CORE_GENL_ATTR_INT,
	RMNET_CORE_GENL_ATTR_PID_BPS,
	RMNET_CORE_GENL_ATTR_PID_BOOST,
	RMNET_CORE_GENL_ATTR_TETHER_INFO,
	__RMNET_CORE_GENL_ATTR_MAX,
};

#define RMNET_CORE_GENL_ATTR_MAX (__RMNET_CORE_GENL_ATTR_MAX - 1)

struct rmnet_core_pid_bps_info {
	u64 tx_bps;
	u32 pid;
	u32 boost_remaining_ms;
};

struct rmnet_core_pid_boost_info {
	u32 boost_enabled;
	/* Boost period in ms */
	u32 boost_period;
	u32 pid;
};

struct rmnet_core_pid_bps_req {
	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
	u64 timestamp;
	u16 list_len;
	u8 valid;
};

struct rmnet_core_pid_bps_resp {
	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
	u64 timestamp;
	u16 list_len;
	u8 valid;
};

struct rmnet_core_pid_boost_req {
	struct rmnet_core_pid_boost_info list[RMNET_CORE_GENL_MAX_PIDS];
	u64 timestamp;
	u16 list_len;
	u8 valid;
};

/* Tether Info Request Structure */
struct rmnet_core_tether_info_req {
	uint8_t tether_filters_en;
	uint8_t valid;
};

/* Function Prototypes */
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info);

int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
				       struct genl_info *info);

int rmnet_core_genl_tether_info_req_hdlr(struct sk_buff *skb_2,
					 struct genl_info *info);

/* Called by vnd select queue */
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period);

int rmnet_core_genl_init(void);

int rmnet_core_genl_deinit(void);

#endif /*_RMNET_CORE_GENL_H_*/
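The command and attribute enums above are the userspace ABI for the "RMNET_CORE" family. As a hedged illustration only, a libnl-genl-3 client could send a PID_BPS request roughly as follows; the client is not part of this tree, send_pid_bps_req is a hypothetical name, and error handling is elided:

/* Hypothetical userspace client using libnl-genl-3. */
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int send_pid_bps_req(void)
{
	struct rmnet_core_pid_bps_req req = { .valid = 1 };
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, rc;

	if (!sk || genl_connect(sk))
		return -1;

	family = genl_ctrl_resolve(sk, RMNET_CORE_GENL_FAMILY_NAME);
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    RMNET_CORE_GENL_CMD_PID_BPS_REQ, RMNET_CORE_GENL_VERSION);
	/* req and resp structs share a layout, satisfying the kernel's
	 * NLA_POLICY_EXACT_LEN check on this attribute.
	 */
	nla_put(msg, RMNET_CORE_GENL_ATTR_PID_BPS, sizeof(req), &req);

	rc = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return rc < 0 ? -1 : 0;
}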
551
qcom/opensource/datarmnet/core/rmnet_handlers.c
Executable file
@ -0,0 +1,551 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inet.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#include "rmnet_ll.h"
#include "rmnet_module.h"

#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
#define CREATE_TRACE_POINTS
#include "rmnet_trace.h"

EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
EXPORT_TRACEPOINT_SYMBOL(print_icmp_rx);

/* Helper Functions */

void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
EXPORT_SYMBOL(rmnet_set_skb_proto);

bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_slow_start_detect);

bool rmnet_slow_start_on(u32 hash_key)
{
	bool (*rmnet_shs_slow_start_on)(u32 hash_key);

	rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
	if (rmnet_shs_slow_start_on)
		return rmnet_shs_slow_start_on(hash_key);

	return false;
}
EXPORT_SYMBOL(rmnet_slow_start_on);

/* SHS hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
			   struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);

int (*rmnet_shs_switch)(struct sk_buff *skb,
			struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_switch);

/* SHS hook handler for work queue */
int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
			      struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);

/* Generic handler */

void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
	int (*rmnet_shs_stamp)(struct sk_buff *skb,
			       struct rmnet_shs_clnt_s *cfg);

	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
			0xDEF, 0xDEF, (void *)skb, NULL);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb->dev, skb->len);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);

	/* Low latency packets use a different balancing scheme */
	if (skb->priority == 0xda1a)
		goto skip_shs;

	rcu_read_lock();
	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
	if (rmnet_shs_stamp) {
		rmnet_shs_stamp(skb, &port->shs_cfg);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

skip_shs:
	if (rmnet_module_hook_shs_skb_ll_entry(NULL, skb, &port->shs_cfg))
		return;

	netif_receive_skb(skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb);

/* Important to note, port cannot be used here if it has gone stale */
void
rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
		     enum rmnet_packet_context ctx)
{
	int (*rmnet_shs_stamp)(struct sk_buff *skb,
			       struct rmnet_shs_clnt_s *cfg);
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
			0xDEF, 0xDEF, (void *)skb, NULL);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb->dev, skb->len);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);

	/* Packets coming from work queue context due to the packet flush
	 * timer must go through the special workqueue path in the SHS driver.
	 */
	rcu_read_lock();
	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
				   rcu_dereference(rmnet_shs_skb_entry_wq);
	if (rmnet_shs_stamp) {
		rmnet_shs_stamp(skb, &port->shs_cfg);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (ctx == RMNET_NET_RX_CTX)
		netif_receive_skb(skb);
	else
		gro_cells_receive(&priv->gro_cells, skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);

/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
				   struct rmnet_port *port)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(head))) {
		rmnet_set_skb_proto(skb);
		rmnet_deliver_skb(skb, port);
	}
}

/* MAP handler */

static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_map_header *qmap;
	struct rmnet_endpoint *ep;
	struct sk_buff_head list;
	u16 len, pad;
	u8 mux_id;

	/* We don't need the spinlock since only we touch this */
	__skb_queue_head_init(&list);

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			if (!rmnet_map_flow_command(skb, port, false))
				return;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_map_process_next_hdr_packet(skb, &list, len))
			goto free_skb;
	} else {
		/* We only have the main QMAP header to worry about */
		pskb_pull(skb, sizeof(*qmap));

		rmnet_set_skb_proto(skb);

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
			if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		pskb_trim(skb, len);

		/* Push the single packet onto the list */
		__skb_queue_tail(&list, skb);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port, NULL, skb_peek(&list));

	rmnet_deliver_skb_list(&list, port);
	return;

free_skb:
	kfree_skb(skb);
}

int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
			     struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);

static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;
	int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
					   struct rmnet_port *port);

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				 RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5)) {
		if (skb_is_nonlinear(skb)) {
			rmnet_frag_ingress_handler(skb, port);
			return;
		}
	}

	/* No aggregation. Pass the frame on as is */
	if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
		__rmnet_map_ingress_handler(skb, port);
		return;
	}

	if (skb->priority == 0xda1a)
		goto no_perf;

	/* Pass off handling to the rmnet_perf module, if present */
	rcu_read_lock();
	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
	if (rmnet_perf_core_deaggregate) {
		rmnet_perf_core_deaggregate(skb, port);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

no_perf:
	/* Deaggregation and freeing of HW originating
	 * buffers are done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

		skb_shinfo(skb)->frag_list = NULL;
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
			__rmnet_map_ingress_handler(skbn, port);

			if (skbn == skb)
				goto next_skb;
		}

		consume_skb(skb);
next_skb:
		skb = skb_frag;
	}
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev,
				    bool low_latency)
{
	int required_headroom, additional_header_len, csum_type, tso = 0;
	struct rmnet_map_header *map_header;
	struct rmnet_aggregation_state *state;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);
	csum_type = 0;

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	} else if ((port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5) ||
		   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
		csum_type = RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5;
	}

	required_headroom += additional_header_len;

	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port, NULL, NULL);

	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
						 RMNET_DEFAULT_AGG_STATE];

	if (csum_type &&
	    (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
	    skb_shinfo(skb)->gso_size) {
		spin_lock_bh(&state->agg_lock);
		rmnet_map_send_agg_skb(state);

		if (rmnet_map_add_tso_header(skb, port, orig_dev))
			return -EINVAL;
		csum_type = 0;
		tso = 1;
	}

	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
						 csum_type);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
					      port);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		if (state->params.agg_count < 2 ||
		    rmnet_map_tx_agg_skip(skb, required_headroom) || tso)
			goto done;

		rmnet_map_tx_aggregate(skb, port, low_latency);
		return -EINPROGRESS;
	}

done:
	skb->protocol = htons(ETH_P_MAP);
	return 0;
}

static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;
	struct rmnet_skb_cb *cb;
	int (*rmnet_core_shs_switch)(struct sk_buff *skb,
				     struct rmnet_shs_clnt_s *cfg);

	if (!skb)
		goto done;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
			0xDEF, 0xDEF, 0xDEF, NULL, NULL);
	dev = skb->dev;
	port = rmnet_get_port(dev);
	if (unlikely(!port)) {
#if (KERNEL_VERSION(6, 0, 0) < LINUX_VERSION_CODE)
		dev_core_stats_rx_nohandler_inc(skb->dev);
#else
		atomic_long_inc(&skb->dev->rx_nohandler);
#endif
		kfree_skb(skb);
		goto done;
	}

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rcu_read_lock();
		rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
		cb = RMNET_SKB_CB(skb);
		if (rmnet_core_shs_switch && !cb->qmap_steer &&
		    skb->priority != 0xda1a) {
			cb->qmap_steer = 1;
			rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
			rcu_read_unlock();
			return RX_HANDLER_CONSUMED;
		}
		rcu_read_unlock();

		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);

rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	rx_handler_result_t rc = RX_HANDLER_PASS;

	rmnet_module_hook_wlan_ingress_rx_handler(&rc, pskb);
	if (rc != RX_HANDLER_PASS)
		return rc;

	rmnet_module_hook_perf_ingress_rx_handler(skb);

	return RX_HANDLER_PASS;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb, bool low_latency)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;
	int err;
	u32 skb_len;

	trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
			0xDEF, (void *)skb, NULL);
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port(skb->dev);
	if (!port)
		goto drop;

	skb_len = skb->len;
	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev,
				       low_latency);
	if (err == -ENOMEM || err == -EINVAL) {
		goto drop;
	} else if (err == -EINPROGRESS) {
		rmnet_vnd_tx_fixup(orig_dev, skb_len);
		return;
	}

	rmnet_vnd_tx_fixup(orig_dev, skb_len);

	if (low_latency) {
		if (rmnet_ll_send_skb(skb)) {
			/* Drop but no need to free. Above API handles that */
			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
			return;
		}
	} else {
		dev_queue_xmit(skb);
	}

	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}
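rmnet_rx_handler() is an rx_handler in the core-kernel sense: it only runs once it has been attached to the physical device. A hedged sketch of that attachment (rmnet performs it during link creation in rmnet_config.c, which is outside this excerpt; example_attach is a hypothetical name):

/* Sketch: attaching rmnet_rx_handler to the real device under RTNL.
 * The port pointer is later retrieved from the device's rx_handler_data,
 * which is one plausible way rmnet_get_port() resolves it (assumption).
 */
static int example_attach(struct net_device *real_dev, struct rmnet_port *port)
{
	int rc;

	rtnl_lock();
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	rtnl_unlock();
	return rc;
}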
37
qcom/opensource/datarmnet/core/rmnet_handlers.h
Normal file
@ -0,0 +1,37 @@
/* Copyright (c) 2013, 2016-2017, 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#ifndef _RMNET_HANDLERS_H_
#define _RMNET_HANDLERS_H_

#include "rmnet_config.h"

enum rmnet_packet_context {
	RMNET_NET_RX_CTX,
	RMNET_WQ_CTX,
};

void rmnet_egress_handler(struct sk_buff *skb, bool low_latency);
void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
			  enum rmnet_packet_context ctx);
void rmnet_set_skb_proto(struct sk_buff *skb);
bool rmnet_slow_start_on(u32 hash_key);
rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
					       struct rmnet_port *port);
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb);
#endif /* _RMNET_HANDLERS_H_ */
159
qcom/opensource/datarmnet/core/rmnet_hook.h
Normal file
@ -0,0 +1,159 @@
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#if !defined(__RMNET_HOOKS__) || defined(__RMNET_HOOK_MULTIREAD__)
#define __RMNET_HOOKS__

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include "rmnet_descriptor.h"

RMNET_MODULE_HOOK(offload_ingress,
	RMNET_MODULE_HOOK_NUM(OFFLOAD_INGRESS),
	RMNET_MODULE_HOOK_PROTOCOL(struct list_head *desc_list,
				   struct rmnet_port *port),
	RMNET_MODULE_HOOK_ARGS(desc_list, port),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(offload_chain_end,
	RMNET_MODULE_HOOK_NUM(OFFLOAD_CHAIN_END),
	RMNET_MODULE_HOOK_PROTOCOL(void),
	RMNET_MODULE_HOOK_ARGS(),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(shs_skb_entry,
	RMNET_MODULE_HOOK_NUM(SHS_SKB_ENTRY),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
				   struct rmnet_shs_clnt_s *cfg),
	RMNET_MODULE_HOOK_ARGS(skb, cfg),
	RMNET_MODULE_HOOK_RETURN_TYPE(int)
);

RMNET_MODULE_HOOK(shs_skb_ll_entry,
	RMNET_MODULE_HOOK_NUM(SHS_SKB_LL_ENTRY),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
				   struct rmnet_shs_clnt_s *cfg),
	RMNET_MODULE_HOOK_ARGS(skb, cfg),
	RMNET_MODULE_HOOK_RETURN_TYPE(int)
);

RMNET_MODULE_HOOK(shs_switch,
	RMNET_MODULE_HOOK_NUM(SHS_SWITCH),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
				   struct rmnet_shs_clnt_s *cfg),
	RMNET_MODULE_HOOK_ARGS(skb, cfg),
	RMNET_MODULE_HOOK_RETURN_TYPE(int)
);

RMNET_MODULE_HOOK(perf_tether_ingress,
	RMNET_MODULE_HOOK_NUM(PERF_TETHER_INGRESS),
	RMNET_MODULE_HOOK_PROTOCOL(struct tcphdr *tp,
				   struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(tp, skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(perf_tether_egress,
	RMNET_MODULE_HOOK_NUM(PERF_TETHER_EGRESS),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(perf_tether_cmd,
	RMNET_MODULE_HOOK_NUM(PERF_TETHER_CMD),
	RMNET_MODULE_HOOK_PROTOCOL(u8 message, u64 val),
	RMNET_MODULE_HOOK_ARGS(message, val),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(perf_ingress,
	RMNET_MODULE_HOOK_NUM(PERF_INGRESS),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(int)
);

RMNET_MODULE_HOOK(perf_egress,
	RMNET_MODULE_HOOK_NUM(PERF_EGRESS),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(perf_set_thresh,
	RMNET_MODULE_HOOK_NUM(PERF_SET_THRESH),
	RMNET_MODULE_HOOK_PROTOCOL(u32 hash, u32 thresh),
	RMNET_MODULE_HOOK_ARGS(hash, thresh),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(aps_pre_queue,
	RMNET_MODULE_HOOK_NUM(APS_PRE_QUEUE),
	RMNET_MODULE_HOOK_PROTOCOL(struct net_device *dev, struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(dev, skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(aps_post_queue,
	RMNET_MODULE_HOOK_NUM(APS_POST_QUEUE),
	RMNET_MODULE_HOOK_PROTOCOL(struct net_device *dev, struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(dev, skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(int)
);

RMNET_MODULE_HOOK(wlan_flow_match,
	RMNET_MODULE_HOOK_NUM(WLAN_FLOW_MATCH),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(aps_data_inactive,
	RMNET_MODULE_HOOK_NUM(APS_DATA_INACTIVE),
	RMNET_MODULE_HOOK_PROTOCOL(void),
	RMNET_MODULE_HOOK_ARGS(),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(aps_data_active,
	RMNET_MODULE_HOOK_NUM(APS_DATA_ACTIVE),
	RMNET_MODULE_HOOK_PROTOCOL(struct rmnet_frag_descriptor *frag_desc,
				   struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(frag_desc, skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(aps_data_report,
	RMNET_MODULE_HOOK_NUM(APS_DATA_REPORT),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(perf_ingress_rx_handler,
	RMNET_MODULE_HOOK_NUM(PERF_INGRESS_RX_HANDLER),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
	RMNET_MODULE_HOOK_ARGS(skb),
	RMNET_MODULE_HOOK_RETURN_TYPE(void)
);

RMNET_MODULE_HOOK(wlan_ingress_rx_handler,
	RMNET_MODULE_HOOK_NUM(WLAN_INGRESS_RX_HANDLER),
	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff **pskb),
	RMNET_MODULE_HOOK_ARGS(pskb),
	RMNET_MODULE_HOOK_RETURN_TYPE(rx_handler_result_t)
);

#endif
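rmnet_hook.h is a multi-read X-macro header: the __RMNET_HOOK_MULTIREAD__ guard lets rmnet_module.h include it several times with different definitions of the RMNET_MODULE_HOOK macro family to stamp out enum values, prototypes, and call wrappers from one hook list. A hedged, simplified illustration of one such pass follows; these macro bodies are illustrative only, and the real expansion in rmnet_module.h differs (for one, the generated rmnet_module_hook_*() wrappers also thread a return-value pointer, as the calls in rmnet_handlers.c show):

/* Simplified consumer pass: generate bare prototypes from the hook list.
 * The trailing semicolon after each hook comes from the header itself.
 */
#define RMNET_MODULE_HOOK(name, num, proto, args, ret) \
	ret rmnet_example_hook_##name proto
#define RMNET_MODULE_HOOK_NUM(num)
#define RMNET_MODULE_HOOK_PROTOCOL(...) (__VA_ARGS__)
#define RMNET_MODULE_HOOK_ARGS(...)
#define RMNET_MODULE_HOOK_RETURN_TYPE(type) type
#define __RMNET_HOOK_MULTIREAD__
#include "rmnet_hook.h"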
173
qcom/opensource/datarmnet/core/rmnet_ll.c
Normal file
@ -0,0 +1,173 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/version.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

#define RMNET_LL_MAX_RECYCLE_ITER 16

static struct rmnet_ll_stats rmnet_ll_stats;
/* For TX sync with DMA operations */
DEFINE_SPINLOCK(rmnet_ll_tx_lock);

/* Client operations for respective underlying HW */
extern struct rmnet_ll_client_ops rmnet_ll_client;

static void rmnet_ll_buffers_submit(struct rmnet_ll_endpoint *ll_ep,
				    struct list_head *buf_list)
{
	struct rmnet_ll_buffer *ll_buf;

	list_for_each_entry(ll_buf, buf_list, list) {
		if (ll_buf->submitted)
			continue;

		if (!rmnet_ll_client.buffer_queue ||
		    rmnet_ll_client.buffer_queue(ll_ep, ll_buf)) {
			rmnet_ll_stats.rx_queue_err++;
			/* Don't leak the page if we're not storing it */
			if (ll_buf->temp_alloc)
				put_page(ll_buf->page);
		} else {
			ll_buf->submitted = true;
			rmnet_ll_stats.rx_queue++;
		}
	}
}

static struct rmnet_ll_buffer *
rmnet_ll_buffer_alloc(struct rmnet_ll_endpoint *ll_ep, gfp_t gfp)
{
	struct rmnet_ll_buffer *ll_buf;
	struct page *page;
	void *addr;

	page = __dev_alloc_pages(gfp, ll_ep->page_order);
	if (!page)
		return NULL;

	/* Store the buffer information at the end */
	addr = page_address(page);
	ll_buf = addr + ll_ep->buf_len;
	ll_buf->page = page;
	ll_buf->submitted = false;
	INIT_LIST_HEAD(&ll_buf->list);
	return ll_buf;
}

int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
{
	spin_lock_init(&ll_ep->buf_pool.pool_lock);
	INIT_LIST_HEAD(&ll_ep->buf_pool.buf_list);
	ll_ep->buf_pool.last = ll_ep->buf_pool.buf_list.next;
	ll_ep->buf_pool.pool_size = 0;
	return 0;
}

void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
{
	struct rmnet_ll_buffer *ll_buf, *tmp;

	list_for_each_entry_safe(ll_buf, tmp, &ll_ep->buf_pool.buf_list, list) {
		list_del(&ll_buf->list);
		put_page(ll_buf->page);
	}

	ll_ep->buf_pool.last = NULL;
}

void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
{
	struct rmnet_ll_buffer *ll_buf, *tmp;
	LIST_HEAD(buf_list);
	int num_tre, count = 0, iter = 0;

	if (!rmnet_ll_client.query_free_descriptors)
		goto out;

	num_tre = rmnet_ll_client.query_free_descriptors(ll_ep);
	if (!num_tre)
		goto out;

	list_for_each_entry_safe(ll_buf, tmp, ll_ep->buf_pool.last, list) {
		if (++iter > RMNET_LL_MAX_RECYCLE_ITER || count == num_tre)
			break;

		if (ll_buf->submitted)
			continue;

		count++;
		list_move_tail(&ll_buf->list, &buf_list);
	}

	/* Mark where we left off */
	ll_ep->buf_pool.last = &ll_buf->list;
	/* Submit any pool buffers to the HW if we found some */
	if (count) {
		rmnet_ll_buffers_submit(ll_ep, &buf_list);
		/* Requeue immediately BEFORE the last checked element */
		list_splice_tail_init(&buf_list, ll_ep->buf_pool.last);
	}

	/* Do any temporary allocations needed to fill the rest */
	for (; count < num_tre; count++) {
		ll_buf = rmnet_ll_buffer_alloc(ll_ep, GFP_ATOMIC);
		if (!ll_buf)
			break;

		list_add_tail(&ll_buf->list, &buf_list);
		ll_buf->temp_alloc = true;
		rmnet_ll_stats.rx_tmp_allocs++;
	}

	if (!list_empty(&buf_list))
		rmnet_ll_buffers_submit(ll_ep, &buf_list);

out:
	return;
}

int rmnet_ll_send_skb(struct sk_buff *skb)
{
	int rc;

	spin_lock_bh(&rmnet_ll_tx_lock);
	rc = rmnet_ll_client.tx(skb);
	spin_unlock_bh(&rmnet_ll_tx_lock);
	if (rc)
		rmnet_ll_stats.tx_queue_err++;
	else
		rmnet_ll_stats.tx_queue++;

	return rc;
}

struct rmnet_ll_stats *rmnet_ll_get_stats(void)
{
	return &rmnet_ll_stats;
}

int rmnet_ll_init(void)
{
	return rmnet_ll_client.init();
}

void rmnet_ll_exit(void)
{
	rmnet_ll_client.exit();
}
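rmnet_ll_buffer_alloc() above carves the struct rmnet_ll_buffer bookkeeping out of the tail of the data page itself, so buf_len must leave room for it at the end of the allocation. A hedged sketch of sizing that makes the "addr + ll_ep->buf_len" arithmetic line up; the MRU-based setup actually lives in the transport probe paths outside this excerpt, and the exact computation here is an assumption:

/* Sketch: one way the endpoint geometry could be derived so the buffer
 * struct fits after buf_len data bytes. Names mirror rmnet_ll_core.h.
 */
static void example_ep_geometry(struct rmnet_ll_endpoint *ll_ep)
{
	ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;
	ll_ep->page_order = get_order(ll_ep->dev_mru +
				      sizeof(struct rmnet_ll_buffer));
	ll_ep->buf_len = (PAGE_SIZE << ll_ep->page_order) -
			 sizeof(struct rmnet_ll_buffer);
}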
45
qcom/opensource/datarmnet/core/rmnet_ll.h
Normal file
@ -0,0 +1,45 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#ifndef __RMNET_LL_H__
#define __RMNET_LL_H__

#include <linux/skbuff.h>

struct rmnet_ll_stats {
	u64 tx_queue;
	u64 tx_queue_err;
	u64 tx_complete;
	u64 tx_complete_err;
	u64 rx_queue;
	u64 rx_queue_err;
	u64 rx_status_err;
	u64 rx_null;
	u64 rx_oom;
	u64 rx_pkts;
	u64 rx_tmp_allocs;
	u64 tx_disabled;
	u64 tx_enabled;
	u64 tx_fc_queued;
	u64 tx_fc_sent;
	u64 tx_fc_err;
};

int rmnet_ll_send_skb(struct sk_buff *skb);
struct rmnet_ll_stats *rmnet_ll_get_stats(void);
int rmnet_ll_init(void);
void rmnet_ll_exit(void);

#endif
69
qcom/opensource/datarmnet/core/rmnet_ll_core.h
Normal file
@ -0,0 +1,69 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#ifndef __RMNET_LL_CORE_H__
#define __RMNET_LL_CORE_H__

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>

#define RMNET_LL_DEFAULT_MRU 0x8000

struct rmnet_ll_buffer {
	struct list_head list;
	struct page *page;
	bool temp_alloc;
	bool submitted;
};

struct rmnet_ll_buffer_pool {
	struct list_head buf_list;
	/* Protect access to the recycle buffer pool */
	spinlock_t pool_lock;
	struct list_head *last;
	u32 pool_size;
};

struct rmnet_ll_endpoint {
	struct rmnet_ll_buffer_pool buf_pool;
	struct net_device *phys_dev;
	void *priv;
	u32 dev_mru;
	u32 page_order;
	u32 buf_len;
};

/* Core operations to hide differences between physical transports.
 *
 * buffer_queue: Queue an allocated buffer to the HW for RX. Optional.
 * query_free_descriptors: Return number of free RX descriptors. Optional.
 * tx: Send an SKB over the channel in the TX direction.
 * init: Initialization callback on module load
 * exit: Exit callback on module unload
 */
struct rmnet_ll_client_ops {
	int (*buffer_queue)(struct rmnet_ll_endpoint *ll_ep,
			    struct rmnet_ll_buffer *ll_buf);
	int (*query_free_descriptors)(struct rmnet_ll_endpoint *ll_ep);
	int (*tx)(struct sk_buff *skb);
	int (*init)(void);
	int (*exit)(void);
};

int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep);
void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep);
void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep);

#endif
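Exactly one translation unit defining rmnet_ll_client is linked into the module, and rmnet_ll.c binds to it through its extern declaration; the IPA and MHI files that follow are the two real implementations. As a hedged skeleton of the minimum the contract requires (names other than rmnet_ll_client_ops are hypothetical):

/* Skeletal client satisfying only the mandatory ops. A real client must
 * be named rmnet_ll_client for rmnet_ll.c to link against it; tx frees
 * the skb on failure, matching the "no need to free" comment in
 * rmnet_egress_handler().
 */
static int example_tx(struct sk_buff *skb)
{
	kfree_skb(skb); /* no transport present; caller counts the drop */
	return -ENODEV;
}

static int example_init(void) { return 0; }
static int example_exit(void) { return 0; }

struct rmnet_ll_client_ops example_ll_client = {
	.tx = example_tx,
	.init = example_init,
	.exit = example_exit,
};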
222
qcom/opensource/datarmnet/core/rmnet_ll_ipa.c
Normal file
@ -0,0 +1,222 @@
|
||||
/* Copyright (c) 2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* RmNet IPA Low Latency channel handlers
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/skbuff.h>
|
||||
#if !defined(__arch_um__)
|
||||
#include <linux/ipa.h>
|
||||
#endif /* !defined(__arch_um__) */
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/version.h>
|
||||
#include "rmnet_ll.h"
|
||||
#include "rmnet_ll_core.h"
|
||||
|
||||
#define IPA_RMNET_LL_RECEIVE 1
|
||||
#define IPA_RMNET_LL_FLOW_EVT 2
|
||||
|
||||
#define MAX_Q_LEN 1000
|
||||
|
||||
#if !defined(__arch_um__)
|
||||
static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
|
||||
static struct sk_buff_head tx_pending_list;
|
||||
extern spinlock_t rmnet_ll_tx_lock;
|
||||
|
||||
static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t);
|
||||
DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending);
|
||||
static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t)
|
||||
{
|
||||
struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
|
||||
struct sk_buff *skb;
|
||||
int rc;
|
||||
|
||||
spin_lock_bh(&rmnet_ll_tx_lock);
|
||||
|
||||
while ((skb = __skb_dequeue(&tx_pending_list))) {
|
||||
rc = ipa_rmnet_ll_xmit(skb);
|
||||
if (rc == -EAGAIN) {
|
||||
stats->tx_disabled++;
|
||||
__skb_queue_head(&tx_pending_list, skb);
|
||||
break;
|
||||
}
|
||||
if (rc >= 0)
|
||||
stats->tx_fc_sent++;
|
||||
else
|
||||
stats->tx_fc_err++;
|
||||
}
|
||||
|
||||
spin_unlock_bh(&rmnet_ll_tx_lock);
|
||||
}
|
||||
|
||||
static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
|
||||
{
|
||||
struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ipa_ep;
|
||||
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct sk_buff *skb, *tmp;

	if (arg == (void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT)) {
		stats->tx_enabled++;
		tasklet_schedule(&tx_pending_task);
		return;
	}

	if (unlikely(arg != (void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE))) {
		pr_err("%s: invalid arg %lu\n", __func__,
		       (unsigned long)(uintptr_t)arg);
		return;
	}

	skb = rx_data;
	/* Odds are IPA does this, but just to be safe */
	skb->dev = ll_ep->phys_dev;
	skb->protocol = htons(ETH_P_MAP);
	skb_record_rx_queue(skb, 1);

	tmp = skb;
	while (tmp) {
		/* Mark the SKB as low latency */
		tmp->priority = 0xda1a;
		if (tmp == skb)
			tmp = skb_shinfo(tmp)->frag_list;
		else
			tmp = tmp->next;
	}

	stats->rx_pkts++;
	netif_rx(skb);
}

static void rmnet_ll_ipa_probe(void *arg)
{
	struct rmnet_ll_endpoint *ll_ep;

	ll_ep = kzalloc(sizeof(*ll_ep), GFP_KERNEL);
	if (!ll_ep) {
		pr_err("%s(): allocating LL CTX failed\n", __func__);
		return;
	}

	ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_ipa0");
	if (!ll_ep->phys_dev) {
		pr_err("%s(): Invalid physical device\n", __func__);
		kfree(ll_ep);
		return;
	}

	*((struct rmnet_ll_endpoint **)arg) = ll_ep;
}

static void rmnet_ll_ipa_remove(void *arg)
{
	struct rmnet_ll_endpoint **ll_ep = arg;
	struct sk_buff *skb;

	dev_put((*ll_ep)->phys_dev);
	kfree(*ll_ep);
	*ll_ep = NULL;

	spin_lock_bh(&rmnet_ll_tx_lock);
	while ((skb = __skb_dequeue(&tx_pending_list)))
		kfree_skb(skb);
	spin_unlock_bh(&rmnet_ll_tx_lock);
}

static void rmnet_ll_ipa_ready(void *__unused)
{
	int rc;

	rc = ipa_register_rmnet_ll_cb(rmnet_ll_ipa_probe,
				      (void *)&rmnet_ll_ipa_ep,
				      rmnet_ll_ipa_remove,
				      (void *)&rmnet_ll_ipa_ep,
				      rmnet_ll_ipa_rx,
				      (void *)&rmnet_ll_ipa_ep);
	if (rc)
		pr_err("%s(): Registering IPA LL callback failed with rc %d\n",
		       __func__, rc);
}

static int rmnet_ll_ipa_tx(struct sk_buff *skb)
{
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	int rc;

	if (!rmnet_ll_ipa_ep)
		return -ENODEV;

	if (!skb_queue_empty(&tx_pending_list))
		goto queue_skb;

	rc = ipa_rmnet_ll_xmit(skb);

	/* rc >= 0: success, return number of free descriptors left */
	if (rc >= 0)
		return 0;

	/* IPA handles freeing the SKB on failure */
	if (rc != -EAGAIN)
		return rc;

	stats->tx_disabled++;

queue_skb:
	/* Flow controlled */
	if (skb_queue_len(&tx_pending_list) >= MAX_Q_LEN) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_queue_tail(&tx_pending_list, skb);
	stats->tx_fc_queued++;

	return 0;
}

static int rmnet_ll_ipa_init(void)
{
	int rc;

	__skb_queue_head_init(&tx_pending_list);
	rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
	if (rc == -EEXIST) {
		/* IPA is already up. Call it ourselves, since they don't */
		rmnet_ll_ipa_ready(NULL);
		rc = 0;
	}

	return rc;
}

static int rmnet_ll_ipa_exit(void)
{
	if (rmnet_ll_ipa_ep) {
		ipa_unregister_rmnet_ll_cb();
		/* Teardown? */
		rmnet_ll_ipa_ep = NULL;
	}

	return 0;
}
#else
static int rmnet_ll_ipa_tx(struct sk_buff *skb)
{
	return 0;
}

static int rmnet_ll_ipa_init(void)
{
	return 0;
}

static int rmnet_ll_ipa_exit(void)
{
	return 0;
}
#endif /* !defined(__arch_um__) */

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
	.tx = rmnet_ll_ipa_tx,
	.init = rmnet_ll_ipa_init,
	.exit = rmnet_ll_ipa_exit,
};
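The flow-control path above only schedules tx_pending_task; the tasklet body that drains tx_pending_list lives elsewhere in the driver and is not part of this hunk. As a rough sketch of what such a drain handler could look like (everything except tx_pending_list, rmnet_ll_tx_lock, ipa_rmnet_ll_xmit(), and rmnet_ll_get_stats() is an assumption, including the tx_fc_sent counter name):

/* Hypothetical drain handler, for illustration only. Re-queues the skb at
 * the head of tx_pending_list if IPA backpressures again with -EAGAIN; for
 * any other return, IPA has already consumed the skb (see the comment in
 * rmnet_ll_ipa_tx() above).
 */
static void rmnet_ll_ipa_tx_pending(unsigned long data)
{
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct sk_buff *skb;

	spin_lock_bh(&rmnet_ll_tx_lock);
	while ((skb = __skb_dequeue(&tx_pending_list))) {
		if (ipa_rmnet_ll_xmit(skb) == -EAGAIN) {
			/* Still flow controlled; put it back and stop */
			__skb_queue_head(&tx_pending_list, skb);
			break;
		}
		stats->tx_fc_sent++; /* assumed stats field */
	}
	spin_unlock_bh(&rmnet_ll_tx_lock);
}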
239  qcom/opensource/datarmnet/core/rmnet_ll_mhi.c  Normal file
@@ -0,0 +1,239 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet MHI Low Latency channel handlers
 */

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include <linux/if_ether.h>
#include <linux/mm.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

static struct rmnet_ll_endpoint *rmnet_ll_mhi_ep;

static void rmnet_ll_mhi_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
{
	struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct rmnet_ll_buffer *ll_buf;
	struct sk_buff *skb;

	/* Get the buffer struct back for our page information */
	ll_buf = res->buf_addr + ll_ep->buf_len;
	ll_buf->submitted = false;
	if (res->transaction_status) {
		stats->rx_status_err++;
		goto err;
	} else if (!res->bytes_xferd) {
		stats->rx_null++;
		goto err;
	}

	/* Store this away so we don't have to look it up every time */
	if (!ll_ep->phys_dev) {
		ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_mhi0");
		if (!ll_ep->phys_dev)
			goto err;
	}

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb) {
		stats->rx_oom++;
		goto err;
	}

	/* Build the SKB and pass it off to the stack */
	skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
			ll_ep->buf_len);
	if (!ll_buf->temp_alloc)
		get_page(ll_buf->page);

	skb->dev = ll_ep->phys_dev;
	skb->protocol = htons(ETH_P_MAP);
	/* Mark this as arriving on the LL channel. Allows rmnet to skip
	 * module handling as needed.
	 */
	skb->priority = 0xda1a;
	stats->rx_pkts++;
	netif_rx(skb);
	rmnet_ll_buffers_recycle(ll_ep);
	return;

err:
	/* Go, and never darken my towels again! */
	if (ll_buf->temp_alloc)
		put_page(ll_buf->page);
}

static void rmnet_ll_mhi_tx_complete(struct mhi_device *mhi_dev,
				     struct mhi_result *res)
{
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct sk_buff *skb = res->buf_addr;

	/* Check the result and free the SKB */
	if (res->transaction_status)
		stats->tx_complete_err++;
	else
		stats->tx_complete++;

	dev_consume_skb_any(skb);
}

static int rmnet_ll_mhi_probe(struct mhi_device *mhi_dev,
			      const struct mhi_device_id *id)
{
	struct rmnet_ll_endpoint *ll_ep;
	int rc;

	/* Allocate space for our state from the managed pool tied to the life
	 * of the mhi device.
	 */
	ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
	if (!ll_ep)
		return -ENOMEM;

	/* Hold on to the mhi_dev so we can send data to it later */
	ll_ep->priv = (void *)mhi_dev;

	/* Grab the MRU of the device so we know the size of the pages we need
	 * to allocate for the pool.
	 */
	rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
				  &ll_ep->dev_mru);
	if (rc || !ll_ep->dev_mru)
		/* Use our default mru */
		ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;

	ll_ep->page_order = get_order(ll_ep->dev_mru);
	/* We store some stuff at the end of the page, so don't let the HW
	 * use that part of it.
	 */
	ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);

	/* Tell MHI to initialize the UL/DL channels for transfer */
	rc = mhi_prepare_for_transfer(mhi_dev);
	if (rc) {
		pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
		       __func__, rc);
		return rc;
	}

	rc = rmnet_ll_buffer_pool_alloc(ll_ep);
	if (rc) {
		pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
		       rc);
		mhi_unprepare_from_transfer(mhi_dev);
		return rc;
	}

	rmnet_ll_buffers_recycle(ll_ep);

	/* Not a fan of storing this pointer in two locations, but I've yet to
	 * come up with any other good way of accessing it on the TX path from
	 * rmnet otherwise, since we won't have any references to the mhi_dev.
	 */
	dev_set_drvdata(&mhi_dev->dev, ll_ep);
	rmnet_ll_mhi_ep = ll_ep;
	return 0;
}

static void rmnet_ll_mhi_remove(struct mhi_device *mhi_dev)
{
	struct rmnet_ll_endpoint *ll_ep;

	ll_ep = dev_get_drvdata(&mhi_dev->dev);
	/* Remove our private data from the device. No need to free it though.
	 * It will be freed once the mhi_dev is released since it was alloced
	 * from a managed pool.
	 */
	dev_set_drvdata(&mhi_dev->dev, NULL);
	rmnet_ll_mhi_ep = NULL;
	rmnet_ll_buffer_pool_free(ll_ep);
}

static const struct mhi_device_id rmnet_ll_mhi_channel_table[] = {
	{
		.chan = "RMNET_DATA_LL",
	},
	{},
};

static struct mhi_driver rmnet_ll_driver = {
	.probe = rmnet_ll_mhi_probe,
	.remove = rmnet_ll_mhi_remove,
	.dl_xfer_cb = rmnet_ll_mhi_rx,
	.ul_xfer_cb = rmnet_ll_mhi_tx_complete,
	.id_table = rmnet_ll_mhi_channel_table,
	.driver = {
		.name = "rmnet_ll",
		.owner = THIS_MODULE,
	},
};

static int rmnet_ll_mhi_queue(struct rmnet_ll_endpoint *ll_ep,
			      struct rmnet_ll_buffer *ll_buf)
{
	struct mhi_device *mhi_dev = ll_ep->priv;

	return mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE,
			     page_address(ll_buf->page),
			     ll_ep->buf_len, MHI_EOT);
}

static int rmnet_ll_mhi_query_free_descriptors(struct rmnet_ll_endpoint *ll_ep)
{
	struct mhi_device *mhi_dev = ll_ep->priv;

	return mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
}

static int rmnet_ll_mhi_tx(struct sk_buff *skb)
{
	struct mhi_device *mhi_dev;
	int rc;

	if (!rmnet_ll_mhi_ep)
		return -ENODEV;

	mhi_dev = rmnet_ll_mhi_ep->priv;
	rc = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (rc)
		kfree_skb(skb);

	return rc;
}

static int rmnet_ll_mhi_init(void)
{
	return mhi_driver_register(&rmnet_ll_driver);
}

static int rmnet_ll_mhi_exit(void)
{
	mhi_driver_unregister(&rmnet_ll_driver);
	return 0;
}

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
	.buffer_queue = rmnet_ll_mhi_queue,
	.query_free_descriptors = rmnet_ll_mhi_query_free_descriptors,
	.tx = rmnet_ll_mhi_tx,
	.init = rmnet_ll_mhi_init,
	.exit = rmnet_ll_mhi_exit,
};
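A note on the `res->buf_addr + ll_ep->buf_len` arithmetic in rmnet_ll_mhi_rx(): the buffer pool reserves sizeof(struct rmnet_ll_buffer) at the tail of each DMA page, which is why probe sets buf_len to the MRU minus the struct size. rmnet_ll_buffer_pool_alloc() itself is not in this diff; a minimal sketch of how the pool side could lay a buffer out (the rmnet_ll_buf_init helper name is invented for illustration; the page/submitted/temp_alloc fields are the ones used above):

/* Illustration only: place the per-buffer bookkeeping struct at the tail
 * of the page handed to MHI, so the RX completion can recover it from the
 * buffer address alone, exactly as rmnet_ll_mhi_rx() does above.
 */
static struct rmnet_ll_buffer *rmnet_ll_buf_init(struct page *page,
						 struct rmnet_ll_endpoint *ll_ep)
{
	struct rmnet_ll_buffer *ll_buf;

	/* HW may write up to buf_len bytes; metadata lives past that */
	ll_buf = page_address(page) + ll_ep->buf_len;
	ll_buf->page = page;
	ll_buf->submitted = false;
	ll_buf->temp_alloc = false;
	return ll_buf;
}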
542  qcom/opensource/datarmnet/core/rmnet_ll_qmap.c  Normal file
@@ -0,0 +1,542 @@
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/netlink.h>
#include <uapi/linux/rtnetlink.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include "dfc.h"
#include "rmnet_qmi.h"
#include "rmnet_qmap.h"
#include "qmi_rmnet_i.h"

#define QMAP_LL_VER 1
#define QMAP_LL_MAX_BEARER 15

#define QMAP_SWITCH_TO_LL 1
#define QMAP_SWITCH_TO_DEFAULT 2
#define QMAP_SWITCH_QUERY 3

/* Switch status from modem */
#define SWITCH_STATUS_ERROR 0
#define SWITCH_STATUS_SUCCESS 1
#define SWITCH_STATUS_DEFAULT 2
#define SWITCH_STATUS_LL 3
#define SWITCH_STATUS_FAIL_TEMP 4
#define SWITCH_STATUS_FAIL_PERM 5

/* Internal switch status */
#define SWITCH_STATUS_NONE 0xFF
#define SWITCH_STATUS_TIMEOUT 0xFE
#define SWITCH_STATUS_NO_EFFECT 0xFD

#define LL_MASK_NL_ACK 1
#define LL_MASK_AUTO_RETRY 2

#define LL_TIMEOUT (5 * HZ)
#define LL_RETRY_TIME (10 * HZ)
#define LL_MAX_RETRY (3)

struct qmap_ll_bearer {
	u8 bearer_id;
	u8 status;
	u8 reserved[2];
} __aligned(1);

struct qmap_ll_switch {
	struct qmap_cmd_hdr hdr;
	u8 cmd_ver;
	u8 reserved;
	u8 request_type;
	u8 num_bearers;
	struct qmap_ll_bearer bearer[0];
} __aligned(1);

struct qmap_ll_switch_resp {
	struct qmap_cmd_hdr hdr;
	u8 cmd_ver;
	u8 reserved[2];
	u8 num_bearers;
	struct qmap_ll_bearer bearer[0];
} __aligned(1);

struct qmap_ll_switch_status {
	struct qmap_cmd_hdr hdr;
	u8 cmd_ver;
	u8 reserved[2];
	u8 num_bearers;
	struct qmap_ll_bearer bearer[0];
} __aligned(1);

/*
 * LL workqueue
 */
static DEFINE_SPINLOCK(ll_wq_lock);
static struct workqueue_struct *ll_wq;

struct ll_ack_work {
	struct work_struct work;
	u32 nl_pid;
	u32 nl_seq;
	u8 bearer_id;
	u8 status_code;
	u8 current_ch;
};

static void ll_ack_fn(struct work_struct *work)
{
	struct ll_ack_work *ack_work;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct nlmsgerr *errmsg;
	unsigned int flags = NLM_F_CAPPED;

	ack_work = container_of(work, struct ll_ack_work, work);

	skb = nlmsg_new(sizeof(*errmsg), GFP_KERNEL);
	if (!skb)
		goto out;

	nlh = __nlmsg_put(skb, ack_work->nl_pid,
			  ack_work->nl_seq, NLMSG_ERROR,
			  sizeof(*errmsg), flags);
	errmsg = nlmsg_data(nlh);
	errmsg->error = 0;
	errmsg->msg.nlmsg_len = sizeof(struct nlmsghdr);
	errmsg->msg.nlmsg_type = ack_work->bearer_id;
	errmsg->msg.nlmsg_flags = ack_work->status_code;
	errmsg->msg.nlmsg_seq = ack_work->current_ch;
	errmsg->msg.nlmsg_pid = ack_work->nl_pid;
	nlmsg_end(skb, nlh);

	rtnl_unicast(skb, &init_net, ack_work->nl_pid);
out:
	kfree(ack_work);
}

static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
{
	struct ll_ack_work *ack_work;

	if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
		return;

	ack_work = kzalloc(sizeof(*ack_work), GFP_ATOMIC);
	if (!ack_work)
		return;

	ack_work->nl_pid = bearer->ch_switch.nl_pid;
	ack_work->nl_seq = bearer->ch_switch.nl_seq;
	ack_work->bearer_id = bearer->bearer_id;
	ack_work->status_code = bearer->ch_switch.status_code;
	ack_work->current_ch = bearer->ch_switch.current_ch;
	INIT_WORK(&ack_work->work, ll_ack_fn);

	spin_lock_bh(&ll_wq_lock);
	if (ll_wq)
		queue_work(ll_wq, &ack_work->work);
	else
		kfree(ack_work);
	spin_unlock_bh(&ll_wq_lock);
}

void rmnet_ll_wq_init(void)
{
	WARN_ON(ll_wq);
	ll_wq = alloc_ordered_workqueue("rmnet_ll_wq", 0);
}

void rmnet_ll_wq_exit(void)
{
	struct workqueue_struct *tmp = NULL;

	spin_lock_bh(&ll_wq_lock);
	if (ll_wq) {
		tmp = ll_wq;
		ll_wq = NULL;
	}
	spin_unlock_bh(&ll_wq_lock);

	if (tmp)
		destroy_workqueue(tmp);
}

/*
 * LLC switch
 */

static void ll_qmap_maybe_set_ch(struct qos_info *qos,
				 struct rmnet_bearer_map *bearer, u8 status)
{
	u8 ch;

	if (status == SWITCH_STATUS_DEFAULT)
		ch = RMNET_CH_DEFAULT;
	else if (status == SWITCH_STATUS_LL)
		ch = RMNET_CH_LL;
	else
		return;

	bearer->ch_switch.current_ch = ch;
	if (bearer->mq_idx < MAX_MQ_NUM)
		qos->mq[bearer->mq_idx].is_ll_ch = ch;
}

static void ll_switch_complete(struct rmnet_bearer_map *bearer, u8 status)
{
	bearer->ch_switch.status_code = status;

	if (status == SWITCH_STATUS_FAIL_TEMP &&
	    bearer->ch_switch.retry_left) {
		/* Temp failure retry */
		bearer->ch_switch.state = CH_SWITCH_FAILED_RETRY;
		mod_timer(&bearer->ch_switch.guard_timer,
			  jiffies + LL_RETRY_TIME);
		bearer->ch_switch.retry_left--;
	} else {
		/* Success or permanent failure */
		bearer->ch_switch.timer_quit = true;
		del_timer(&bearer->ch_switch.guard_timer);
		bearer->ch_switch.state = CH_SWITCH_NONE;
		bearer->ch_switch.retry_left = 0;
		ll_send_nl_ack(bearer);
		bearer->ch_switch.flags = 0;
	}
}

static int ll_qmap_handle_switch_resp(struct sk_buff *skb)
{
	struct qmap_ll_switch_resp *cmd;
	struct rmnet_bearer_map *bearer;
	struct qos_info *qos;
	struct net_device *dev;
	int i;

	if (skb->len < sizeof(struct qmap_ll_switch_resp))
		return QMAP_CMD_DONE;

	cmd = (struct qmap_ll_switch_resp *)skb->data;
	if (!cmd->num_bearers)
		return QMAP_CMD_DONE;

	if (skb->len < sizeof(*cmd) +
		       cmd->num_bearers * sizeof(struct qmap_ll_bearer))
		return QMAP_CMD_DONE;

	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
	if (!dev)
		return QMAP_CMD_DONE;

	qos = rmnet_get_qos_pt(dev);
	if (!qos)
		return QMAP_CMD_DONE;

	trace_dfc_ll_switch("ACK", 0, cmd->num_bearers, cmd->bearer);

	spin_lock_bh(&qos->qos_lock);

	for (i = 0; i < cmd->num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos,
						  cmd->bearer[i].bearer_id);
		if (!bearer)
			continue;

		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);

		if (bearer->ch_switch.state == CH_SWITCH_STARTED &&
		    bearer->ch_switch.switch_txid == cmd->hdr.tx_id) {
			/* This is an ACK to the switch request */
			if (cmd->bearer[i].status == SWITCH_STATUS_SUCCESS)
				bearer->ch_switch.state = CH_SWITCH_ACKED;
			else
				ll_switch_complete(bearer,
						   cmd->bearer[i].status);
		}
	}

	spin_unlock_bh(&qos->qos_lock);

	return QMAP_CMD_DONE;
}

static int ll_qmap_handle_switch_status(struct sk_buff *skb)
{
	struct qmap_ll_switch_status *cmd;
	struct rmnet_bearer_map *bearer;
	struct qos_info *qos;
	struct net_device *dev;
	int i;

	if (skb->len < sizeof(struct qmap_ll_switch_status))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_ll_switch_status *)skb->data;
	if (!cmd->num_bearers)
		return QMAP_CMD_ACK;

	if (skb->len < sizeof(*cmd) +
		       cmd->num_bearers * sizeof(struct qmap_ll_bearer))
		return QMAP_CMD_INVALID;

	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
	if (!dev)
		return QMAP_CMD_ACK;

	qos = rmnet_get_qos_pt(dev);
	if (!qos)
		return QMAP_CMD_ACK;

	trace_dfc_ll_switch("STS", 0, cmd->num_bearers, cmd->bearer);

	spin_lock_bh(&qos->qos_lock);

	for (i = 0; i < cmd->num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos,
						  cmd->bearer[i].bearer_id);
		if (!bearer)
			continue;

		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);

		if (bearer->ch_switch.state == CH_SWITCH_ACKED)
			ll_switch_complete(bearer, cmd->bearer[i].status);
	}

	spin_unlock_bh(&qos->qos_lock);

	return QMAP_CMD_ACK;
}

int ll_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	int rc = QMAP_CMD_DONE;

	cmd = (struct qmap_cmd_hdr *)skb->data;

	if (cmd->cmd_name == QMAP_LL_SWITCH) {
		if (cmd->cmd_type != QMAP_CMD_ACK)
			return rc;
	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
		return rc;
	}

	switch (cmd->cmd_name) {
	case QMAP_LL_SWITCH:
		rc = ll_qmap_handle_switch_resp(skb);
		break;

	case QMAP_LL_SWITCH_STATUS:
		rc = ll_qmap_handle_switch_status(skb);
		break;

	default:
		if (cmd->cmd_type == QMAP_CMD_REQUEST)
			rc = QMAP_CMD_UNSUPPORTED;
	}

	return rc;
}

static int ll_qmap_send_switch(u8 mux_id, u8 channel, u8 num_bearers,
			       u8 *bearer_list, __be32 *txid)
{
	struct sk_buff *skb;
	struct qmap_ll_switch *ll_switch;
	unsigned int len;
	int i;

	if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER || !bearer_list)
		return -EINVAL;

	len = sizeof(struct qmap_ll_switch) +
	      num_bearers * sizeof(struct qmap_ll_bearer);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->protocol = htons(ETH_P_MAP);
	ll_switch = skb_put(skb, len);
	memset(ll_switch, 0, len);

	ll_switch->hdr.cd_bit = 1;
	ll_switch->hdr.mux_id = mux_id;
	ll_switch->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	ll_switch->hdr.cmd_name = QMAP_LL_SWITCH;
	ll_switch->hdr.cmd_type = QMAP_CMD_REQUEST;
	ll_switch->hdr.tx_id = htonl(rmnet_qmap_next_txid());

	ll_switch->cmd_ver = QMAP_LL_VER;
	if (channel == RMNET_CH_CTL)
		ll_switch->request_type = QMAP_SWITCH_QUERY;
	else if (channel == RMNET_CH_LL)
		ll_switch->request_type = QMAP_SWITCH_TO_LL;
	else
		ll_switch->request_type = QMAP_SWITCH_TO_DEFAULT;
	ll_switch->num_bearers = num_bearers;
	for (i = 0; i < num_bearers; i++)
		ll_switch->bearer[i].bearer_id = bearer_list[i];

	if (txid)
		*txid = ll_switch->hdr.tx_id;

	trace_dfc_ll_switch("REQ", ll_switch->request_type,
			    ll_switch->num_bearers, ll_switch->bearer);

	return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

/*
 * Start channel switch. The switch request is sent only if all bearers
 * are eligible to switch. Return 0 if switch request is sent.
 */
int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen)
{
	u8 switch_to_ch;
	u8 num_bearers;
	u8 *bearer_list;
	u32 flags;
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	__be32 txid;
	int i;
	int j;
	int rc = -EINVAL;

	if (!dev || !tcm)
		return -EINVAL;
	/*
	 * tcm__pad1: switch type (ch #, 0xFF query)
	 * tcm__pad2: num bearers
	 * tcm_info: flags
	 * tcm_ifindex: netlink fd
	 * tcm_handle: pid
	 * tcm_parent: seq
	 */

	switch_to_ch = tcm->tcm__pad1;
	num_bearers = tcm->tcm__pad2;
	flags = tcm->tcm_info;

	if (switch_to_ch != RMNET_CH_CTL && switch_to_ch >= RMNET_CH_MAX)
		return -EOPNOTSUPP;
	if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER)
		return -EINVAL;
	if (attrlen - sizeof(*tcm) < num_bearers)
		return -EINVAL;

	bearer_list = (u8 *)tcm + sizeof(*tcm);

	for (i = 0; i < num_bearers; i++)
		for (j = 0; j < num_bearers; j++)
			if (j != i && bearer_list[i] == bearer_list[j])
				return -EINVAL;

	qos = rmnet_get_qos_pt(dev);
	if (!qos)
		return -EINVAL;

	spin_lock_bh(&qos->qos_lock);

	/* Validate the bearer list */
	for (i = 0; i < num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
		if (!bearer) {
			rc = -EFAULT;
			goto out;
		}
		if (bearer->ch_switch.state != CH_SWITCH_NONE) {
			rc = -EBUSY;
			goto out;
		}
	}

	/* Send QMAP switch command */
	rc = ll_qmap_send_switch(qos->mux_id, switch_to_ch,
				 num_bearers, bearer_list, &txid);
	if (rc)
		goto out;

	/* Update state */
	for (i = 0; i < num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
		if (!bearer)
			continue;
		bearer->ch_switch.switch_to_ch = switch_to_ch;
		bearer->ch_switch.switch_txid = txid;
		bearer->ch_switch.state = CH_SWITCH_STARTED;
		bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
		bearer->ch_switch.retry_left =
			(flags & LL_MASK_AUTO_RETRY) ? LL_MAX_RETRY : 0;
		bearer->ch_switch.flags = flags;
		bearer->ch_switch.timer_quit = false;
		mod_timer(&bearer->ch_switch.guard_timer,
			  jiffies + LL_TIMEOUT);

		bearer->ch_switch.nl_pid = tcm->tcm_handle;
		bearer->ch_switch.nl_seq = tcm->tcm_parent;
	}
out:
	spin_unlock_bh(&qos->qos_lock);
	return rc;
}

void rmnet_ll_guard_fn(struct timer_list *t)
{
	struct rmnet_ch_switch *ch_switch;
	struct rmnet_bearer_map *bearer;
	int switch_status = SWITCH_STATUS_TIMEOUT;
	__be32 txid;
	int rc;

	ch_switch = container_of(t, struct rmnet_ch_switch, guard_timer);
	bearer = container_of(ch_switch, struct rmnet_bearer_map, ch_switch);

	spin_lock_bh(&bearer->qos->qos_lock);

	if (bearer->ch_switch.timer_quit ||
	    bearer->ch_switch.state == CH_SWITCH_NONE)
		goto out;

	if (bearer->ch_switch.state == CH_SWITCH_FAILED_RETRY) {
		if (bearer->ch_switch.current_ch ==
		    bearer->ch_switch.switch_to_ch) {
			switch_status = SWITCH_STATUS_NO_EFFECT;
			goto send_err;
		}

		rc = ll_qmap_send_switch(bearer->qos->mux_id,
					 bearer->ch_switch.switch_to_ch,
					 1,
					 &bearer->bearer_id,
					 &txid);
		if (!rc) {
			bearer->ch_switch.switch_txid = txid;
			bearer->ch_switch.state = CH_SWITCH_STARTED;
			bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
			goto out;
		}
	}

send_err:
	bearer->ch_switch.state = CH_SWITCH_NONE;
	bearer->ch_switch.status_code = switch_status;
	bearer->ch_switch.retry_left = 0;
	ll_send_nl_ack(bearer);
	bearer->ch_switch.flags = 0;

out:
	spin_unlock_bh(&bearer->qos->qos_lock);
}
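The tcm field mapping documented inside rmnet_ll_switch() implies a simple userspace encoding: the bearer IDs ride directly after struct tcmsg in the payload, and the flags word selects ACK and auto-retry behavior. A hedged userspace sketch of that packing (the pack_ll_switch helper is invented for illustration, and the netlink socket plumbing that delivers the message to the driver is out of scope for this diff):

/* Illustration only: pack a low-latency switch request the way
 * rmnet_ll_switch() expects to decode it.
 */
#include <string.h>
#include <linux/rtnetlink.h>

static size_t pack_ll_switch(void *buf, unsigned char ch, unsigned int pid,
			     unsigned int seq, const unsigned char *bearers,
			     unsigned char num_bearers)
{
	struct tcmsg *tcm = buf;

	memset(tcm, 0, sizeof(*tcm));
	tcm->tcm__pad1 = ch;       /* target channel, 0xFF = query */
	tcm->tcm__pad2 = num_bearers;
	tcm->tcm_info = 1;         /* LL_MASK_NL_ACK: request an ACK */
	tcm->tcm_handle = pid;     /* where the ACK should be unicast */
	tcm->tcm_parent = seq;
	/* bearer ID list sits immediately after the tcmsg */
	memcpy((char *)buf + sizeof(*tcm), bearers, num_bearers);
	return sizeof(*tcm) + num_bearers;
}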
331  qcom/opensource/datarmnet/core/rmnet_map.h  Normal file
@@ -0,0 +1,331 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_

#include <linux/skbuff.h>
#include "rmnet_config.h"

struct rmnet_map_control_command {
	u8 command_name;
	u8 cmd_type:2;
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;
	union {
		struct {
			u16 ip_family:2;
			u16 reserved:14;
			__be16 flow_control_seq_num;
			__be32 qos_id;
		} flow_control;
		u8 data[0];
	};
} __aligned(1);

enum rmnet_map_commands {
	RMNET_MAP_COMMAND_NONE,
	RMNET_MAP_COMMAND_FLOW_DISABLE,
	RMNET_MAP_COMMAND_FLOW_ENABLE,
	RMNET_MAP_COMMAND_FLOW_START = 7,
	RMNET_MAP_COMMAND_FLOW_END = 8,
	RMNET_MAP_COMMAND_PB_BYTES = 35,
	/* These should always be the last 2 elements */
	RMNET_MAP_COMMAND_UNKNOWN,
	RMNET_MAP_COMMAND_ENUM_LENGTH
};

enum rmnet_map_v5_header_type {
	RMNET_MAP_HEADER_TYPE_UNKNOWN,
	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
	RMNET_MAP_HEADER_TYPE_TSO = 0x3,
	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};

enum rmnet_map_v5_close_type {
	RMNET_MAP_COAL_CLOSE_NON_COAL,
	RMNET_MAP_COAL_CLOSE_IP_MISS,
	RMNET_MAP_COAL_CLOSE_TRANS_MISS,
	RMNET_MAP_COAL_CLOSE_HW,
	RMNET_MAP_COAL_CLOSE_COAL,
};

enum rmnet_map_v5_close_value {
	RMNET_MAP_COAL_CLOSE_HW_NL,
	RMNET_MAP_COAL_CLOSE_HW_PKT,
	RMNET_MAP_COAL_CLOSE_HW_BYTE,
	RMNET_MAP_COAL_CLOSE_HW_TIME,
	RMNET_MAP_COAL_CLOSE_HW_EVICT,
};

/* Main QMAP header */
struct rmnet_map_header {
	u8 pad_len:6;
	u8 next_hdr:1;
	u8 cd_bit:1;
	u8 mux_id;
	__be16 pkt_len;
} __aligned(1);

/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
	u8 next_hdr:1;
	u8 header_type:7;
	u8 hw_reserved:4;
	u8 aps_prio:1;
	u8 priority:1;
	u8 hw_reserved_bit:1;
	u8 csum_valid_required:1;
	__be16 reserved;
} __aligned(1);

struct rmnet_map_v5_nl_pair {
	__be16 pkt_len;
	u8 csum_error_bitmap;
	u8 num_packets;
} __aligned(1);

/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)

struct rmnet_map_v5_coal_header {
	u8 next_hdr:1;
	u8 header_type:7;
	u8 reserved1:4;
	u8 num_nlos:3;
	u8 csum_valid:1;
	u8 close_type:4;
	u8 close_value:4;
	u8 reserved2:4;
	u8 virtual_channel_id:4;

	struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);

struct rmnet_map_v5_tso_header {
	u8 next_hdr:1;
	u8 header_type:7;
	u8 hw_reserved:5;
	u8 priority:1;
	u8 zero_csum:1;
	u8 ip_id_cfg:1;
	__be16 segment_size;
} __aligned(1);

/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
	u8 reserved1;
	u8 valid:1;
	u8 reserved2:7;
	u16 csum_start_offset;
	u16 csum_length;
	__be16 csum_value;
} __aligned(1);

struct rmnet_map_ul_csum_header {
	__be16 csum_start_offset;
	u16 csum_insert_offset:14;
	u16 udp_ind:1;
	u16 csum_enabled:1;
} __aligned(1);

struct rmnet_map_control_command_header {
	u8 command_name;
	u8 cmd_type:2;
	u8 reserved:5;
	u8 e:1;
	u16 source_id:15;
	u16 ext:1;
	u32 transaction_id;
} __aligned(1);

struct rmnet_map_flow_info_le {
	__be32 mux_id;
	__be32 flow_id;
	__be32 bytes;
	__be32 pkts;
} __aligned(1);

struct rmnet_map_flow_info_be {
	u32 mux_id;
	u32 flow_id;
	u32 bytes;
	u32 pkts;
} __aligned(1);

struct rmnet_map_pb_ind_hdr {
	union {
		struct {
			u32 seq_num;
			u32 start_end_seq_num;
			u32 row_bytes_pending;
			u32 fc_bytes_pending;
		} le __aligned(1);
		struct {
			u32 seq_num;
			u32 start_end_seq_num;
			u32 row_bytes_pending;
			u32 fc_bytes_pending;
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);

struct rmnet_map_pb_ind {
	u8 priority;
	void (*pb_ind_handler)(struct rmnet_map_pb_ind_hdr *pbhdr);
	struct list_head list;
};

struct rmnet_map_dl_ind_hdr {
	union {
		struct {
			u32 seq;
			u32 bytes;
			u32 pkts;
			u32 flows;
			struct rmnet_map_flow_info_le flow[0];
		} le __aligned(1);
		struct {
			__be32 seq;
			__be32 bytes;
			__be32 pkts;
			__be32 flows;
			struct rmnet_map_flow_info_be flow[0];
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);

struct rmnet_map_dl_ind_trl {
	union {
		__be32 seq_be;
		u32 seq_le;
	} __aligned(1);
} __aligned(1);

struct rmnet_map_dl_ind {
	u8 priority;
	void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
				  struct rmnet_map_control_command_header *qcmd);
	void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
				  struct rmnet_map_control_command_header *qcmd);
	struct list_head list;
};

#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
			      (Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
				    ((Y)->data + \
				     sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
					(Y)->data)->pkt_len))

#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3

#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1

static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
{
	/* Nonlinear packets we receive are entirely within frag 0 */
	if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
		return skb_frag_address(skb_shinfo(skb)->frags);

	return skb->data;
}

static inline struct rmnet_map_control_command *
rmnet_map_get_cmd_start(struct sk_buff *skb)
{
	unsigned char *data = rmnet_map_data_ptr(skb);

	data += sizeof(struct rmnet_map_header);
	return (struct rmnet_map_control_command *)data;
}

static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
	unsigned char *data = rmnet_map_data_ptr(skb);

	data += sizeof(struct rmnet_map_header);
	return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}

static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
	unsigned char *data = rmnet_map_data_ptr(skb);

	data += sizeof(struct rmnet_map_header);
	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}

struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad,
						  struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type);
bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list,
				      u16 len);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
			    bool low_latency);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
				    u16 size, u8 count, u8 features, u32 time);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_hdr *dl_hdr,
				struct rmnet_map_control_command_header *qcmd);
void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_trl *dltrl,
				struct rmnet_map_control_command_header *qcmd);
void rmnet_map_pb_ind_notify(struct rmnet_port *port,
			     struct rmnet_map_pb_ind_hdr *pbhdr);
int rmnet_map_flow_command(struct sk_buff *skb,
			   struct rmnet_port *port,
			   bool rmnet_perf);
void rmnet_map_cmd_init(struct rmnet_port *port);
int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_pb_ind_register(struct rmnet_port *port,
			      struct rmnet_map_pb_ind *pb_ind);
int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_pb_ind *pb_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);
void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state);
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
			     struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
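Since rmnet_map_data_ptr() hides the nonlinear-skb detail, a typical ingress parse against these definitions is only a few lines. A minimal sketch (the example_parse_map_frame function and its return-code policy are assumptions, not part of this header):

/* Illustration only: pull the MAP framing fields out of an ingress skb
 * using the helpers declared above.
 */
static int example_parse_map_frame(struct sk_buff *skb)
{
	struct rmnet_map_header *maph;
	u16 payload_len;

	if (skb->len < sizeof(*maph))
		return -EINVAL;

	maph = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	payload_len = ntohs(maph->pkt_len); /* excludes the MAP header itself */

	if (maph->cd_bit) /* command frame, not data */
		return 1;

	/* Data frame: real payload is pkt_len minus the trailing pad bytes */
	if (payload_len < maph->pad_len)
		return -EINVAL;

	pr_debug("mux %u, %u payload bytes\n", maph->mux_id,
		 payload_len - maph->pad_len);
	return 0;
}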
465  qcom/opensource/datarmnet/core/rmnet_map_command.c  Normal file
@@ -0,0 +1,465 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"

#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
			    sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_PB_IND_HDR_SIZE (sizeof(struct rmnet_map_pb_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
				    struct rmnet_port *port,
				    int enable)
{
	struct rmnet_map_header *qmap;
	struct rmnet_map_control_command *cmd;
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	mux_id = qmap->mux_id;
	cmd = rmnet_map_get_cmd_start(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 sequences. User space does not support creating dedicated flows
	 * for the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r) {
		kfree_skb(skb);
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	} else {
		return RMNET_MAP_COMMAND_ACK;
	}
}

static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = skb->dev;

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		pskb_trim(skb,
			  skb->len - sizeof(struct rmnet_map_dl_csum_trailer));

	skb->protocol = htons(ETH_P_MAP);

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

void
rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_hdr *dlhdr,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_hdr_handler_v2(dlhdr, qcmd);
}

void
rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_trl *dltrl,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_trl_handler_v2(dltrl, qcmd);
}

void rmnet_map_pb_ind_notify(struct rmnet_port *port,
			     struct rmnet_map_pb_ind_hdr *pbhdr)
{
	struct rmnet_map_pb_ind *tmp;

	list_for_each_entry(tmp, &port->pb_list, list) {
		port->stats.pb_marker_seq = pbhdr->le.seq_num;
		tmp->pb_ind_handler(pbhdr);
	}
}

static void rmnet_map_process_pb_ind(struct sk_buff *skb,
				     struct rmnet_port *port,
				     bool rmnet_perf)
{
	struct rmnet_map_pb_ind_hdr *pbhdr;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_PB_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header) +
			       sizeof(struct rmnet_map_control_command_header));
	}

	pbhdr = (struct rmnet_map_pb_ind_hdr *)rmnet_map_data_ptr(skb);
	port->stats.pb_marker_count++;

	if (is_dl_mark_v2)
		rmnet_map_pb_ind_notify(port, pbhdr);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_pb_ind_hdr);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

static void rmnet_map_process_flow_start(struct sk_buff *skb,
					 struct rmnet_port *port,
					 bool rmnet_perf)
{
	struct rmnet_map_dl_ind_hdr *dlhdr;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
		       rmnet_map_data_ptr(skb);
		port->stats.dl_hdr_last_ep_id = qcmd->source_id;
		port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
		port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);

	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

static void rmnet_map_process_flow_end(struct sk_buff *skb,
				       struct rmnet_port *port,
				       bool rmnet_perf)
{
	struct rmnet_map_dl_ind_trl *dltrl;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
		       rmnet_map_data_ptr(skb);
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and the appropriate handler is called.
 */
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
	unsigned char rc = 0;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_map_do_flow_control(skb, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_map_do_flow_control(skb, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		kfree_skb(skb);
		break;
	}
	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc, port);
}

int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
			   bool rmnet_perf)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	/* Silently discard any markers on the LL channel */
	if (skb->priority == 0xda1a &&
	    (command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     command_name == RMNET_MAP_COMMAND_FLOW_END)) {
		if (!rmnet_perf)
			consume_skb(skb);

		return 0;
	}

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_map_process_flow_start(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_map_process_flow_end(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_PB_BYTES:
		rmnet_map_process_pb_ind(skb, port, rmnet_perf);
		break;

	default:
		return 1;
	}

	/* rmnet_perf module will handle the consuming */
	if (!rmnet_perf)
		consume_skb(skb);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_flow_command);

void rmnet_map_cmd_exit(struct rmnet_port *port)
{
	struct rmnet_map_dl_ind *tmp, *idx;

	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
		list_del_rcu(&tmp->list);

	list_for_each_entry_safe(tmp, idx, &port->pb_list, list)
		list_del_rcu(&tmp->list);
}

void rmnet_map_cmd_init(struct rmnet_port *port)
{
	INIT_LIST_HEAD(&port->dl_list);
	INIT_LIST_HEAD(&port->pb_list);
}

int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *dl_ind_iterator;
	bool empty_ind_list = true;

	if (!port || !dl_ind || !dl_ind->dl_hdr_handler_v2 ||
	    !dl_ind->dl_trl_handler_v2)
		return -EINVAL;

	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
		empty_ind_list = false;
		if (dl_ind_iterator->priority < dl_ind->priority) {
			if (dl_ind_iterator->list.next) {
				if (dl_ind->priority
				    < list_entry_rcu(dl_ind_iterator->list.next,
						     typeof(*dl_ind_iterator), list)->priority) {
					list_add_rcu(&dl_ind->list,
						     &dl_ind_iterator->list);
					break;
				}
			} else {
				list_add_rcu(&dl_ind->list,
					     &dl_ind_iterator->list);
				break;
			}
		} else {
			list_add_tail_rcu(&dl_ind->list,
					  &dl_ind_iterator->list);
			break;
		}
	}

	if (empty_ind_list)
		list_add_rcu(&dl_ind->list, &port->dl_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_register);

int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *tmp;

	if (!port || !dl_ind || !(port->dl_list.next))
		return -EINVAL;

	list_for_each_entry(tmp, &port->dl_list, list) {
		if (tmp == dl_ind) {
			list_del_rcu(&dl_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);

int rmnet_map_pb_ind_register(struct rmnet_port *port,
			      struct rmnet_map_pb_ind *pb_ind)
{
	struct rmnet_map_pb_ind *pb_ind_iterator;
	bool empty_ind_list = true;

	if (!port || !pb_ind || !pb_ind->pb_ind_handler)
		return -EINVAL;

	list_for_each_entry_rcu(pb_ind_iterator, &port->pb_list, list) {
		empty_ind_list = false;
		if (pb_ind_iterator->priority < pb_ind->priority) {
			if (pb_ind_iterator->list.next) {
				if (pb_ind->priority
				    < list_entry_rcu(pb_ind_iterator->list.next,
						     typeof(*pb_ind_iterator), list)->priority) {
					list_add_rcu(&pb_ind->list,
						     &pb_ind_iterator->list);
					break;
				}
			} else {
				list_add_rcu(&pb_ind->list,
					     &pb_ind_iterator->list);
				break;
			}
		} else {
			list_add_tail_rcu(&pb_ind->list,
					  &pb_ind_iterator->list);
			break;
		}
	}

	if (empty_ind_list)
		list_add_rcu(&pb_ind->list, &port->pb_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_pb_ind_register);

int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_pb_ind *pb_ind)
{
	struct rmnet_map_pb_ind *tmp;

	if (!port || !pb_ind || !(port->pb_list.next))
		return -EINVAL;

	list_for_each_entry(tmp, &port->pb_list, list) {
		if (tmp == pb_ind) {
			list_del_rcu(&pb_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_pb_ind_deregister);
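The registration functions above keep each indication list sorted by ascending priority, and the notify functions walk the list in that order. A short usage sketch against this exported API (the handler bodies, the priority value, and the example_* names are assumptions for illustration):

/* Illustration only: register a pair of DL marker handlers on a port. */
static void example_dl_hdr(struct rmnet_map_dl_ind_hdr *dlhdr,
			   struct rmnet_map_control_command_header *qcmd)
{
	pr_debug("DL burst start: seq %u\n", dlhdr->le.seq);
}

static void example_dl_trl(struct rmnet_map_dl_ind_trl *dltrl,
			   struct rmnet_map_control_command_header *qcmd)
{
	pr_debug("DL burst end: seq %u\n", dltrl->seq_le);
}

static struct rmnet_map_dl_ind example_dl_ind = {
	.priority = 1, /* list is kept sorted by ascending priority */
	.dl_hdr_handler_v2 = example_dl_hdr,
	.dl_trl_handler_v2 = example_dl_trl,
};

/* On setup:    rmnet_map_dl_ind_register(port, &example_dl_ind);
 * On teardown: rmnet_map_dl_ind_deregister(port, &example_dl_ind);
 */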
1720  qcom/opensource/datarmnet/core/rmnet_map_data.c  Normal file
(File diff suppressed because it is too large)
100  qcom/opensource/datarmnet/core/rmnet_module.c  Normal file
@@ -0,0 +1,100 @@
/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "rmnet_module.h"

struct rmnet_module_hook_info {
	void *func __rcu;
};

static struct rmnet_module_hook_info
rmnet_module_hooks[__RMNET_MODULE_NUM_HOOKS];

void
rmnet_module_hook_register(const struct rmnet_module_hook_register_info *info,
			   int hook_count)
{
	struct rmnet_module_hook_info *hook_info;
	int i;

	for (i = 0; i < hook_count; i++) {
		int hook = info[i].hooknum;

		if (hook < __RMNET_MODULE_NUM_HOOKS) {
			hook_info = &rmnet_module_hooks[hook];
			rcu_assign_pointer(hook_info->func, info[i].func);
		}
	}
}
EXPORT_SYMBOL(rmnet_module_hook_register);

bool rmnet_module_hook_is_set(int hook)
{
	if (hook >= __RMNET_MODULE_NUM_HOOKS)
		return false;

	return rcu_dereference(rmnet_module_hooks[hook].func) != NULL;
}
EXPORT_SYMBOL(rmnet_module_hook_is_set);

void
rmnet_module_hook_unregister_no_sync(const struct rmnet_module_hook_register_info *info,
				     int hook_count)
{
	struct rmnet_module_hook_info *hook_info;
	int i;

	for (i = 0; i < hook_count; i++) {
		int hook = info[i].hooknum;

		if (hook < __RMNET_MODULE_NUM_HOOKS) {
			hook_info = &rmnet_module_hooks[hook];
			rcu_assign_pointer(hook_info->func, NULL);
		}
	}
}
EXPORT_SYMBOL(rmnet_module_hook_unregister_no_sync);

#define __RMNET_HOOK_DEFINE(call, hook_num, proto, args, ret_type) \
int rmnet_module_hook_##call( \
	__RMNET_HOOK_PROTO(RMNET_HOOK_PARAMS(proto), ret_type) \
) \
{ \
	ret_type (*__func)(proto); \
	struct rmnet_module_hook_info *__info = \
		&rmnet_module_hooks[hook_num]; \
	int __ret = 0; \
\
	rcu_read_lock(); \
	__func = rcu_dereference(__info->func); \
	if (__func) { \
		RMNET_HOOK_IF_NON_VOID_TYPE(ret_type)( ret_type __rc = ) \
		__func(args); \
		__ret = 1; \
\
		RMNET_HOOK_IF_NON_VOID_TYPE(ret_type)( if (__ret_code) \
			*__ret_code = __rc; ) \
	} \
\
	rcu_read_unlock(); \
	return __ret; \
} \
EXPORT_SYMBOL(rmnet_module_hook_##call);

#undef RMNET_MODULE_HOOK
#define RMNET_MODULE_HOOK(call, hook_num, proto, args, ret_type) \
	__RMNET_HOOK_DEFINE(call, hook_num, RMNET_HOOK_PARAMS(proto), \
			    RMNET_HOOK_PARAMS(args), ret_type)

#define __RMNET_HOOK_MULTIREAD__
#include "rmnet_hook.h"
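Taken together with rmnet_module.h below, each RMNET_MODULE_HOOK() line in rmnet_hook.h expands here into an rmnet_module_hook_<name>() wrapper that returns 1 when a handler was installed and ran. A hedged sketch of both sides of the contract (the handler body is invented; the wrapper's exact parameter list depends on the hook's declaration in rmnet_hook.h, which this diff does not show):

/* Illustration only: install a handler on one of the hook points declared
 * in the enum in rmnet_module.h.
 */
static int my_ingress_handler(struct sk_buff *skb)
{
	return 0; /* e.g. "not consumed" */
}

static const struct rmnet_module_hook_register_info my_hooks[] = {
	{
		.hooknum = RMNET_MODULE_HOOK_PERF_INGRESS, /* real enum value */
		.func = my_ingress_handler,
	},
};

/* module init:  rmnet_module_hook_register(my_hooks, ARRAY_SIZE(my_hooks));
 * module exit:  rmnet_module_hook_unregister(my_hooks, ARRAY_SIZE(my_hooks));
 * caller side (assuming a non-void hook taking an skb):
 *   int rc, handled = rmnet_module_hook_perf_ingress(&rc, skb);
 *   handled is 1 if a handler ran; rc then holds its return value.
 */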
137  qcom/opensource/datarmnet/core/rmnet_module.h  Normal file
@@ -0,0 +1,137 @@
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef __RMNET_MODULE_H__
|
||||
#define __RMNET_MODULE_H__
|
||||
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
enum {
|
||||
RMNET_MODULE_HOOK_OFFLOAD_INGRESS,
|
||||
RMNET_MODULE_HOOK_OFFLOAD_CHAIN_END,
|
||||
RMNET_MODULE_HOOK_SHS_SKB_ENTRY,
|
||||
RMNET_MODULE_HOOK_SHS_SKB_LL_ENTRY,
|
||||
RMNET_MODULE_HOOK_SHS_SWITCH,
|
||||
RMNET_MODULE_HOOK_PERF_TETHER_INGRESS,
|
||||
RMNET_MODULE_HOOK_PERF_TETHER_EGRESS,
|
||||
RMNET_MODULE_HOOK_PERF_TETHER_CMD,
|
||||
RMNET_MODULE_HOOK_PERF_INGRESS,
|
||||
RMNET_MODULE_HOOK_PERF_EGRESS,
|
||||
RMNET_MODULE_HOOK_PERF_SET_THRESH,
|
||||
RMNET_MODULE_HOOK_APS_PRE_QUEUE,
|
||||
RMNET_MODULE_HOOK_APS_POST_QUEUE,
|
||||
RMNET_MODULE_HOOK_WLAN_FLOW_MATCH,
|
||||
RMNET_MODULE_HOOK_APS_DATA_INACTIVE,
|
||||
RMNET_MODULE_HOOK_APS_DATA_ACTIVE,
|
||||
RMNET_MODULE_HOOK_APS_DATA_REPORT,
|
||||
RMNET_MODULE_HOOK_PERF_INGRESS_RX_HANDLER,
|
||||
RMNET_MODULE_HOOK_WLAN_INGRESS_RX_HANDLER,
|
||||
__RMNET_MODULE_NUM_HOOKS,
|
||||
};
|
||||
|
||||
struct rmnet_module_hook_register_info {
|
||||
int hooknum;
|
||||
void *func;
|
||||
};
|
||||
|
||||
void
|
||||
rmnet_module_hook_register(const struct rmnet_module_hook_register_info *info,
|
||||
int hook_count);
|
||||
bool rmnet_module_hook_is_set(int hook);
|
||||
void
|
||||
rmnet_module_hook_unregister_no_sync(const struct rmnet_module_hook_register_info *info,
|
||||
int hook_count);
|
||||
static inline void
|
||||
rmnet_module_hook_unregister(const struct rmnet_module_hook_register_info *info,
|
||||
int hook_count)
|
||||
{
|
||||
rmnet_module_hook_unregister_no_sync(info, hook_count);
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
/* Dummy macro. Can use kernel version later */
#define __CAT(a, b) a ## b
#define CAT(a, b) __CAT(a, b)

#define RMNET_HOOK_PARAMS(args...) args

#define RMNET_MODULE_HOOK_NUM(__hook) CAT(RMNET_MODULE_HOOK_, __hook)
#define RMNET_MODULE_HOOK_PROTOCOL(proto...) proto
#define RMNET_MODULE_HOOK_ARGS(args...) args
#define RMNET_MODULE_HOOK_RETURN_TYPE(type) type

/* A ...lovely... framework for checking if the argument passed in to a
 * function macro is a pair of parentheses. If so, resolve to 1.
 * Otherwise, 0.
 *
 * The idea here is that you pass the argument along with a "test" macro to
 * a "checker" macro. If the argument IS a pair of parentheses, this will
 * cause the tester macro to expand into multiple arguments.
 *
 * The key is that the "checker" macro just returns the second argument it
 * receives. So have the "tester" macro expand to a set of arguments that
 * makes 1 the second argument, or 0 if it doesn't expand.
 */
#define __RMNET_HOOK_SECOND_ARG(_, arg, ...) arg
#define RMNET_HOOK_PARENTHESES_CHECKER(args...) \
	__RMNET_HOOK_SECOND_ARG(args, 0, )
#define __RMNET_HOOK_PARENTHESES_TEST(arg) arg, 1,
#define __RMNET_HOOK_IS_PARENTHESES_TEST(...) __RMNET_HOOK_PARENTHESES_TEST(XXX)
#define RMNET_HOOK_IS_PARENTHESES(arg) \
	RMNET_HOOK_PARENTHESES_CHECKER(__RMNET_HOOK_IS_PARENTHESES_TEST arg)

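/* Illustrative expansion trace (annotation, not upstream text):
 *
 *	RMNET_HOOK_IS_PARENTHESES(())
 *	-> RMNET_HOOK_PARENTHESES_CHECKER(__RMNET_HOOK_IS_PARENTHESES_TEST ())
 *	-> RMNET_HOOK_PARENTHESES_CHECKER(XXX, 1,)
 *	-> __RMNET_HOOK_SECOND_ARG(XXX, 1, , 0, ) -> 1
 *
 * With a non-parenthesized argument the tester is never invoked:
 *
 *	RMNET_HOOK_IS_PARENTHESES(int)
 *	-> __RMNET_HOOK_SECOND_ARG(__RMNET_HOOK_IS_PARENTHESES_TEST int, 0, ) -> 0
 */
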
/* So what's the point of the above stuff, you ask?
 *
 * CPP can't actually do type checking ;). But what we CAN do is something
 * like this to determine if the type passed in is void. If so, this will
 * expand to (), causing the RMNET_HOOK_IS_PARENTHESES check to resolve to 1,
 * but if not, then the check resolves to 0.
 */
#define __RMNET_HOOK_CHECK_TYPE_IS_void(arg) arg
#define RMNET_HOOK_TYPE_IS_VOID(type) \
	RMNET_HOOK_IS_PARENTHESES( __RMNET_HOOK_CHECK_TYPE_IS_ ## type (()) )

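/* Worked examples (annotation, not upstream text):
 *
 *	RMNET_HOOK_TYPE_IS_VOID(void)
 *	-> RMNET_HOOK_IS_PARENTHESES(__RMNET_HOOK_CHECK_TYPE_IS_void (()))
 *	-> RMNET_HOOK_IS_PARENTHESES(()) -> 1
 *
 *	RMNET_HOOK_TYPE_IS_VOID(int)
 *	-> RMNET_HOOK_IS_PARENTHESES(__RMNET_HOOK_CHECK_TYPE_IS_int (())) -> 0
 */
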
/* And now, we have some logic macros. The main macro will resolve
 * to one of the branches depending on the bool value passed in.
 */
#define __IF_0(t_path, e_path...) e_path
#define __IF_1(t_path, e_path...) t_path
#define IF(arg) CAT(__IF_, arg)
#define __NOT_1 0
#define __NOT_0 1
#define NOT(arg) CAT(__NOT_, arg)

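/* E.g. (annotation): IF(1)(a, b) selects a, IF(0)(a, b) selects b, and
 * NOT() flips the flag, so IF(NOT(0))(a, b) also selects a.
 */
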
/* And now we combine this all, for a purely function macro way of splitting
 * return type handling...
 *
 * ...all to circumvent that you can't actually add #if conditionals in macro
 * expansions. It would have been much simpler that way. ;)
 */
#define RMNET_HOOK_IF_NON_VOID_TYPE(type) \
	IF(NOT(RMNET_HOOK_TYPE_IS_VOID(type)))

#define __RMNET_HOOK_PROTO(proto, ret_type)\
	RMNET_HOOK_IF_NON_VOID_TYPE(ret_type) \
	(RMNET_HOOK_PARAMS(ret_type *__ret_code, proto), \
	 RMNET_HOOK_PARAMS(proto))

#define __RMNET_HOOK_DECLARE(call, proto, ret_type) \
int rmnet_module_hook_##call( \
	__RMNET_HOOK_PROTO(RMNET_HOOK_PARAMS(proto), ret_type));

#undef RMNET_MODULE_HOOK
#define RMNET_MODULE_HOOK(call, hook_num, proto, args, ret_type) \
__RMNET_HOOK_DECLARE(call, RMNET_HOOK_PARAMS(proto), ret_type)
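
/* Sketch of how rmnet_hook.h is expected to invoke this (the hook shown is
 * hypothetical; see rmnet_hook.h for the real list):
 *
 *	RMNET_MODULE_HOOK(perf_ingress,
 *			  RMNET_MODULE_HOOK_NUM(PERF_INGRESS),
 *			  RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
 *			  RMNET_MODULE_HOOK_ARGS(skb),
 *			  RMNET_MODULE_HOOK_RETURN_TYPE(int));
 *
 * which, because int is non-void, declares
 *
 *	int rmnet_module_hook_perf_ingress(int *__ret_code, struct sk_buff *skb);
 */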

#include "rmnet_hook.h"

#endif
70
qcom/opensource/datarmnet/core/rmnet_private.h
Normal file
@@ -0,0 +1,70 @@
/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _RMNET_PRIVATE_H_
#define _RMNET_PRIVATE_H_

#include <linux/types.h>

#define RMNET_MAX_PACKET_SIZE	16384
#define RMNET_DFLT_PACKET_SIZE	1500
#define RMNET_NEEDED_HEADROOM	16
#define RMNET_TX_QUEUE_LEN	1000

/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION		BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V1	BIT(30)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V2	BIT(29)

#define RMNET_FLAGS_INGRESS_COALESCE		BIT(4)
#define RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5	BIT(5)
#define RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5	BIT(6)

#define RMNET_INGRESS_FORMAT_DL_MARKER	(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
					 RMNET_INGRESS_FORMAT_DL_MARKER_V2)

/* UL Packet prioritization */
#define RMNET_EGRESS_FORMAT_PRIORITY		BIT(28)

/* Power save feature */
#define RMNET_INGRESS_FORMAT_PS			BIT(27)
#define RMNET_FORMAT_PS_NOTIF			BIT(26)

/* UL Aggregation parameters */
#define RMNET_PAGE_RECYCLE			BIT(0)

/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
#define RMNET_EPMODE_BRIDGE (2)

/* Struct for skb control block use within rmnet driver */
struct rmnet_skb_cb {
	/* MUST be the first entries because of legacy reasons */
	char flush_shs;
	char qmap_steer;

	bool tethered;

	/* coalescing stats */
	u32 coal_bytes;
	u32 coal_bufsize;

	u32 bif;
	u32 ack_thresh;
	u32 ack_forced;
};

#define RMNET_SKB_CB(skb) ((struct rmnet_skb_cb *)(skb)->cb)
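
/* Usage sketch (annotation, not upstream code): skb->cb is 48 bytes, so
 * struct rmnet_skb_cb must stay within that limit. Callers mark per-packet
 * state directly through the cast, e.g.:
 *
 *	RMNET_SKB_CB(skb)->tethered = true;
 *	if (RMNET_SKB_CB(skb)->flush_shs)
 *		...
 */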

#endif /* _RMNET_PRIVATE_H_ */
162
qcom/opensource/datarmnet/core/rmnet_qmap.c
Normal file
@@ -0,0 +1,162 @@
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "dfc.h"
#include "rmnet_qmi.h"
#include "rmnet_ctl.h"
#include "rmnet_qmap.h"
#include "rmnet_module.h"
#include "rmnet_hook.h"

static atomic_t qmap_txid;
static void *rmnet_ctl_handle;
static void *rmnet_port;
static struct net_device *real_data_dev;
static struct rmnet_ctl_client_if *rmnet_ctl;

int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush)
{
	trace_dfc_qmap(skb->data, skb->len, false, ch);

	if (ch != RMNET_CH_CTL && real_data_dev) {
		skb->protocol = htons(ETH_P_MAP);
		skb->dev = real_data_dev;
		rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0, skb->data,
			       skb->len);

		rmnet_map_tx_qmap_cmd(skb, ch, flush);
		return 0;
	}

	if (rmnet_ctl->send(rmnet_ctl_handle, skb)) {
		pr_err("Failed to send to rmnet ctl\n");
		return -ECOMM;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_qmap_send);

static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	int rc = QMAP_CMD_DONE;

	if (!skb)
		return;

	trace_dfc_qmap(skb->data, skb->len, true, RMNET_CH_CTL);

	if (skb->len < sizeof(struct qmap_cmd_hdr))
		goto free_skb;

	cmd = (struct qmap_cmd_hdr *)skb->data;
	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
		goto free_skb;

	rcu_read_lock();

	switch (cmd->cmd_name) {
	case QMAP_DFC_CONFIG:
	case QMAP_DFC_IND:
	case QMAP_DFC_QUERY:
	case QMAP_DFC_END_MARKER:
	case QMAP_DFC_POWERSAVE:
		rc = dfc_qmap_cmd_handler(skb);
		break;

	case QMAP_LL_SWITCH:
	case QMAP_LL_SWITCH_STATUS:
		rc = ll_qmap_cmd_handler(skb);
		break;

	case QMAP_DATA_REPORT:
		rmnet_module_hook_aps_data_report(skb);
		rc = QMAP_CMD_DONE;
		break;

	default:
		if (cmd->cmd_type == QMAP_CMD_REQUEST)
			rc = QMAP_CMD_UNSUPPORTED;
	}

	/* Send ack */
	if (rc != QMAP_CMD_DONE) {
		if (rc == QMAP_CMD_ACK_INBAND) {
			cmd->cmd_type = QMAP_CMD_ACK;
			rmnet_qmap_send(skb, RMNET_CH_DEFAULT, false);
		} else {
			cmd->cmd_type = rc;
			rmnet_qmap_send(skb, RMNET_CH_CTL, false);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

free_skb:
	kfree_skb(skb);
}
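
/* Ack semantics above (annotation): QMAP_CMD_DONE means the command was
 * consumed and no ack is needed. QMAP_CMD_ACK_INBAND echoes the command
 * back as an ack over the default data channel, while any other value
 * (e.g. QMAP_CMD_UNSUPPORTED) is written into cmd_type and returned over
 * the control channel.
 */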

static struct rmnet_ctl_client_hooks cb = {
	.ctl_dl_client_hook = rmnet_qmap_cmd_handler,
};

int rmnet_qmap_next_txid(void)
{
	return atomic_inc_return(&qmap_txid);
}
EXPORT_SYMBOL(rmnet_qmap_next_txid);

struct net_device *rmnet_qmap_get_dev(u8 mux_id)
{
	return rmnet_get_rmnet_dev(rmnet_port, mux_id);
}

int rmnet_qmap_init(void *port)
{
	if (rmnet_ctl_handle)
		return 0;

	atomic_set(&qmap_txid, 0);
	rmnet_port = port;
	real_data_dev = rmnet_get_real_dev(rmnet_port);

	rmnet_ctl = rmnet_ctl_if();
	if (!rmnet_ctl) {
		pr_err("rmnet_ctl module not loaded\n");
		return -EFAULT;
	}

	if (rmnet_ctl->reg)
		rmnet_ctl_handle = rmnet_ctl->reg(&cb);

	if (!rmnet_ctl_handle) {
		pr_err("Failed to register with rmnet ctl\n");
		return -EFAULT;
	}

	return 0;
}

void rmnet_qmap_exit(void)
{
	if (rmnet_ctl && rmnet_ctl->dereg)
		rmnet_ctl->dereg(rmnet_ctl_handle);

	rmnet_ctl_handle = NULL;
	real_data_dev = NULL;
	rmnet_port = NULL;
}
68
qcom/opensource/datarmnet/core/rmnet_qmap.h
Normal file
@@ -0,0 +1,68 @@
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __RMNET_QMAP_H
#define __RMNET_QMAP_H

#include "qmi_rmnet_i.h"

#define QMAP_CMD_DONE		-1
#define QMAP_CMD_ACK_INBAND	-2

#define QMAP_CMD_REQUEST	0
#define QMAP_CMD_ACK		1
#define QMAP_CMD_UNSUPPORTED	2
#define QMAP_CMD_INVALID	3

struct qmap_hdr {
	u8	cd_pad;
	u8	mux_id;
	__be16	pkt_len;
} __aligned(1);

#define QMAP_HDR_LEN sizeof(struct qmap_hdr)

struct qmap_cmd_hdr {
	u8	pad_len:6;
	u8	reserved_bit:1;
	u8	cd_bit:1;
	u8	mux_id;
	__be16	pkt_len;
	u8	cmd_name;
	u8	cmd_type:2;
	u8	reserved:6;
	u16	reserved2;
	__be32	tx_id;
} __aligned(1);
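
/* Minimal sketch of building a command with this header (illustrative; the
 * skb setup and the chosen command are hypothetical, the real senders live
 * in the DFC and LL handlers). pkt_len counts the bytes that follow the
 * 4-byte QMAP header:
 *
 *	struct qmap_cmd_hdr *hdr = skb_put(skb, sizeof(*hdr));
 *
 *	hdr->cd_bit = 1;
 *	hdr->mux_id = mux_id;
 *	hdr->pkt_len = htons(skb->len - QMAP_HDR_LEN);
 *	hdr->cmd_name = QMAP_DFC_QUERY;
 *	hdr->cmd_type = QMAP_CMD_REQUEST;
 *	hdr->tx_id = htonl(rmnet_qmap_next_txid());
 *
 *	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
 */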

int rmnet_qmap_init(void *port);
void rmnet_qmap_exit(void);
int rmnet_qmap_next_txid(void);
int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush);
struct net_device *rmnet_qmap_get_dev(u8 mux_id);

#define QMAP_DFC_CONFIG		10
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_END_MARKER	13
#define QMAP_DFC_POWERSAVE	14
int dfc_qmap_cmd_handler(struct sk_buff *skb);

#define QMAP_LL_SWITCH		25
#define QMAP_LL_SWITCH_STATUS	26
int ll_qmap_cmd_handler(struct sk_buff *skb);

#define QMAP_DATA_REPORT	34

#endif /* __RMNET_QMAP_H */
101
qcom/opensource/datarmnet/core/rmnet_qmi.h
Normal file
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _RMNET_QMI_H
#define _RMNET_QMI_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define CONFIG_QTI_QMI_RMNET 1

void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);

#ifdef CONFIG_QTI_QMI_RMNET
void *rmnet_get_qmi_pt(void *port);
void *rmnet_get_qos_pt(struct net_device *dev);
void *rmnet_get_rmnet_port(struct net_device *dev);
struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id);
void rmnet_reset_qmi_pt(void *port);
void rmnet_init_qmi_pt(void *port, void *qmi);
void rmnet_enable_all_flows(void *port);
bool rmnet_all_flows_enabled(void *port);
void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
struct net_device *rmnet_get_real_dev(void *port);
int rmnet_get_dlmarker_info(void *port);
void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{
	return NULL;
}

static inline void *rmnet_get_qos_pt(struct net_device *dev)
{
	return NULL;
}

static inline void *rmnet_get_rmnet_port(struct net_device *dev)
{
	return NULL;
}

static inline struct net_device *rmnet_get_rmnet_dev(void *port,
						     u8 mux_id)
{
	return NULL;
}

static inline void rmnet_reset_qmi_pt(void *port)
{
}

static inline void rmnet_init_qmi_pt(void *port, void *qmi)
{
}

static inline void rmnet_enable_all_flows(void *port)
{
}

static inline bool rmnet_all_flows_enabled(void *port)
{
	return true;
}

static inline void rmnet_set_port_format(void *port)
{
}

static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
}

static inline int rmnet_get_powersave_notif(void *port)
{
	return 0;
}

static inline struct net_device *rmnet_get_real_dev(void *port)
{
	return NULL;
}

static inline int rmnet_get_dlmarker_info(void *port)
{
	return 0;
}
#endif /* CONFIG_QTI_QMI_RMNET */
#endif /*_RMNET_QMI_H*/
496
qcom/opensource/datarmnet/core/rmnet_trace.h
Normal file
@@ -0,0 +1,496 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/version.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rmnet
#undef TRACE_INCLUDE_PATH

#ifndef RMNET_TRACE_INCLUDE_PATH
#if defined(CONFIG_RMNET_LA_PLATFORM)
#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#elif defined(__arch_um__)
#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
#else
#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
#endif /* RMNET_TRACE_INCLUDE_PATH */
#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_FILE rmnet_trace
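
/* Note (annotation): the tracepoint machinery resolves TRACE_INCLUDE_PATH
 * relative to include/trace/define_trace.h in the kernel tree, which is why
 * the out-of-tree location of this header has to be spelled out per build
 * flavor above.
 */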

#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RMNET_H

#include <linux/skbuff.h>
#include <linux/tracepoint.h>

/*****************************************************************************/
/* Trace events for rmnet module */
/*****************************************************************************/
TRACE_EVENT(rmnet_xmit_skb,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__string(dev_name, skb->dev->name)
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__assign_str(dev_name, skb->dev->name);
		__entry->len = skb->len;
	),

	TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len)
);

DECLARE_EVENT_CLASS
	(rmnet_mod_template,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),

	 TP_STRUCT__entry(__field(u8, func)
			  __field(u8, evt)
			  __field(u32, uint1)
			  __field(u32, uint2)
			  __field(u64, ulong1)
			  __field(u64, ulong2)
			  __field(void *, ptr1)
			  __field(void *, ptr2)
	 ),

	 TP_fast_assign(__entry->func = func;
			__entry->evt = evt;
			__entry->uint1 = uint1;
			__entry->uint2 = uint2;
			__entry->ulong1 = ulong1;
			__entry->ulong2 = ulong2;
			__entry->ptr1 = ptr1;
			__entry->ptr2 = ptr2;
	 ),

	 TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
		   __entry->func, __entry->evt,
		   __entry->uint1, __entry->uint2,
		   __entry->ulong1, __entry->ulong2,
		   __entry->ptr1, __entry->ptr2)
)

DEFINE_EVENT
	(rmnet_mod_template, rmnet_low,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_high,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_err,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);
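
/* Usage sketch (annotation; the func/event IDs shown are hypothetical,
 * each caller defines its own):
 *
 *	trace_rmnet_low(0x10, 0x01, pkts, bytes, 0, 0, skb, NULL);
 *
 * Each TRACE_EVENT/DEFINE_EVENT here generates a trace_<name>() emitter and
 * a trace_<name>_enabled() static-key test used by the drivers below.
 */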

TRACE_EVENT(print_skb_gso,

	TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest,
		 u16 ip_proto, u16 xport_proto, const char *saddr, const char *daddr),

	TP_ARGS(skb, src, dest, ip_proto, xport_proto, saddr, daddr),

	TP_STRUCT__entry(
		__field(void *, skbaddr)
		__field(int, len)
		__field(int, data_len)
		__field(__be16, src)
		__field(__be16, dest)
		__field(u16, ip_proto)
		__field(u16, xport_proto)
		__string(saddr, saddr)
		__string(daddr, daddr)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->src = src;
		__entry->dest = dest;
		__entry->ip_proto = ip_proto;
		__entry->xport_proto = xport_proto;
		__assign_str(saddr, saddr);
		__assign_str(daddr, daddr);
	),

	TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, [%s][%s] src=%s %u dest=%s %u",
		  __entry->skbaddr, __entry->len, __entry->data_len,
		  __entry->ip_proto == htons(ETH_P_IP) ? "IPv4" : "IPv6",
		  __entry->xport_proto == IPPROTO_TCP ? "TCP" : "UDP",
		  __get_str(saddr), be16_to_cpu(__entry->src),
		  __get_str(daddr), be16_to_cpu(__entry->dest))
);

DECLARE_EVENT_CLASS(print_icmp,

	TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
		 const char *saddr, const char *daddr),

	TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr),

	TP_STRUCT__entry(
		__field(void *, skbaddr)
		__field(int, len)
		__field(u16, ip_proto)
		__field(u8, type)
		__field(__be16, sequence)
		__string(saddr, saddr)
		__string(daddr, daddr)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__entry->ip_proto = ip_proto;
		__entry->type = type;
		__entry->sequence = sequence;
		__assign_str(saddr, saddr);
		__assign_str(daddr, daddr);
	),

	TP_printk("ICMP: skbaddr=%pK, len=%d, [%s] type=%u sequence=%u source=%s dest=%s",
		  __entry->skbaddr, __entry->len,
		  __entry->ip_proto == htons(ETH_P_IP) ? "IPv4" : "IPv6",
		  __entry->type, be16_to_cpu(__entry->sequence), __get_str(saddr),
		  __get_str(daddr))
);

DEFINE_EVENT
	(print_icmp, print_icmp_tx,

	 TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
		  const char *saddr, const char *daddr),

	 TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr)
);

DEFINE_EVENT
	(print_icmp, print_icmp_rx,

	 TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
		  const char *saddr, const char *daddr),

	 TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr)
);

DECLARE_EVENT_CLASS(print_tcp,

	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		 struct tcphdr *tp),

	TP_ARGS(skb, saddr, daddr, tp),

	TP_STRUCT__entry(
		__field(void *, skbaddr)
		__field(int, len)
		__string(saddr, saddr)
		__string(daddr, daddr)
		__field(__be16, source)
		__field(__be16, dest)
		__field(__be32, seq)
		__field(__be32, ack_seq)
		__field(u8, syn)
		__field(u8, ack)
		__field(u8, fin)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__assign_str(saddr, saddr);
		__assign_str(daddr, daddr);
		__entry->source = tp->source;
		__entry->dest = tp->dest;
		__entry->seq = tp->seq;
		__entry->ack_seq = tp->ack_seq;
		__entry->syn = tp->syn;
		__entry->ack = tp->ack;
		__entry->fin = tp->fin;
	),

	TP_printk("TCP: skbaddr=%pK, len=%d source=%s %u dest=%s %u seq=%u ack_seq=%u syn=%u ack=%u fin=%u",
		  __entry->skbaddr, __entry->len,
		  __get_str(saddr), be16_to_cpu(__entry->source),
		  __get_str(daddr), be16_to_cpu(__entry->dest),
		  be32_to_cpu(__entry->seq), be32_to_cpu(__entry->ack_seq),
		  !!__entry->syn, !!__entry->ack, !!__entry->fin)
);

DEFINE_EVENT
	(print_tcp, print_tcp_tx,

	 TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		  struct tcphdr *tp),

	 TP_ARGS(skb, saddr, daddr, tp)
);

DEFINE_EVENT
	(print_tcp, print_tcp_rx,

	 TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		  struct tcphdr *tp),

	 TP_ARGS(skb, saddr, daddr, tp)
);

DECLARE_EVENT_CLASS(print_udp,

	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		 struct udphdr *uh, u16 ip_id),

	TP_ARGS(skb, saddr, daddr, uh, ip_id),

	TP_STRUCT__entry(
		__field(void *, skbaddr)
		__field(int, len)
		__string(saddr, saddr)
		__string(daddr, daddr)
		__field(__be16, source)
		__field(__be16, dest)
		__field(__be16, ip_id)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__assign_str(saddr, saddr);
		__assign_str(daddr, daddr);
		__entry->source = uh->source;
		__entry->dest = uh->dest;
		__entry->ip_id = ip_id;
	),

	TP_printk("UDP: skbaddr=%pK, len=%d source=%s %u dest=%s %u ip_id=%u",
		  __entry->skbaddr, __entry->len,
		  __get_str(saddr), be16_to_cpu(__entry->source),
		  __get_str(daddr), be16_to_cpu(__entry->dest),
		  __entry->ip_id)
);

DEFINE_EVENT
	(print_udp, print_udp_tx,

	 TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		  struct udphdr *uh, u16 ip_id),

	 TP_ARGS(skb, saddr, daddr, uh, ip_id)
);

DEFINE_EVENT
	(print_udp, print_udp_rx,

	 TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
		  struct udphdr *uh, u16 ip_id),

	 TP_ARGS(skb, saddr, daddr, uh, ip_id)
);

TRACE_EVENT(print_pfn,

	TP_PROTO(struct sk_buff *skb, unsigned long *pfn_list, int num_elements),

	TP_ARGS(skb, pfn_list, num_elements),

	TP_STRUCT__entry(
		__field(void *, skbaddr)
		__field(int, num_elements)
		__dynamic_array(unsigned long, pfn_list, num_elements)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->num_elements = num_elements;
		memcpy(__get_dynamic_array(pfn_list), pfn_list,
		       num_elements * sizeof(*pfn_list));
	),

	TP_printk("skbaddr=%pK count=%d pfn=%s",
		  __entry->skbaddr,
		  __entry->num_elements,
		  __print_array(__get_dynamic_array(pfn_list),
				__entry->num_elements,
				sizeof(unsigned long))
	)
);

/*****************************************************************************/
/* Trace events for rmnet_perf module */
/*****************************************************************************/
DEFINE_EVENT
	(rmnet_mod_template, rmnet_perf_low,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_perf_high,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_perf_err,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)

);

/*****************************************************************************/
/* Trace events for rmnet_shs module */
/*****************************************************************************/
DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_low,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_high,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_err,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_wq_low,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_wq_high,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT
	(rmnet_mod_template, rmnet_shs_wq_err,

	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DECLARE_EVENT_CLASS
	(rmnet_freq_template,

	 TP_PROTO(u8 core, u32 newfreq),

	 TP_ARGS(core, newfreq),

	 TP_STRUCT__entry(__field(u8, core)
			  __field(u32, newfreq)
	 ),

	 TP_fast_assign(__entry->core = core;
			__entry->newfreq = newfreq;
	 ),

	 TP_printk("freq policy core:%u freq floor :%u",
		   __entry->core, __entry->newfreq)

);

DEFINE_EVENT
	(rmnet_freq_template, rmnet_freq_boost,

	 TP_PROTO(u8 core, u32 newfreq),

	 TP_ARGS(core, newfreq)
);

DEFINE_EVENT
	(rmnet_freq_template, rmnet_freq_reset,

	 TP_PROTO(u8 core, u32 newfreq),

	 TP_ARGS(core, newfreq)
);

TRACE_EVENT
	(rmnet_freq_update,

	 TP_PROTO(u8 core, u32 lowfreq, u32 highfreq),

	 TP_ARGS(core, lowfreq, highfreq),

	 TP_STRUCT__entry(__field(u8, core)
			  __field(u32, lowfreq)
			  __field(u32, highfreq)
	 ),

	 TP_fast_assign(__entry->core = core;
			__entry->lowfreq = lowfreq;
			__entry->highfreq = highfreq;

	 ),

	 TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u",
		   __entry->core, __entry->lowfreq, __entry->highfreq)
);
#endif /* _TRACE_RMNET_H */

#include <trace/define_trace.h>
811
qcom/opensource/datarmnet/core/rmnet_vnd.c
Normal file
@@ -0,0 +1,811 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * RMNET Data virtual network driver
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/inet.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/ethtool.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
#include "rmnet_genl.h"
#include "rmnet_ll.h"
#include "rmnet_ctl.h"

#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
#include "rmnet_trace.h"

typedef void (*rmnet_perf_tether_egress_hook_t)(struct sk_buff *skb);
rmnet_perf_tether_egress_hook_t rmnet_perf_tether_egress_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_egress_hook);

typedef void (*rmnet_perf_egress_hook1_t)(struct sk_buff *skb);
rmnet_perf_egress_hook1_t rmnet_perf_egress_hook1 __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_egress_hook1);

typedef void (*rmnet_aps_pre_queue_t)(struct net_device *dev,
				      struct sk_buff *skb);
rmnet_aps_pre_queue_t rmnet_aps_pre_queue __read_mostly;
EXPORT_SYMBOL(rmnet_aps_pre_queue);

typedef int (*rmnet_aps_post_queue_t)(struct net_device *dev,
				      struct sk_buff *skb);
rmnet_aps_post_queue_t rmnet_aps_post_queue __read_mostly;
EXPORT_SYMBOL(rmnet_aps_post_queue);

typedef void (*rmnet_wlan_ll_tuple_hook_t)(struct sk_buff *skb);
rmnet_wlan_ll_tuple_hook_t rmnet_wlan_ll_tuple_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_wlan_ll_tuple_hook);

/* RX/TX Fixup */

void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb_len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb_len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;
	int ip_type;
	u32 mark;
	unsigned int len;
	rmnet_perf_tether_egress_hook_t rmnet_perf_tether_egress;
	rmnet_aps_post_queue_t aps_post_queue;
	rmnet_wlan_ll_tuple_hook_t rmnet_wlan_ll_tuple;
	bool low_latency = false;
	bool need_to_drop = false;

	priv = netdev_priv(dev);

	aps_post_queue = rcu_dereference(rmnet_aps_post_queue);
	if (aps_post_queue)
		if (unlikely(aps_post_queue(dev, skb))) {
			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

	if (priv->real_dev) {
		ip_type = (ip_hdr(skb)->version == 4) ?
					AF_INET : AF_INET6;
		mark = skb->mark;
		len = skb->len;
		trace_rmnet_xmit_skb(skb);
		rmnet_perf_tether_egress = rcu_dereference(rmnet_perf_tether_egress_hook);
		if (rmnet_perf_tether_egress) {
			rmnet_perf_tether_egress(skb);
		}

		rmnet_wlan_ll_tuple = rcu_dereference(rmnet_wlan_ll_tuple_hook);
		if (rmnet_wlan_ll_tuple) {
			rmnet_wlan_ll_tuple(skb);
		}

		qmi_rmnet_get_flow_state(dev, skb, &need_to_drop, &low_latency);
		if (unlikely(need_to_drop)) {
			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (RMNET_APS_LLC(skb->priority))
			low_latency = true;

		if ((low_latency || RMNET_APS_LLB(skb->priority)) &&
		    skb_is_gso(skb)) {
			netdev_features_t features;
			struct sk_buff *segs, *tmp;

			features = dev->features & ~NETIF_F_GSO_MASK;
			segs = skb_gso_segment(skb, features);
			if (IS_ERR_OR_NULL(segs)) {
				this_cpu_add(priv->pcpu_stats->stats.tx_drops,
					     skb_shinfo(skb)->gso_segs);
				priv->stats.ll_tso_errs++;
				kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			consume_skb(skb);
			for (skb = segs; skb; skb = tmp) {
				tmp = skb->next;
				skb->dev = dev;
				priv->stats.ll_tso_segs++;
				skb_mark_not_on_list(skb);
				rmnet_egress_handler(skb, low_latency);
			}
		} else if (!low_latency && skb_is_gso(skb)) {
			u64 gso_limit = priv->real_dev->gso_max_size ? : 1;
			u16 gso_goal = 0;
			netdev_features_t features = NETIF_F_SG;
			u16 orig_gso_size = skb_shinfo(skb)->gso_size;
			unsigned int orig_gso_type = skb_shinfo(skb)->gso_type;
			struct sk_buff *segs, *tmp;

			features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

			if (skb->len < gso_limit || gso_limit > 65535) {
				priv->stats.tso_segment_skip++;
				rmnet_egress_handler(skb, low_latency);
			} else {
				do_div(gso_limit, skb_shinfo(skb)->gso_size);
				gso_goal = gso_limit * skb_shinfo(skb)->gso_size;
				skb_shinfo(skb)->gso_size = gso_goal;

				segs = __skb_gso_segment(skb, features, false);
				if (IS_ERR_OR_NULL(segs)) {
					skb_shinfo(skb)->gso_size = orig_gso_size;
					skb_shinfo(skb)->gso_type = orig_gso_type;

					priv->stats.tso_segment_fail++;
					rmnet_egress_handler(skb, low_latency);
				} else {
					consume_skb(skb);

					for (skb = segs; skb; skb = tmp) {
						tmp = skb->next;
						skb->dev = dev;

						skb_shinfo(skb)->gso_size = orig_gso_size;
						skb_shinfo(skb)->gso_type = orig_gso_type;

						priv->stats.tso_segment_success++;
						skb_mark_not_on_list(skb);
						rmnet_egress_handler(skb, low_latency);
					}
				}
			}
		} else {
			rmnet_egress_handler(skb, low_latency);
		}
		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
		qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev), NULL, NULL);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}
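
/* Worked example of the re-segmentation math above (annotation with
 * illustrative numbers): with real_dev->gso_max_size = 65535 and
 * gso_size = 1400, do_div() leaves gso_limit = 46, so gso_goal =
 * 46 * 1400 = 64400 bytes per software "super-segment"; the original
 * gso_size/gso_type are then restored on each resulting segment.
 */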

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	void *qos;

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);

	qos = rcu_dereference(priv->qos_info);
	RCU_INIT_POINTER(priv->qos_info, NULL);
	qmi_rmnet_qos_exit_pre(qos);
}

static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static u16 rmnet_vnd_select_queue(struct net_device *dev,
				  struct sk_buff *skb,
				  struct net_device *sb_dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	u64 boost_period = 0;
	int boost_trigger = 0;
	int txq = 0;
	rmnet_perf_egress_hook1_t rmnet_perf_egress1;
	rmnet_aps_pre_queue_t aps_pre_queue;

	rmnet_perf_egress1 = rcu_dereference(rmnet_perf_egress_hook1);
	if (rmnet_perf_egress1) {
		rmnet_perf_egress1(skb);
	}

	if (trace_print_icmp_tx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
		u16 ip_proto = 0;
		__be16 sequence = 0;
		u8 type = 0;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
				goto skip_trace_print_icmp_tx;

			if (icmp_hdr(skb)->type != ICMP_ECHOREPLY &&
			    icmp_hdr(skb)->type != ICMP_ECHO)
				goto skip_trace_print_icmp_tx;

			ip_proto = htons(ETH_P_IP);
			type = icmp_hdr(skb)->type;
			sequence = icmp_hdr(skb)->un.echo.sequence;
			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
		}

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(skb)->nexthdr != NEXTHDR_ICMP)
				goto skip_trace_print_icmp_tx;

			if (icmp6_hdr(skb)->icmp6_type != ICMPV6_ECHO_REQUEST &&
			    icmp6_hdr(skb)->icmp6_type != ICMPV6_ECHO_REPLY)
				goto skip_trace_print_icmp_tx;

			ip_proto = htons(ETH_P_IPV6);
			type = icmp6_hdr(skb)->icmp6_type;
			sequence = icmp6_hdr(skb)->icmp6_sequence;
			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
		}

		if (!ip_proto)
			goto skip_trace_print_icmp_tx;

		trace_print_icmp_tx(skb, ip_proto, type, sequence, saddr, daddr);
	}

skip_trace_print_icmp_tx:
	if (trace_print_tcp_tx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(skb)->protocol != IPPROTO_TCP)
				goto skip_trace_print_tcp_tx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
		}

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)
				goto skip_trace_print_tcp_tx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
		}

		trace_print_tcp_tx(skb, saddr, daddr, tcp_hdr(skb));
	}

skip_trace_print_tcp_tx:
	if (trace_print_udp_tx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
		u16 ip_id = 0;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(skb)->protocol != IPPROTO_UDP)
				goto skip_trace_print_udp_tx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
			ip_id = ntohs(ip_hdr(skb)->id);
		}

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
				goto skip_trace_print_udp_tx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
		}

		trace_print_udp_tx(skb, saddr, daddr, udp_hdr(skb), ip_id);
	}

skip_trace_print_udp_tx:
	if (trace_print_skb_gso_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
		u16 ip_proto = 0, xport_proto = 0;

		if (!skb_shinfo(skb)->gso_size)
			goto skip_trace;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				xport_proto = IPPROTO_TCP;
			else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
				xport_proto = IPPROTO_UDP;
			else
				goto skip_trace;

			ip_proto = htons(ETH_P_IP);
			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
		}

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				xport_proto = IPPROTO_TCP;
			else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				xport_proto = IPPROTO_UDP;
			else
				goto skip_trace;

			ip_proto = htons(ETH_P_IPV6);
			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
		}

		trace_print_skb_gso(skb,
				    xport_proto == IPPROTO_TCP ? tcp_hdr(skb)->source :
								 udp_hdr(skb)->source,
				    xport_proto == IPPROTO_TCP ? tcp_hdr(skb)->dest :
								 udp_hdr(skb)->dest,
				    ip_proto, xport_proto, saddr, daddr);
	}

skip_trace:
	if (priv->real_dev)
		txq = qmi_rmnet_get_queue(dev, skb);

	if (rmnet_core_userspace_connected) {
		rmnet_update_pid_and_check_boost(task_pid_nr(current),
						 skb->len,
						 &boost_trigger,
						 &boost_period);

		if (boost_trigger)
			(void) boost_period;
	}

	aps_pre_queue = rcu_dereference(rmnet_aps_pre_queue);
	if (aps_pre_queue)
		aps_pre_queue(dev, skb);

	return (txq < dev->real_num_tx_queues) ? txq : 0;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
	.ndo_init = rmnet_vnd_init,
	.ndo_uninit = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
	.ndo_select_queue = rmnet_vnd_select_queue,
};

static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
	"Checksum computed in hardware",
	"Coalescing packets received",
	"Coalesced packets",
	"Coalescing header NLO errors",
	"Coalescing header pcount errors",
	"Coalescing checksum errors",
	"Coalescing packet reconstructs",
	"Coalescing IP version invalid",
	"Coalescing L4 header invalid",
	"Coalescing close Non-coalescable",
	"Coalescing close L3 mismatch",
	"Coalescing close L4 mismatch",
	"Coalescing close HW NLO limit",
	"Coalescing close HW packet limit",
	"Coalescing close HW byte limit",
	"Coalescing close HW time limit",
	"Coalescing close HW eviction",
	"Coalescing close Coalescable",
	"Coalescing packets over VEID0",
	"Coalescing packets over VEID1",
	"Coalescing packets over VEID2",
	"Coalescing packets over VEID3",
	"Coalescing packets over VEID4",
	"Coalescing packets over VEID5",
	"Coalescing packets over VEID6",
	"Coalescing packets over VEID7",
	"Coalescing packets over VEID8",
	"Coalescing packets over VEID9",
	"Coalescing packets over VEID10",
	"Coalescing packets over VEID11",
	"Coalescing packets over VEID12",
	"Coalescing packets over VEID13",
	"Coalescing packets over VEID14",
	"Coalescing packets over VEID15",
	"Coalescing TCP frames",
	"Coalescing TCP bytes",
	"Coalescing UDP frames",
	"Coalescing UDP bytes",
	"Uplink priority packets",
	"TSO packets",
	"TSO packets arriving incorrectly",
	"TSO segment success",
	"TSO segment fail",
	"TSO segment skip",
	"LL TSO segment success",
	"LL TSO segment fail",
	"APS priority packets",
};

static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
	"MAP Cmd last version",
	"MAP Cmd last ep id",
	"MAP Cmd last transaction id",
	"DL header last seen sequence",
	"DL header last seen bytes",
	"DL header last seen packets",
	"DL header last seen flows",
	"DL header pkts received",
	"DL header total bytes received",
	"DL header total pkts received",
	"DL trailer last seen sequence",
	"DL trailer pkts received",
	"UL agg reuse",
	"UL agg alloc",
	"DL chaining [0-10)",
	"DL chaining [10-20)",
	"DL chaining [20-30)",
	"DL chaining [30-40)",
	"DL chaining [40-50)",
	"DL chaining [50-60)",
	"DL chaining >= 60",
	"DL chaining frags [0-1]",
	"DL chaining frags [2-3]",
	"DL chaining frags [4-7]",
	"DL chaining frags [8-11]",
	"DL chaining frags [12-15]",
	"DL chaining frags = 16",
	"PB Byte Marker Count",
};

static const char rmnet_ll_gstrings_stats[][ETH_GSTRING_LEN] = {
	"LL TX queues",
	"LL TX queue errors",
	"LL TX completions",
	"LL TX completion errors",
	"LL RX queues",
	"LL RX queue errors",
	"LL RX status errors",
	"LL RX empty transfers",
	"LL RX OOM errors",
	"LL RX packets",
	"LL RX temp buffer allocations",
	"LL TX disabled",
	"LL TX enabled",
	"LL TX FC queued",
	"LL TX FC sent",
	"LL TX FC err",
};

static const char rmnet_qmap_gstrings_stats[][ETH_GSTRING_LEN] = {
	"QMAP RX success",
	"QMAP RX errors",
	"QMAP TX queued",
	"QMAP TX errors",
	"QMAP TX complete (MHI)",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	size_t off = 0;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		off += sizeof(rmnet_gstrings_stats);
		memcpy(buf + off,
		       &rmnet_port_gstrings_stats,
		       sizeof(rmnet_port_gstrings_stats));
		off += sizeof(rmnet_port_gstrings_stats);
		memcpy(buf + off, &rmnet_ll_gstrings_stats,
		       sizeof(rmnet_ll_gstrings_stats));
		off += sizeof(rmnet_ll_gstrings_stats);
		memcpy(buf + off, &rmnet_qmap_gstrings_stats,
		       sizeof(rmnet_qmap_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats) +
		       ARRAY_SIZE(rmnet_port_gstrings_stats) +
		       ARRAY_SIZE(rmnet_ll_gstrings_stats) +
		       ARRAY_SIZE(rmnet_qmap_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;
	struct rmnet_port_priv_stats *stp;
	struct rmnet_ll_stats *llp;
	struct rmnet_port *port;
	size_t off = 0;
	u64 qmap_s[ARRAY_SIZE(rmnet_qmap_gstrings_stats)];

	port = rmnet_get_port(priv->real_dev);

	if (!data || !port)
		return;

	stp = &port->stats;
	llp = rmnet_ll_get_stats();

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
	off += ARRAY_SIZE(rmnet_gstrings_stats);
	memcpy(data + off, stp,
	       ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
	off += ARRAY_SIZE(rmnet_port_gstrings_stats);
	memcpy(data + off, llp,
	       ARRAY_SIZE(rmnet_ll_gstrings_stats) * sizeof(u64));

	off += ARRAY_SIZE(rmnet_ll_gstrings_stats);
	memset(qmap_s, 0, sizeof(qmap_s));
	rmnet_ctl_get_stats(qmap_s, ARRAY_SIZE(rmnet_qmap_gstrings_stats));
	memcpy(data + off, qmap_s,
	       ARRAY_SIZE(rmnet_qmap_gstrings_stats) * sizeof(u64));
}

static int rmnet_stats_reset(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_port_priv_stats *stp;
	struct rmnet_priv_stats *st;
	struct rmnet_port *port;

	port = rmnet_get_port(priv->real_dev);
	if (!port)
		return -EINVAL;

	stp = &port->stats;

	memset(stp, 0, sizeof(*stp));

	st = &priv->stats;

	memset(st, 0, sizeof(*st));

	return 0;
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
	.nway_reset = rmnet_stats_reset,
};
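
/* Note (annotation): these ops plug into the standard ethtool interface,
 * so the counters above surface via "ethtool -S <rmnet dev>" and, because
 * .nway_reset is reused for rmnet_stats_reset(), "ethtool -r <rmnet dev>"
 * zeroes the device and port statistics (the interface name is up to the
 * configuration).
 */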
|
||||
/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
|
||||
* flags, ARP type, needed headroom, etc...
|
||||
*/
|
||||
void rmnet_vnd_setup(struct net_device *rmnet_dev)
|
||||
{
|
||||
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
|
||||
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
|
||||
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
|
||||
eth_random_addr(rmnet_dev->perm_addr);
|
||||
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
|
||||
|
||||
/* Raw IP mode */
|
||||
rmnet_dev->header_ops = NULL; /* No header */
|
||||
rmnet_dev->type = ARPHRD_RAWIP;
|
||||
rmnet_dev->hard_header_len = 0;
|
||||
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
|
||||
|
||||
rmnet_dev->needs_free_netdev = true;
|
||||
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
|
||||
}
|
||||
|
||||
/* Exposed API */
|
||||
|
||||
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
|
||||
struct rmnet_port *port,
|
||||
struct net_device *real_dev,
|
||||
struct rmnet_endpoint *ep)
|
||||
{
|
||||
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
|
||||
int rc;
|
||||
|
||||
if (ep->egress_dev)
|
||||
return -EINVAL;
|
||||
|
||||
if (rmnet_get_endpoint(port, id))
|
||||
return -EBUSY;
|
||||
|
||||
rmnet_dev->hw_features = NETIF_F_RXCSUM;
|
||||
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
|
||||
rmnet_dev->hw_features |= NETIF_F_SG;
|
||||
rmnet_dev->hw_features |= NETIF_F_GRO_HW;
|
||||
rmnet_dev->hw_features |= NETIF_F_GSO_UDP_L4;
|
||||
rmnet_dev->hw_features |= NETIF_F_ALL_TSO;
|
||||
|
||||
priv->real_dev = real_dev;
|
||||
|
||||
rmnet_dev->gso_max_size = 65535;
|
||||
|
||||
rc = register_netdevice(rmnet_dev);
|
||||
if (!rc) {
|
||||
ep->egress_dev = rmnet_dev;
|
||||
ep->mux_id = id;
|
||||
port->nr_rmnet_devs++;
|
||||
|
||||
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
|
||||
|
||||
priv->mux_id = id;
|
||||
rcu_assign_pointer(priv->qos_info,
|
||||
qmi_rmnet_qos_init(real_dev, rmnet_dev, id));
|
||||
|
||||
netdev_dbg(rmnet_dev, "rmnet dev created\n");
|
||||
}
|
||||
|
||||
return rc;
|
||||
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);
	return priv->mux_id;
}

int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for the disable path, which is more
	 * latency sensitive than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}
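
/* Hypothetical caller, for illustration only (names invented here, not part
 * of this change): a MAP flow-control command handler would park or wake the
 * VND's TX queue through the helper above:
 *
 *	static void handle_map_flow_cmd(struct net_device *rmnet_dev, u8 on)
 *	{
 *		rmnet_vnd_do_flow_control(rmnet_dev, on ? 1 : 0);
 *	}
 *
 * where on == 0 means the modem's buffers are full and TX must stop.
 */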

void rmnet_vnd_reset_mac_addr(struct net_device *dev)
{
	if (dev->netdev_ops != &rmnet_vnd_ops)
		return;

	eth_random_addr(dev->perm_addr);
}
31
qcom/opensource/datarmnet/core/rmnet_vnd.h
Normal file
@ -0,0 +1,31 @@
/* Copyright (c) 2013-2018, 2020-2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data Virtual Network Device APIs
 *
 */

#ifndef _RMNET_VND_H_
#define _RMNET_VND_H_

int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
void rmnet_vnd_reset_mac_addr(struct net_device *dev);
#endif /* _RMNET_VND_H_ */
92
qcom/opensource/datarmnet/core/wda.h
Normal file
@ -0,0 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018,2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/version.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM wda
#undef TRACE_INCLUDE_PATH

#ifndef RMNET_TRACE_INCLUDE_PATH
#if defined(CONFIG_RMNET_LA_PLATFORM)
#ifdef CONFIG_ARCH_KHAJE
#define RMNET_TRACE_INCLUDE_PATH ../../../../../vendor/qcom/opensource/datarmnet/core
#else
#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#endif /* CONFIG_ARCH_KHAJE */
#elif defined(__arch_um__)
#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
#else
#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
#endif /* RMNET_TRACE_INCLUDE_PATH */
#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_FILE wda

#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WDA_H

#include <linux/tracepoint.h>

TRACE_EVENT(wda_set_powersave_mode,

	TP_PROTO(int enable),

	TP_ARGS(enable),

	TP_STRUCT__entry(
		__field(int, enable)
	),

	TP_fast_assign(
		__entry->enable = enable;
	),

	TP_printk("set powersave mode to %s",
		  __entry->enable ? "enable" : "disable")
);

TRACE_EVENT(wda_client_state_up,

	TP_PROTO(u32 instance, u32 ep_type, u32 iface),

	TP_ARGS(instance, ep_type, iface),

	TP_STRUCT__entry(
		__field(u32, instance)
		__field(u32, ep_type)
		__field(u32, iface)
	),

	TP_fast_assign(
		__entry->instance = instance;
		__entry->ep_type = ep_type;
		__entry->iface = iface;
	),

	TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
		  __entry->instance, __entry->ep_type, __entry->iface)
);

TRACE_EVENT(wda_client_state_down,

	TP_PROTO(int from_cb),

	TP_ARGS(from_cb),

	TP_STRUCT__entry(
		__field(int, from_cb)
	),

	TP_fast_assign(
		__entry->from_cb = from_cb;
	),

	TP_printk("Client: Connection with WDA lost Exit by callback %d",
		  __entry->from_cb)
);

#endif /* _TRACE_WDA_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
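
/* Explanatory sketch (not part of this change): each TRACE_EVENT() above
 * expands into a static inline trace_<name>() hook once one .c file defines
 * CREATE_TRACE_POINTS before including this header, which wda_qmi.c below
 * does. Callers then emit events as plain function calls:
 *
 *	trace_wda_set_powersave_mode(1);
 *	trace_wda_client_state_up(instance, ep_type, iface);
 *	trace_wda_client_state_down(0);
 *
 * and the events appear under /sys/kernel/tracing/events/wda/ at runtime.
 */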
582
qcom/opensource/datarmnet/core/wda_qmi.c
Normal file
@ -0,0 +1,582 @@
/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/rtnetlink.h>
#include <linux/soc/qcom/qmi.h>
#include "rmnet_qmi.h"
#define CREATE_TRACE_POINTS
#include "wda.h"
#undef CREATE_TRACE_POINTS
#include "qmi_rmnet_i.h"

struct wda_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *wda_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	int restart_state;
};

static void wda_svc_config(struct work_struct *work);
/* **************************************************** */
#define WDA_SERVICE_ID_V01 0x1A
#define WDA_SERVICE_VERS_V01 0x01
#define WDA_TIMEOUT_JF msecs_to_jiffies(1000)

#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14

#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 48
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7

enum wda_powersave_config_mask_enum_v01 {
	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
	WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
	WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
	WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct wda_set_powersave_config_req_msg_v01 {
	/* Mandatory */
	struct data_ep_id_type_v01 ep_id;
	/* Optional */
	uint8_t req_data_cfg_valid;
	enum wda_powersave_config_mask_enum_v01 req_data_cfg;
};

struct wda_set_powersave_config_resp_msg_v01 {
	/* Mandatory */
	struct qmi_response_type_v01 resp;
	/* Optional */
	uint8_t data_cfg_valid;
	enum wda_powersave_config_mask_enum_v01 data_cfg;
};

struct wda_set_powersave_mode_req_msg_v01 {
	/* Mandatory */
	uint8_t powersave_control_flag;
	/* Optional */
	uint8_t allow_dfc_notify_valid;
	uint8_t allow_dfc_notify;
	uint8_t allow_bearer_id_list_valid;
	uint8_t allow_bearer_id_list_len;
	uint8_t allow_bearer_id_list[PS_MAX_BEARERS];
	uint8_t auto_shut_allow_bearer_valid;
	uint8_t auto_shut_allow_bearer;
};

struct wda_set_powersave_mode_resp_msg_v01 {
	/* Mandatory */
	struct qmi_response_type_v01 resp;
};

static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   req_data_cfg_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum
				    wda_powersave_config_mask_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   req_data_cfg),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   data_cfg_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum
				    wda_powersave_config_mask_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   data_cfg),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   powersave_control_flag),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   allow_dfc_notify_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   allow_dfc_notify),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   allow_bearer_id_list_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   allow_bearer_id_list_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = PS_MAX_BEARERS,
		.elem_size = sizeof(u8),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   allow_bearer_id_list),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   auto_shut_allow_bearer_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   auto_shut_allow_bearer),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   wda_set_powersave_mode_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
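
/* Explanatory note (an assumption about the generic kernel QMI encoder, not
 * part of this change): the three .tlv_type = 0x11 entries in the mode
 * request table above together describe a single optional TLV. QMI_OPT_FLAG
 * gates emission on allow_bearer_id_list_valid, QMI_DATA_LEN emits the
 * element count, and the VAR_LEN_ARRAY entry emits the bearer IDs, so the
 * wire format is roughly:
 *
 *	[type 0x11][u16 length][u8 count][count x u8 bearer IDs]
 */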

static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable,
				      u8 num_bearers, u8 *bearer_id)
{
	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
	struct wda_set_powersave_mode_resp_msg_v01 *resp;
	struct wda_set_powersave_mode_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	if (!data || !data->rmnet_port)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(&data->handle, &txn,
			   wda_set_powersave_mode_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->powersave_control_flag = enable;

	if (enable && num_bearers && bearer_id &&
	    num_bearers <= PS_MAX_BEARERS) {
		req->allow_dfc_notify_valid = 1;
		req->allow_dfc_notify = 1;

		req->allow_bearer_id_list_valid = 1;
		req->allow_bearer_id_list_len = num_bearers;
		memcpy(req->allow_bearer_id_list, bearer_id, num_bearers);

		req->auto_shut_allow_bearer_valid = 1;
		req->auto_shut_allow_bearer = 1;
	}

	ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
			       wda_set_powersave_mode_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}
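
/* The blocking transaction pattern above, in outline: qmi_txn_init()
 * registers the expected reply and its decoder, qmi_send_request()
 * transmits, and qmi_txn_cancel() is needed only when the send itself
 * fails. Once qmi_txn_wait() returns, the transaction is finished and
 * resp->resp.result carries the service's verdict, which is why the error
 * paths after the wait free the buffers but never cancel.
 */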

static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
					int dl_marker)
{
	struct wda_qmi_data *data = container_of(wda_handle,
						 struct wda_qmi_data, handle);
	struct wda_set_powersave_config_resp_msg_v01 *resp;
	struct wda_set_powersave_config_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(wda_handle, &txn,
			   wda_set_powersave_config_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->ep_id.ep_type = data->svc.ep_type;
	req->ep_id.iface_id = data->svc.iface_id;
	req->req_data_cfg_valid = 1;
	req->req_data_cfg = dl_marker ? WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 :
					WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01;
	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
			       QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
			       QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
			       wda_set_powersave_config_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n", __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, error: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}

static void wda_svc_config(struct work_struct *work)
{
	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;
	int rc, dl_marker = 0;

	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	dl_marker = rmnet_get_dlmarker_info(data->rmnet_port);
	rtnl_unlock();

	if (data->restart_state == 1)
		return;

	rc = wda_set_powersave_config_req(&data->handle, dl_marker);
	if (rc < 0) {
		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
		return;
	}

	if (data->restart_state == 1)
		return;
	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi) {
		rtnl_unlock();
		return;
	}

	qmi->wda_pending = NULL;
	qmi->wda_client = (void *)data;
	trace_wda_client_state_up(data->svc.instance,
				  data->svc.ep_type,
				  data->svc.iface_id);

	rtnl_unlock();

	pr_info("Connection established with the WDA Service, DL Marker %s\n",
		dl_marker ? "enabled" : "disabled");
}
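
/* Note on the rtnl_trylock() loops above (explanatory, and partly an
 * assumption about the callers): the teardown path sets restart_state and
 * may wait on this work while itself holding RTNL, so blocking in
 * rtnl_lock() here could deadlock. Spinning on rtnl_trylock() with
 * cond_resched() lets the work bail out as soon as restart_state signals
 * that the client is going away.
 */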

static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
{
	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
						 handle);

	data->ssctl.sq_family = AF_QIPCRTR;
	data->ssctl.sq_node = svc->node;
	data->ssctl.sq_port = svc->port;

	queue_work(data->wda_wq, &data->svc_arrive);

	return 0;
}

static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
{
	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
						 handle);

	if (!data)
		pr_info("%s() data is null\n", __func__);
}

static struct qmi_ops server_ops = {
	.new_server = wda_svc_arrive,
	.del_server = wda_svc_exit,
};

int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
	struct wda_qmi_data *data;
	int rc = -ENOMEM;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->wda_wq = create_singlethread_workqueue("wda_wq");
	if (!data->wda_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
	}

	data->rmnet_port = port;
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));
	INIT_WORK(&data->svc_arrive, wda_svc_config);

	rc = qmi_handle_init(&data->handle,
			     QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN,
			     &server_ops, NULL);
	if (rc < 0) {
		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
		goto err1;
	}

	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
			    WDA_SERVICE_VERS_V01, psvc->instance);
	if (rc < 0) {
		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
		goto err2;
	}

	qmi->wda_pending = (void *)data;
	return 0;

err2:
	qmi_handle_release(&data->handle);
err1:
	destroy_workqueue(data->wda_wq);
err0:
	kfree(data);
	return rc;
}
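
/* Note (explanatory, not part of this change): the client is parked in
 * qmi->wda_pending here because the WDA service has not been discovered
 * yet; once qmi_add_lookup() triggers wda_svc_arrive() and the queued
 * wda_svc_config() work completes, the same object is promoted to
 * qmi->wda_client under RTNL.
 */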

void wda_qmi_client_exit(void *wda_data)
{
	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;

	if (!data) {
		pr_info("%s() data is null\n", __func__);
		return;
	}

	data->restart_state = 1;
	trace_wda_client_state_down(0);
	destroy_workqueue(data->wda_wq);
	kfree(data);
}

int wda_set_powersave_mode(void *wda_data, uint8_t enable, u8 num_bearers,
			   u8 *bearer_id)
{
	trace_wda_set_powersave_mode(enable);
	return wda_set_powersave_mode_req(wda_data, enable, num_bearers,
					  bearer_id);
}

void wda_qmi_client_release(void *wda_data)
{
	if (!wda_data)
		return;
	qmi_handle_release(&((struct wda_qmi_data *)wda_data)->handle);
}
25
qcom/opensource/datarmnet/datarmnet_dlkm_vendor_board.mk
Normal file
@ -0,0 +1,25 @@
TARGET_DATARMNET_ENABLE := false

ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_DATARMNET_OVERRIDE), true)
TARGET_DATARMNET_ENABLE := true
endif
else
TARGET_DATARMNET_ENABLE := true
endif

ifeq ($(TARGET_DATARMNET_ENABLE), true)
#Build rmnet core
DATA_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_DLKM_BOARD_PLATFORMS_LIST += blair
DATA_DLKM_BOARD_PLATFORMS_LIST += monaco
DATA_DLKM_BOARD_PLATFORMS_LIST += pitti
DATA_DLKM_BOARD_PLATFORMS_LIST += volcano

ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_core.ko
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_ctl.ko
endif
endif
endif
@ -0,0 +1,2 @@
PRODUCT_PACKAGES += rmnet_core.ko
PRODUCT_PACKAGES += rmnet_ctl.ko
103
qcom/opensource/datarmnet/define_modules.bzl
Normal file
@ -0,0 +1,103 @@
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//build/kernel/kleaf:kernel.bzl", "ddk_module")

def define_modules(target, variant):
    kernel_build_variant = "{}_{}".format(target, variant)
    include_base = "../../../{}".format(native.package_name())

    #The below will take care of the defconfig
    #include_defconfig = ":{}_defconfig".format(variant)

    ddk_module(
        name = "{}_rmnet_ctl".format(kernel_build_variant),
        out = "rmnet_ctl.ko",
        srcs = [
            "core/rmnet_ctl_ipa.c",
            "core/rmnet_ctl.h",
            "core/rmnet_ctl_client.h",
        ],
        kconfig = "core/Kconfig",
        conditional_srcs = {
            "CONFIG_ARCH_PINEAPPLE": {
                True: [
                    "core/rmnet_ctl_client.c",
                ],
            },
            "CONFIG_ARCH_BLAIR": {
                True: [
                    "core/rmnet_ctl_client.c",
                ],
            },
            "CONFIG_ARCH_MONACO": {
                True: [
                    "core/rmnet_ctl_client.c",
                ],
            },
            "CONFIG_ARCH_PITTI": {
                True: [
                    "core/rmnet_ctl_client.c",
                ],
            },
            "CONFIG_ARCH_VOLCANO": {
                True: [
                    "core/rmnet_ctl_client.c",
                ],
            },
        },
        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
        deps = [
            "//vendor/qcom/opensource/dataipa:{}_ipam".format(kernel_build_variant),
            "//msm-kernel:all_headers",
            "//vendor/qcom/opensource/dataipa:include_headers",
        ],
    )

    ddk_module(
        name = "{}_rmnet_core".format(kernel_build_variant),
        out = "rmnet_core.ko",
        srcs = [
            "core/rmnet_config.c",
            "core/rmnet_descriptor.c",
            "core/rmnet_genl.c",
            "core/rmnet_handlers.c",
            "core/rmnet_map_command.c",
            "core/rmnet_map_data.c",
            "core/rmnet_module.c",
            "core/rmnet_vnd.c",
            "core/dfc_qmap.c",
            "core/dfc_qmi.c",
            "core/qmi_rmnet.c",
            "core/wda_qmi.c",
            "core/rmnet_ll.c",
            "core/rmnet_ll_ipa.c",
            "core/rmnet_qmap.c",
            "core/rmnet_ll_qmap.c",
        ],
        local_defines = [
            "RMNET_TRACE_INCLUDE_PATH={}/core".format(include_base),
        ],
        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
        deps = [
            ":rmnet_core_headers",
            ":{}_rmnet_ctl".format(kernel_build_variant),
            "//vendor/qcom/opensource/dataipa:{}_ipam".format(kernel_build_variant),
            "//vendor/qcom/opensource/datarmnet-ext/mem:{}_rmnet_mem".format(kernel_build_variant),
            "//msm-kernel:all_headers",
            "//vendor/qcom/opensource/dataipa:include_headers",
            "//vendor/qcom/opensource/datarmnet-ext/mem:rmnet_mem_headers",
        ],
    )

    copy_to_dist_dir(
        name = "{}_modules_dist".format(kernel_build_variant),
        data = [
            "{}_rmnet_core".format(kernel_build_variant),
            "{}_rmnet_ctl".format(kernel_build_variant),
        ],
        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
        flat = True,
        wipe_dist_dir = False,
        allow_duplicate_filenames = False,
        mode_overrides = {"**/*": "644"},
        log = "info",
    )