Merge keystone/android12-5.10-keystone-qcom-release.43+ (960f45e) into msm-5.10

* refs/heads/tmp-960f45e:
  ANDROID: minor fixups of xt_IDLETIMER support
  FROMGIT: usb: typec: Add the missed altmode_id_remove() in typec_register_altmode()
  FROMGIT: usb: typec: tcpm: Relax disconnect threshold during power negotiation
  FROMGIT: usb: typec: tcpm: Ignore Vsafe0v in PR_SWAP_SNK_SRC_SOURCE_ON state
  FROMGIT: usb: typec: tcpci: Fix up sink disconnect thresholds for PD
  ANDROID: GKI: Enable some necessary CFG80211 configs for WIFI
  ANDROID: Add send_sig_info to the reserved symbol list
  FROMLIST: kbuild: mkcompile_h: consider timestamp if KBUILD_BUILD_TIMESTAMP is set
  gki_config: Disable CONFIG_DEBUG_STACK_USAGE
  FROMGIT: mm: improve mprotect(R|W) efficiency on pages referenced once
  FROMGIT: mm: slub: move sysfs slab alloc/free interfaces to debugfs

Change-Id: Ie15dfc375a177f88b34b8607a83d56ea93a5edd1
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Author: Ivaylo Georgiev
Date:   2021-06-20 22:48:11 -07:00
Commit: ec9ebbfc5a

14 changed files with 68719 additions and 64739 deletions

(File diff suppressed because it is too large.)


@@ -1453,6 +1453,7 @@
sdio_writel
sdio_writesb
send_sig
send_sig_info
seq_hex_dump
seq_lseek
seq_open


@@ -267,8 +267,8 @@ CONFIG_BT_HCIUART_BCM=y
CONFIG_BT_HCIUART_QCA=y
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
# CONFIG_CFG80211_DEFAULT_PS is not set
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_MAC80211=y
CONFIG_RFKILL=y
CONFIG_PCI=y
@@ -665,7 +665,6 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_MISC is not set
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_PINNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KASAN=y
CONFIG_KASAN_HW_TAGS=y


@@ -244,6 +244,8 @@ CONFIG_BT_HCIUART_BCM=y
CONFIG_BT_HCIUART_QCA=y
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_REG_CELLULAR_HINTS=y
# CONFIG_CFG80211_DEFAULT_PS is not set
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_MAC80211=y
@@ -601,7 +603,6 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_MISC is not set
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_PINNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=500


@@ -572,8 +572,10 @@ typec_register_altmode(struct device *parent,
int ret;
alt = kzalloc(sizeof(*alt), GFP_KERNEL);
if (!alt)
if (!alt) {
altmode_id_remove(parent, id);
return ERR_PTR(-ENOMEM);
}
alt->adev.svid = desc->svid;
alt->adev.mode = desc->mode;
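
The fix follows the usual unwind-on-error idiom: the mode ID claimed earlier in typec_register_altmode() must now be released on the allocation-failure path, or it leaks. A standalone sketch of the idiom, assuming hypothetical claim_id()/release_id() stand-ins rather than the real altmode ID helpers:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource pair standing in for the altmode ID helpers. */
static int claim_id(void) { puts("id claimed"); return 7; }
static void release_id(int id) { printf("id %d released\n", id); }

struct altmode { int id; };

static struct altmode *register_altmode(void)
{
	int id = claim_id();
	struct altmode *alt = malloc(sizeof(*alt));

	if (!alt) {
		release_id(id);	/* without this, the id leaks on failure */
		return NULL;
	}
	alt->id = id;
	return alt;
}

int main(void)
{
	struct altmode *alt = register_altmode();

	if (alt) {
		release_id(alt->id);
		free(alt);
	}
	return 0;
}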


@@ -22,8 +22,12 @@
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
#define AUTO_DISCHARGE_PD_HEADROOM_MV 850
#define AUTO_DISCHARGE_PPS_HEADROOM_MV 1250
#define VSINKPD_MIN_IR_DROP_MV 750
#define VSRC_NEW_MIN_PERCENT 95
#define VSRC_VALID_MIN_MV 500
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
@@ -364,11 +368,13 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
} else if (mode == TYPEC_PWR_MODE_PD) {
if (pps_active)
threshold = (95 * requested_vbus_voltage_mv / 100) -
AUTO_DISCHARGE_PD_HEADROOM_MV;
threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
VSINKDISCONNECT_PD_MIN_PERCENT / 100;
else
threshold = (95 * requested_vbus_voltage_mv / 100) -
AUTO_DISCHARGE_PPS_HEADROOM_MV;
threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
VSINKDISCONNECT_PD_MIN_PERCENT / 100;
} else {
/* 3.5V for non-pd sink */
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
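
To make the new sink-disconnect arithmetic concrete, the TYPEC_PWR_MODE_PD branch can be checked in isolation: a fixed 9 V contract gives ((95 * 9000 / 100) - 750 - 500) * 90 / 100 = 6570 mV, and a 5 V PPS contract gives 3510 mV. A minimal userspace sketch (plain C, not the kernel function; constants copied from the defines above):

#include <stdio.h>

#define VSINKPD_MIN_IR_DROP_MV		750
#define VSRC_NEW_MIN_PERCENT		95
#define VSRC_VALID_MIN_MV		500
#define VPPS_NEW_MIN_PERCENT		95
#define VPPS_VALID_MIN_MV		100
#define VSINKDISCONNECT_PD_MIN_PERCENT	90

/* Mirrors the TYPEC_PWR_MODE_PD branch above, in plain integer math. */
static unsigned int pd_threshold_mv(unsigned int requested_mv, int pps_active)
{
	if (pps_active)
		return ((VPPS_NEW_MIN_PERCENT * requested_mv / 100) -
			VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
			VSINKDISCONNECT_PD_MIN_PERCENT / 100;
	return ((VSRC_NEW_MIN_PERCENT * requested_mv / 100) -
		VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
		VSINKDISCONNECT_PD_MIN_PERCENT / 100;
}

int main(void)
{
	printf("PD  9 V -> %u mV\n", pd_threshold_mv(9000, 0)); /* 6570 */
	printf("PPS 5 V -> %u mV\n", pd_threshold_mv(5000, 1)); /* 3510 */
	return 0;
}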


@@ -2629,6 +2629,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
} else {
next_state = SNK_WAIT_CAPABILITIES;
}
/* Threshold was relaxed before sending Request. Restore it back. */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
tcpm_set_state(port, next_state, 0);
break;
case SNK_NEGOTIATE_PPS_CAPABILITIES:
@@ -2642,6 +2647,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->send_discover)
port->vdm_sm_running = true;
/* Threshold was relaxed before sending Request. Restore it back. */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
tcpm_set_state(port, SNK_READY, 0);
break;
case DR_SWAP_SEND:
@@ -3361,6 +3371,12 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
if (ret < 0)
return ret;
/*
* Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
* It is safer to modify the threshold here.
*/
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
memset(&msg, 0, sizeof(msg));
msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
port->pwr_role,
@@ -3458,6 +3474,9 @@ static int tcpm_pd_send_pps_request(struct tcpm_port *port)
if (ret < 0)
return ret;
/* Relax the threshold as voltage will be adjusted right after Accept Message. */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
memset(&msg, 0, sizeof(msg));
msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
port->pwr_role,
@@ -4285,6 +4304,10 @@ static void run_state_machine(struct tcpm_port *port)
port->hard_reset_count = 0;
ret = tcpm_pd_send_request(port);
if (ret < 0) {
/* Restore back to the original state */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
/* Let the Source send capabilities again. */
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
} else {
@@ -4295,6 +4318,10 @@ static void run_state_machine(struct tcpm_port *port)
case SNK_NEGOTIATE_PPS_CAPABILITIES:
ret = tcpm_pd_send_pps_request(port);
if (ret < 0) {
/* Restore back to the original state */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
port->pps_status = ret;
/*
* If this was called due to updates to sink
@@ -5332,6 +5359,7 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
}
break;
case PR_SWAP_SNK_SRC_SINK_OFF:
case PR_SWAP_SNK_SRC_SOURCE_ON:
/* Do nothing, vsafe0v is expected during transition */
break;
default:
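
Taken together, these hunks bracket every power request with a relax-then-restore pair: the threshold drops to the non-PD floor before the Request goes out (VBUS may move as soon as the partner Accepts), and is reinstated once the exchange concludes or the send fails. A condensed sketch of that control flow, assuming simplified stand-ins for the tcpm helpers (an illustration, not the driver's state machine):

#include <stdbool.h>
#include <stdio.h>

enum pwr_mode { MODE_USB, MODE_PD };

struct port { bool pps_active; unsigned int supply_mv; };

/* Stand-in for tcpm_set_auto_vbus_discharge_threshold(). */
static void set_threshold(struct port *p, enum pwr_mode mode,
			  bool pps, unsigned int mv)
{
	printf("threshold: mode=%s pps=%d vbus=%u mV\n",
	       mode == MODE_PD ? "PD" : "USB", pps, mv);
	(void)p;
}

static void negotiate(struct port *port, bool send_ok)
{
	/* Relax before the Request: voltage may change right after Accept. */
	set_threshold(port, MODE_USB, false, 0);

	if (!send_ok) {
		/* Send failed: restore the previous contract's threshold. */
		set_threshold(port, MODE_PD, port->pps_active, port->supply_mv);
		return;
	}
	/* On Accept/Reject the ctrl-request handler restores it likewise. */
	set_threshold(port, MODE_PD, port->pps_active, port->supply_mv);
}

int main(void)
{
	struct port p = { .pps_active = false, .supply_mv = 9000 };

	negotiate(&p, true);
	return 0;
}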


@@ -5652,6 +5652,7 @@ net/wireless/core.c
net/wireless/core.h
net/wireless/debugfs.h
net/wireless/ethtool.c
net/wireless/extra-certs.c
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c


@@ -35,6 +35,51 @@
#include "internal.h"
/* Determine whether we can avoid taking write faults for known dirty pages. */
static bool may_avoid_write_fault(pte_t pte, struct vm_area_struct *vma,
unsigned long cp_flags)
{
/*
* The dirty accountable bit indicates that we can always make the page
* writable regardless of the number of references.
*/
if (!(cp_flags & MM_CP_DIRTY_ACCT)) {
/* Otherwise, we must have exclusive access to the page. */
if (!(vma_is_anonymous(vma) && (vma->vm_flags & VM_WRITE)))
return false;
if (page_count(pte_page(pte)) != 1)
return false;
}
/*
* Don't do this optimization for clean pages as we need to be notified
* of the transition from clean to dirty.
*/
if (!pte_dirty(pte))
return false;
/* Same for softdirty. */
if (!pte_soft_dirty(pte) && (vma->vm_flags & VM_SOFTDIRTY))
return false;
/*
* For userfaultfd the user program needs to monitor write faults so we
* can't do this optimization.
*/
if (pte_uffd_wp(pte))
return false;
/*
* It is unclear whether this optimization can be done safely for NUMA
* pages.
*/
if (cp_flags & MM_CP_PROT_NUMA)
return false;
return true;
}
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
@@ -43,7 +88,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
spinlock_t *ptl;
unsigned long pages = 0;
int target_node = NUMA_NO_NODE;
bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
@@ -131,12 +175,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = pte_clear_uffd_wp(ptent);
}
/* Avoid taking write faults for known dirty pages */
if (dirty_accountable && pte_dirty(ptent) &&
(pte_soft_dirty(ptent) ||
!(vma->vm_flags & VM_SOFTDIRTY))) {
if (may_avoid_write_fault(ptent, vma, cp_flags))
ptent = pte_mkwrite(ptent);
}
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
pages++;
} else if (is_swap_pte(oldpte)) {
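
The case the new predicate targets is visible from userspace: with this change, a dirtied private anonymous page that is mapped writable again via mprotect(R|W) keeps a writable PTE, so the following store takes no write fault. A minimal sketch of that sequence (assumes Linux; on a typical single-threaded run the freshly faulted page's refcount is 1):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	p[0] = 'x';			/* fault in and dirty the page */
	mprotect(p, len, PROT_READ);	/* drop write permission */
	mprotect(p, len, PROT_READ | PROT_WRITE);
	/*
	 * With the patch, change_pte_range() makes the PTE writable here
	 * (may_avoid_write_fault() passes), so this store takes no fault.
	 */
	p[0] = 'y';
	printf("%c\n", p[0]);
	munmap(p, len);
	return 0;
}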


@@ -681,4 +681,10 @@ static inline bool slab_want_init_on_free(struct kmem_cache *c)
return false;
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif
#endif /* MM_SLAB_H */


@@ -438,6 +438,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
rcu_barrier();
list_for_each_entry_safe(s, s2, &to_destroy, list) {
debugfs_slab_release(s);
kfence_shutdown_cache(s);
#ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_release(s);
@@ -465,6 +466,7 @@ static int shutdown_cache(struct kmem_cache *s)
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
kfence_shutdown_cache(s);
debugfs_slab_release(s);
#ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_unlink(s);
sysfs_slab_release(s);

mm/slub.c

@@ -36,6 +36,7 @@
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <trace/events/kmem.h>
#include <trace/hooks/mm.h>
@@ -210,6 +211,12 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
#endif
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
@@ -4497,6 +4504,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
if (err)
__kmem_cache_release(s);
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
return err;
}
@@ -4637,6 +4647,8 @@ static long validate_slab_cache(struct kmem_cache *s)
return count;
}
#ifdef CONFIG_DEBUG_FS
/*
* Generate lists of code addresses where slabcache objects are allocated
* and freed.
@@ -4660,6 +4672,8 @@ struct loc_track {
struct location *loc;
};
static struct dentry *slab_debugfs_root;
static void free_loc_track(struct loc_track *t)
{
if (t->max)
@@ -4776,87 +4790,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
add_location(t, s, get_track(s, p, alloc));
put_map(map);
}
static int list_locations(struct kmem_cache *s, char *buf,
enum track_item alloc)
{
int len = 0;
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
struct kmem_cache_node *n;
if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_KERNEL)) {
return sprintf(buf, "Out of memory\n");
}
/* Push back cpu slabs */
flush_all(s);
for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
struct page *page;
if (!atomic_long_read(&n->nr_slabs))
continue;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
process_slab(&t, s, page, alloc);
list_for_each_entry(page, &n->full, slab_list)
process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
len += sprintf(buf + len, "%pS", (void *)l->addr);
else
len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) {
len += sprintf(buf + len, " age=%ld/%ld/%ld",
l->min_time,
(long)div_u64(l->sum_time, l->count),
l->max_time);
} else
len += sprintf(buf + len, " age=%ld",
l->min_time);
if (l->min_pid != l->max_pid)
len += sprintf(buf + len, " pid=%ld-%ld",
l->min_pid, l->max_pid);
else
len += sprintf(buf + len, " pid=%ld",
l->min_pid);
if (num_online_cpus() > 1 &&
!cpumask_empty(to_cpumask(l->cpus)) &&
len < PAGE_SIZE - 60)
len += scnprintf(buf + len, PAGE_SIZE - len - 50,
" cpus=%*pbl",
cpumask_pr_args(to_cpumask(l->cpus)));
if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60)
len += scnprintf(buf + len, PAGE_SIZE - len - 50,
" nodes=%*pbl",
nodemask_pr_args(&l->nodes));
len += sprintf(buf + len, "\n");
}
free_loc_track(&t);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SLUB_DEBUG */
#ifdef SLUB_RESILIENCY_TEST
@@ -5316,21 +5250,6 @@ static ssize_t validate_store(struct kmem_cache *s,
}
SLAB_ATTR(validate);
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);
static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */
#ifdef CONFIG_FAILSLAB
@@ -5491,8 +5410,6 @@ static struct attribute *slab_attrs[] = {
&poison_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
&alloc_calls_attr.attr,
&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
@@ -5781,6 +5698,181 @@ static int __init slab_sysfs_init(void)
__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */
#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
struct location *l;
unsigned int idx = *(unsigned int *)v;
struct loc_track *t = seq->private;
if (idx < t->count) {
l = &t->loc[idx];
seq_printf(seq, "%7ld ", l->count);
if (l->addr)
seq_printf(seq, "%pS", (void *)l->addr);
else
seq_puts(seq, "<not-available>");
if (l->sum_time != l->min_time) {
seq_printf(seq, " age=%ld/%llu/%ld",
l->min_time, div_u64(l->sum_time, l->count),
l->max_time);
} else
seq_printf(seq, " age=%ld", l->min_time);
if (l->min_pid != l->max_pid)
seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
else
seq_printf(seq, " pid=%ld",
l->min_pid);
if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
seq_printf(seq, " cpus=%*pbl",
cpumask_pr_args(to_cpumask(l->cpus)));
if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
seq_printf(seq, " nodes=%*pbl",
nodemask_pr_args(&l->nodes));
seq_puts(seq, "\n");
}
if (!idx && !t->count)
seq_puts(seq, "No data\n");
return 0;
}
static void slab_debugfs_stop(struct seq_file *seq, void *v)
{
kfree(v);
}
static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
loff_t *spos = v;
struct loc_track *t = seq->private;
if (*ppos < t->count) {
*ppos = ++*spos;
return spos;
}
*ppos = ++*spos;
return NULL;
}
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
loff_t *spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
if (!spos)
return NULL;
*spos = *ppos;
return spos;
}
static const struct seq_operations slab_debugfs_sops = {
.start = slab_debugfs_start,
.next = slab_debugfs_next,
.stop = slab_debugfs_stop,
.show = slab_debugfs_show,
};
static int slab_debug_trace_open(struct inode *inode, struct file *filep)
{
struct kmem_cache_node *n;
enum track_item alloc;
int node;
struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
sizeof(struct loc_track));
struct kmem_cache *s = file_inode(filep)->i_private;
if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
alloc = TRACK_ALLOC;
else
alloc = TRACK_FREE;
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
return -ENOMEM;
/* Push back cpu slabs */
flush_all(s);
for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
struct page *page;
if (!atomic_long_read(&n->nr_slabs))
continue;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
process_slab(t, s, page, alloc);
list_for_each_entry(page, &n->full, slab_list)
process_slab(t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
return 0;
}
static int slab_debug_trace_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct loc_track *t = seq->private;
free_loc_track(t);
return seq_release_private(inode, file);
}
static const struct file_operations slab_debugfs_fops = {
.open = slab_debug_trace_open,
.read = seq_read,
.llseek = seq_lseek,
.release = slab_debug_trace_release,
};
static void debugfs_slab_add(struct kmem_cache *s)
{
struct dentry *slab_cache_dir;
if (unlikely(!slab_debugfs_root))
return;
slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
debugfs_create_file("alloc_traces", 0400,
slab_cache_dir, s, &slab_debugfs_fops);
debugfs_create_file("free_traces", 0400,
slab_cache_dir, s, &slab_debugfs_fops);
}
void debugfs_slab_release(struct kmem_cache *s)
{
debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
}
static int __init slab_debugfs_init(void)
{
struct kmem_cache *s;
slab_debugfs_root = debugfs_create_dir("slab", NULL);
list_for_each_entry(s, &slab_caches, list)
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
return 0;
}
__initcall(slab_debugfs_init);
#endif
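
After this change the traces live in debugfs rather than sysfs: /sys/kernel/debug/slab/<cache>/alloc_traces and free_traces, created only for caches with SLAB_STORE_USER set (e.g. when booting with slub_debug=U). A small reader sketch, assuming the default debugfs mount point and an illustrative cache name (the 0400 mode means it needs root):

#include <stdio.h>

int main(void)
{
	/* "kmalloc-64" is only an example; use a cache present on the system. */
	const char *path = "/sys/kernel/debug/slab/kmalloc-64/alloc_traces";
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}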
/*
* The /proc/slabinfo ABI
*/


@@ -112,7 +112,7 @@ static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
iface);
if (NLMSG_MAX_SIZE <= res) {
pr_err("message too long (%d)", res);
pr_err("message too long (%d)\n", res);
return;
}
@@ -122,25 +122,25 @@ static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
state ? "active" : "inactive");
if (NLMSG_MAX_SIZE <= res) {
pr_err("message too long (%d)", res);
pr_err("message too long (%d)\n", res);
return;
}
if (state) {
res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
if (NLMSG_MAX_SIZE <= res)
pr_err("message too long (%d)", res);
pr_err("message too long (%d)\n", res);
} else {
res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
if (NLMSG_MAX_SIZE <= res)
pr_err("message too long (%d)", res);
pr_err("message too long (%d)\n", res);
}
time_ns = timespec64_to_ns(&ts);
res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
if (NLMSG_MAX_SIZE <= res) {
timestamp_msg[0] = '\0';
pr_err("message too long (%d)", res);
pr_err("message too long (%d)\n", res);
}
pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
@@ -323,12 +323,12 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
pr_debug("couldn't add file to sysfs");
pr_debug("couldn't add file to sysfs\n");
goto out_free_attr;
}
list_add(&info->timer->entry, &idletimer_tg_list);
pr_debug("timer type value is 0.");
pr_debug("timer type value is 0.\n");
info->timer->timer_type = 0;
info->timer->refcnt = 1;
info->timer->send_nl_msg = false;
@@ -389,7 +389,7 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
pr_debug("couldn't add file to sysfs");
pr_debug("couldn't add file to sysfs\n");
goto out_free_attr;
}
@@ -397,7 +397,7 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
kobject_uevent(idletimer_tg_kobj,KOBJ_ADD);
list_add(&info->timer->entry, &idletimer_tg_list);
pr_debug("timer type value is %u", info->timer_type);
pr_debug("timer type value is %u\n", info->timer_type);
info->timer->timer_type = info->timer_type;
info->timer->refcnt = 1;
info->timer->send_nl_msg = (info->send_nl_msg != 0);


@@ -70,15 +70,23 @@ UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)"
# Only replace the real compile.h if the new one is different,
# in order to preserve the timestamp and avoid unnecessary
# recompilations.
# We don't consider the file changed if only the date/time changed.
# We don't consider the file changed if only the date/time changed,
# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. for
# reproducible builds with that value referring to a commit timestamp).
# A kernel config change will increase the generation number, thus
# causing compile.h to be updated (including date/time) due to the
# changed comment in the
# first line.
if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then
IGNORE_PATTERN="UTS_VERSION"
else
IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED"
fi
if [ -r $TARGET ] && \
grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \
grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \
grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \
grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \
cmp -s .tmpver.1 .tmpver.2; then
rm -f .tmpcompile
else