Do some ground preparatory work before adding guest_enter() and guest_exit() context tracking callbacks. Those will be later used to read the guest cputime safely when we run in full dynticks mode. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Gleb Natapov <gleb@redhat.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Namhyung Kim <namhyung.kim@lge.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de>
71 lines
2.3 KiB
C
71 lines
2.3 KiB
C
#ifndef _LINUX_KERNEL_VTIME_H
|
|
#define _LINUX_KERNEL_VTIME_H
|
|
|
|
struct task_struct;
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Virtual CPU time accounting is built in: an implementation elsewhere
 * provides the real accounting hooks declared below.
 */
extern void vtime_task_switch(struct task_struct *prev);
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);
extern void vtime_account(struct task_struct *tsk);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/*
 * Generic implementation: whether accounting is active is decided by the
 * implementation (a real function, not a compile-time constant) —
 * presumably so it can depend on runtime state; confirm against the
 * definition elsewhere.
 */
extern bool vtime_accounting_enabled(void);
#else
/* Native (arch) vtime accounting is always active when built in. */
static inline bool vtime_accounting_enabled(void) { return true; }
#endif

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

/*
 * Empty stubs so call sites need not be wrapped in #ifdefs.
 * NOTE(review): no !CONFIG stub is provided for vtime_account_idle() —
 * presumably it has no callers outside CONFIG_VIRT_CPU_ACCOUNTING code;
 * verify before relying on it in common code.
 */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_account(struct task_struct *tsk) { }
static inline bool vtime_accounting_enabled(void) { return false; }
#endif
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* The generic implementation needs no arch-specific hook on task switch. */
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }

/*
 * Called when entering user mode: flush the time accumulated so far as
 * system time (presumably so that subsequent time is charged to user
 * until vtime_user_exit() — confirm against the accounting core).
 */
static inline void vtime_user_enter(struct task_struct *tsk)
{
	vtime_account_system(tsk);
}

/* Called when leaving user mode: flush the elapsed time as user time. */
static inline void vtime_user_exit(struct task_struct *tsk)
{
	vtime_account_user(tsk);
}
#else
/* Without the generic implementation, user enter/exit do no accounting. */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
#endif
|
|
|
|
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account time spent in hard/soft irq context; invoked from both
 * vtime_account_irq_enter() and vtime_account_irq_exit() below.
 */
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif
|
|
|
|
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
	/*
	 * A hardirq can interrupt the idle task at any time, so we need
	 * vtime_account(), which performs the idle check under
	 * CONFIG_VIRT_CPU_ACCOUNTING.  A softirq could also interrupt the
	 * idle task directly if it calls local_bh_enable(); such a case
	 * probably doesn't exist, but we never know.  Ksoftirqd is not a
	 * concern because idle time is flushed on context switch, and
	 * softirqs run at the end of a hardirq are not a problem either
	 * because idle time has already been flushed on hardirq entry.
	 */
	vtime_account(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/*
	 * Leaving a hardirq or softirq: the elapsed time is always
	 * charged as hardirq/softirq cputime.
	 */
	vtime_account_system(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
#endif /* _LINUX_KERNEL_VTIME_H */
|