s390/time,idle: get rid of unsigned long long
Get rid of unsigned long long, and use unsigned long instead everywhere. The usage of unsigned long long is a leftover from 31-bit kernel support. Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
parent
a38fd87484
commit
eba8e1af5a
|
@@ -14,12 +14,12 @@
|
|||
|
||||
struct s390_idle_data {
|
||||
seqcount_t seqcount;
|
||||
unsigned long long idle_count;
|
||||
unsigned long long idle_time;
|
||||
unsigned long long clock_idle_enter;
|
||||
unsigned long long clock_idle_exit;
|
||||
unsigned long long timer_idle_enter;
|
||||
unsigned long long timer_idle_exit;
|
||||
unsigned long idle_count;
|
||||
unsigned long idle_time;
|
||||
unsigned long clock_idle_enter;
|
||||
unsigned long clock_idle_exit;
|
||||
unsigned long timer_idle_enter;
|
||||
unsigned long timer_idle_exit;
|
||||
unsigned long mt_cycles_enter[8];
|
||||
};
|
||||
|
||||
|
|
|
@@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];
|
|||
|
||||
/* Query TOD offset result */
|
||||
struct ptff_qto {
|
||||
unsigned long long physical_clock;
|
||||
unsigned long long tod_offset;
|
||||
unsigned long long logical_tod_offset;
|
||||
unsigned long long tod_epoch_difference;
|
||||
unsigned long physical_clock;
|
||||
unsigned long tod_offset;
|
||||
unsigned long logical_tod_offset;
|
||||
unsigned long tod_epoch_difference;
|
||||
} __packed;
|
||||
|
||||
static inline int ptff_query(unsigned int nr)
|
||||
|
@@ -151,9 +151,9 @@ struct ptff_qui {
|
|||
rc; \
|
||||
})
|
||||
|
||||
static inline unsigned long long local_tick_disable(void)
|
||||
static inline unsigned long local_tick_disable(void)
|
||||
{
|
||||
unsigned long long old;
|
||||
unsigned long old;
|
||||
|
||||
old = S390_lowcore.clock_comparator;
|
||||
S390_lowcore.clock_comparator = clock_comparator_max;
|
||||
|
@@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
|
|||
return old;
|
||||
}
|
||||
|
||||
static inline void local_tick_enable(unsigned long long comp)
|
||||
static inline void local_tick_enable(unsigned long comp)
|
||||
{
|
||||
S390_lowcore.clock_comparator = comp;
|
||||
set_clock_comparator(S390_lowcore.clock_comparator);
|
||||
|
@@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)
|
|||
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
|
||||
typedef unsigned long long cycles_t;
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
static inline unsigned long get_tod_clock(void)
|
||||
{
|
||||
union tod_clock clk;
|
||||
|
||||
|
@@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
|
|||
return clk.tod;
|
||||
}
|
||||
|
||||
static inline unsigned long long get_tod_clock_fast(void)
|
||||
static inline unsigned long get_tod_clock_fast(void)
|
||||
{
|
||||
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
|
||||
unsigned long long clk;
|
||||
unsigned long clk;
|
||||
|
||||
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
|
||||
return clk;
|
||||
|
@@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
|
|||
* Therefore preemption must be disabled, otherwise the returned
|
||||
* value is not guaranteed to be monotonic.
|
||||
*/
|
||||
static inline unsigned long long get_tod_clock_monotonic(void)
|
||||
static inline unsigned long get_tod_clock_monotonic(void)
|
||||
{
|
||||
unsigned long long tod;
|
||||
unsigned long tod;
|
||||
|
||||
preempt_disable_notrace();
|
||||
tod = get_tod_clock() - tod_clock_base.tod;
|
||||
|
@@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
|
|||
* -> ns = (th * 125) + ((tl * 125) >> 9);
|
||||
*
|
||||
*/
|
||||
static inline unsigned long long tod_to_ns(unsigned long long todval)
|
||||
static inline unsigned long tod_to_ns(unsigned long todval)
|
||||
{
|
||||
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
|
||||
}
|
||||
|
@@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
|
|||
*
|
||||
* Returns: true if a is later than b
|
||||
*/
|
||||
static inline int tod_after(unsigned long long a, unsigned long long b)
|
||||
static inline int tod_after(unsigned long a, unsigned long b)
|
||||
{
|
||||
if (MACHINE_HAS_SCC)
|
||||
return (long long) a > (long long) b;
|
||||
return (long) a > (long) b;
|
||||
return a > b;
|
||||
}
|
||||
|
||||
|
@@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
|
|||
*
|
||||
* Returns: true if a is later than b
|
||||
*/
|
||||
static inline int tod_after_eq(unsigned long long a, unsigned long long b)
|
||||
static inline int tod_after_eq(unsigned long a, unsigned long b)
|
||||
{
|
||||
if (MACHINE_HAS_SCC)
|
||||
return (long long) a >= (long long) b;
|
||||
return (long) a >= (long) b;
|
||||
return a >= b;
|
||||
}
|
||||
|
||||
|
|
|
@@ -47,7 +47,7 @@ void account_idle_time_irq(void)
|
|||
void arch_cpu_idle(void)
|
||||
{
|
||||
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
|
||||
unsigned long long idle_time;
|
||||
unsigned long idle_time;
|
||||
unsigned long psw_mask;
|
||||
|
||||
/* Wait for external, I/O or machine check interrupt. */
|
||||
|
@@ -73,7 +73,7 @@ static ssize_t show_idle_count(struct device *dev,
|
|||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
|
||||
unsigned long long idle_count;
|
||||
unsigned long idle_count;
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
|
@@ -82,14 +82,14 @@ static ssize_t show_idle_count(struct device *dev,
|
|||
if (READ_ONCE(idle->clock_idle_enter))
|
||||
idle_count++;
|
||||
} while (read_seqcount_retry(&idle->seqcount, seq));
|
||||
return sprintf(buf, "%llu\n", idle_count);
|
||||
return sprintf(buf, "%lu\n", idle_count);
|
||||
}
|
||||
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
|
||||
|
||||
static ssize_t show_idle_time(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
|
||||
unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
|
||||
unsigned int seq;
|
||||
|
||||
|
@@ -109,14 +109,14 @@ static ssize_t show_idle_time(struct device *dev,
|
|||
}
|
||||
}
|
||||
idle_time += in_idle;
|
||||
return sprintf(buf, "%llu\n", idle_time >> 12);
|
||||
return sprintf(buf, "%lu\n", idle_time >> 12);
|
||||
}
|
||||
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
|
||||
|
||||
u64 arch_cpu_idle_time(int cpu)
|
||||
{
|
||||
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
|
||||
unsigned long long now, idle_enter, idle_exit, in_idle;
|
||||
unsigned long now, idle_enter, idle_exit, in_idle;
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
|
|
|
@@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);
|
|||
|
||||
unsigned char ptff_function_mask[16];
|
||||
|
||||
static unsigned long long lpar_offset;
|
||||
static unsigned long long initial_leap_seconds;
|
||||
static unsigned long long tod_steering_end;
|
||||
static long long tod_steering_delta;
|
||||
static unsigned long lpar_offset;
|
||||
static unsigned long initial_leap_seconds;
|
||||
static unsigned long tod_steering_end;
|
||||
static long tod_steering_delta;
|
||||
|
||||
/*
|
||||
* Get time offsets with PTFF
|
||||
|
@@ -96,7 +96,7 @@ void __init time_early_init(void)
|
|||
|
||||
/* get initial leap seconds */
|
||||
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
|
||||
initial_leap_seconds = (unsigned long long)
|
||||
initial_leap_seconds = (unsigned long)
|
||||
((long) qui.old_leap * 4096000000L);
|
||||
}
|
||||
|
||||
|
@@ -222,7 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
|
|||
|
||||
static u64 read_tod_clock(struct clocksource *cs)
|
||||
{
|
||||
unsigned long long now, adj;
|
||||
unsigned long now, adj;
|
||||
|
||||
preempt_disable(); /* protect from changes to steering parameters */
|
||||
now = get_tod_clock();
|
||||
|
@@ -362,7 +362,7 @@ static inline int check_sync_clock(void)
|
|||
* Apply clock delta to the global data structures.
|
||||
* This is called once on the CPU that performed the clock sync.
|
||||
*/
|
||||
static void clock_sync_global(unsigned long long delta)
|
||||
static void clock_sync_global(unsigned long delta)
|
||||
{
|
||||
unsigned long now, adj;
|
||||
struct ptff_qto qto;
|
||||
|
@@ -378,7 +378,7 @@ static void clock_sync_global(unsigned long long delta)
|
|||
-(adj >> 15) : (adj >> 15);
|
||||
tod_steering_delta += delta;
|
||||
if ((abs(tod_steering_delta) >> 48) != 0)
|
||||
panic("TOD clock sync offset %lli is too large to drift\n",
|
||||
panic("TOD clock sync offset %li is too large to drift\n",
|
||||
tod_steering_delta);
|
||||
tod_steering_end = now + (abs(tod_steering_delta) << 15);
|
||||
vdso_data->arch_data.tod_steering_end = tod_steering_end;
|
||||
|
@@ -394,7 +394,7 @@ static void clock_sync_global(unsigned long long delta)
|
|||
* Apply clock delta to the per-CPU data structures of this CPU.
|
||||
* This is called for each online CPU after the call to clock_sync_global.
|
||||
*/
|
||||
static void clock_sync_local(unsigned long long delta)
|
||||
static void clock_sync_local(unsigned long delta)
|
||||
{
|
||||
/* Add the delta to the clock comparator. */
|
||||
if (S390_lowcore.clock_comparator != clock_comparator_max) {
|
||||
|
@@ -418,7 +418,7 @@ static void __init time_init_wq(void)
|
|||
struct clock_sync_data {
|
||||
atomic_t cpus;
|
||||
int in_sync;
|
||||
unsigned long long clock_delta;
|
||||
unsigned long clock_delta;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@@ -538,7 +538,7 @@ static int stpinfo_valid(void)
|
|||
static int stp_sync_clock(void *data)
|
||||
{
|
||||
struct clock_sync_data *sync = data;
|
||||
unsigned long long clock_delta, flags;
|
||||
u64 clock_delta, flags;
|
||||
static int first;
|
||||
int rc;
|
||||
|
||||
|
@@ -720,8 +720,8 @@ static ssize_t ctn_id_show(struct device *dev,
|
|||
|
||||
mutex_lock(&stp_mutex);
|
||||
if (stpinfo_valid())
|
||||
ret = sprintf(buf, "%016llx\n",
|
||||
*(unsigned long long *) stp_info.ctnid);
|
||||
ret = sprintf(buf, "%016lx\n",
|
||||
*(unsigned long *) stp_info.ctnid);
|
||||
mutex_unlock(&stp_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
@@ -794,7 +794,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
|
|||
if (!stzi.lsoib.p)
|
||||
return sprintf(buf, "0,0\n");
|
||||
|
||||
return sprintf(buf, "%llu,%d\n",
|
||||
return sprintf(buf, "%lu,%d\n",
|
||||
tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
|
||||
stzi.lsoib.nlso - stzi.lsoib.also);
|
||||
}
|
||||
|
|
|
@@ -1287,7 +1287,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
|
|||
/* already expired? */
|
||||
if (cputm >> 63)
|
||||
return 0;
|
||||
return min(sltime, tod_to_ns(cputm));
|
||||
return min_t(u64, sltime, tod_to_ns(cputm));
|
||||
}
|
||||
} else if (cpu_timer_interrupts_enabled(vcpu)) {
|
||||
sltime = kvm_s390_get_cpu_timer(vcpu);
|
||||
|
|
|
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
|
|||
orb = &private->orb;
|
||||
cc = stsch(sch->schid, &schib);
|
||||
|
||||
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
|
||||
printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
|
||||
"device information:\n", get_tod_clock());
|
||||
printk(KERN_WARNING "cio: orb:\n");
|
||||
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
|
||||
|
|
Loading…
Reference in New Issue