arch/s390/kernel/time.c

1/*
2 * arch/s390/kernel/time.c
3 * Time of day based timer functions.
4 *
5 * S390 version
6 * Copyright IBM Corp. 1999, 2008
7 * Author(s): Hartmut Penner (hp@de.ibm.com),
8 * Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
10 *
11 * Derived from "arch/i386/kernel/time.c"
12 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
13 */
14
15#define KMSG_COMPONENT "time"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/errno.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/kernel.h>
22#include <linux/param.h>
23#include <linux/string.h>
24#include <linux/mm.h>
25#include <linux/interrupt.h>
26#include <linux/cpu.h>
27#include <linux/stop_machine.h>
28#include <linux/time.h>
29#include <linux/sysdev.h>
30#include <linux/delay.h>
31#include <linux/init.h>
32#include <linux/smp.h>
33#include <linux/types.h>
34#include <linux/profile.h>
35#include <linux/timex.h>
36#include <linux/notifier.h>
37#include <linux/clocksource.h>
38#include <linux/clockchips.h>
39#include <linux/gfp.h>
40#include <asm/uaccess.h>
41#include <asm/delay.h>
42#include <asm/s390_ext.h>
43#include <asm/div64.h>
44#include <asm/vdso.h>
45#include <asm/irq.h>
46#include <asm/irq_regs.h>
47#include <asm/timer.h>
48#include <asm/etr.h>
49#include <asm/cio.h>
50
51/* change this if you have some constant time drift */
52#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
53#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
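/*
 * Bit 51 of the TOD clock is incremented every microsecond, so one
 * microsecond corresponds to 2^12 TOD clock units; the shift by 12
 * above converts microseconds per jiffy into TOD clock units per jiffy.
 */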
54
55u64 sched_clock_base_cc = -1; /* Force to data section. */
56EXPORT_SYMBOL_GPL(sched_clock_base_cc);
57
58static DEFINE_PER_CPU(struct clock_event_device, comparators);
59
60/*
61 * Scheduler clock - returns current time in nanosec units.
62 */
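/*
 * One TOD clock unit is 2^-12 microseconds = 1000/4096 = 125/512
 * nanoseconds, so multiplying the monotonic TOD value by 125 and
 * shifting it right by 9 yields nanoseconds.
 */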
63unsigned long long notrace sched_clock(void)
64{
65    return (get_clock_monotonic() * 125) >> 9;
66}
67
68/*
69 * Monotonic_clock - returns # of nanoseconds passed since time_init()
70 */
71unsigned long long monotonic_clock(void)
72{
73    return sched_clock();
74}
75EXPORT_SYMBOL(monotonic_clock);
76
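/*
 * Convert a TOD clock value (2^-12 microsecond units) into a timespec:
 * shift down to microseconds, split off the full seconds and scale the
 * remaining microsecond fraction to nanoseconds.
 */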
77void tod_to_timeval(__u64 todval, struct timespec *xt)
78{
79    unsigned long long sec;
80
81    sec = todval >> 12;
82    do_div(sec, 1000000);
83    xt->tv_sec = sec;
84    todval -= (sec * 1000000) << 12;
85    xt->tv_nsec = ((todval * 1000) >> 12);
86}
87EXPORT_SYMBOL(tod_to_timeval);
88
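/*
 * A clock comparator value of -1ULL means that no wakeup is pending;
 * since it lies far in the future it effectively disables the comparator
 * interrupt until a new expiry is programmed via s390_next_event().
 */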
89void clock_comparator_work(void)
90{
91    struct clock_event_device *cd;
92
93    S390_lowcore.clock_comparator = -1ULL;
94    set_clock_comparator(S390_lowcore.clock_comparator);
95    cd = &__get_cpu_var(comparators);
96    cd->event_handler(cd);
97}
98
99/*
100 * Fixup the clock comparator.
101 */
102static void fixup_clock_comparator(unsigned long long delta)
103{
104    /* If nobody is waiting there's nothing to fix. */
105    if (S390_lowcore.clock_comparator == -1ULL)
106        return;
107    S390_lowcore.clock_comparator += delta;
108    set_clock_comparator(S390_lowcore.clock_comparator);
109}
110
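/*
 * Clock event device callback: program the clock comparator to fire
 * "delta" TOD clock units from now.
 */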
111static int s390_next_event(unsigned long delta,
112               struct clock_event_device *evt)
113{
114    S390_lowcore.clock_comparator = get_clock() + delta;
115    set_clock_comparator(S390_lowcore.clock_comparator);
116    return 0;
117}
118
119static void s390_set_mode(enum clock_event_mode mode,
120              struct clock_event_device *evt)
121{
122}
123
124/*
125 * Set up lowcore and control register of the current cpu to
126 * enable TOD clock and clock comparator interrupts.
127 */
128void init_cpu_timer(void)
129{
130    struct clock_event_device *cd;
131    int cpu;
132
133    S390_lowcore.clock_comparator = -1ULL;
134    set_clock_comparator(S390_lowcore.clock_comparator);
135
136    cpu = smp_processor_id();
137    cd = &per_cpu(comparators, cpu);
138    cd->name = "comparator";
139    cd->features = CLOCK_EVT_FEAT_ONESHOT;
140    cd->mult = 16777;
141    cd->shift = 12;
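    /*
     * mult / 2^shift = 16777 / 4096 ~= 4.096 is the number of TOD clock
     * units (2^-12 microseconds) per nanosecond; the clockevents core
     * uses it to convert nanosecond deltas into comparator ticks.
     */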
142    cd->min_delta_ns = 1;
143    cd->max_delta_ns = LONG_MAX;
144    cd->rating = 400;
145    cd->cpumask = cpumask_of(cpu);
146    cd->set_next_event = s390_next_event;
147    cd->set_mode = s390_set_mode;
148
149    clockevents_register_device(cd);
150
151    /* Enable clock comparator timer interrupt. */
152    __ctl_set_bit(0,11);
153
154    /* Always allow the timing alert external interrupt. */
155    __ctl_set_bit(0, 4);
156}
157
158static void clock_comparator_interrupt(unsigned int ext_int_code,
159                       unsigned int param32,
160                       unsigned long param64)
161{
162    if (S390_lowcore.clock_comparator == -1ULL)
163        set_clock_comparator(S390_lowcore.clock_comparator);
164}
165
166static void etr_timing_alert(struct etr_irq_parm *);
167static void stp_timing_alert(struct stp_irq_parm *);
168
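/*
 * Timing alert external interrupt (0x1406): the interruption parameter
 * indicates whether the alert concerns ETR, STP or both, and the event
 * is forwarded to the respective handler.
 */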
169static void timing_alert_interrupt(unsigned int ext_int_code,
170                   unsigned int param32, unsigned long param64)
171{
172    if (param32 & 0x00c40000)
173        etr_timing_alert((struct etr_irq_parm *) &param32);
174    if (param32 & 0x00038000)
175        stp_timing_alert((struct stp_irq_parm *) &param32);
176}
177
178static void etr_reset(void);
179static void stp_reset(void);
180
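/*
 * The TOD clock counts from 1900-01-01 00:00 UTC; subtracting
 * TOD_UNIX_EPOCH yields the time since the Unix epoch (1970-01-01).
 */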
181void read_persistent_clock(struct timespec *ts)
182{
183    tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
184}
185
186void read_boot_clock(struct timespec *ts)
187{
188    tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
189}
190
191static cycle_t read_tod_clock(struct clocksource *cs)
192{
193    return get_clock();
194}
195
196static struct clocksource clocksource_tod = {
197    .name = "tod",
198    .rating = 400,
199    .read = read_tod_clock,
200    .mask = -1ULL,
201    .mult = 1000,
202    .shift = 12,
203    .flags = CLOCK_SOURCE_IS_CONTINUOUS,
204};
205
206struct clocksource * __init clocksource_default_clock(void)
207{
208    return &clocksource_tod;
209}
210
211void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
212            struct clocksource *clock, u32 mult)
213{
214    if (clock != &clocksource_tod)
215        return;
216
217    /* Make userspace gettimeofday spin until we're done. */
218    ++vdso_data->tb_update_count;
219    smp_wmb();
220    vdso_data->xtime_tod_stamp = clock->cycle_last;
221    vdso_data->xtime_clock_sec = wall_time->tv_sec;
222    vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
223    vdso_data->wtom_clock_sec = wtm->tv_sec;
224    vdso_data->wtom_clock_nsec = wtm->tv_nsec;
225    vdso_data->ntp_mult = mult;
226    smp_wmb();
227    ++vdso_data->tb_update_count;
228}
229
230extern struct timezone sys_tz;
231
232void update_vsyscall_tz(void)
233{
234    /* Make userspace gettimeofday spin until we're done. */
235    ++vdso_data->tb_update_count;
236    smp_wmb();
237    vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
238    vdso_data->tz_dsttime = sys_tz.tz_dsttime;
239    smp_wmb();
240    ++vdso_data->tb_update_count;
241}
242
243/*
244 * Initialize the TOD clock and the CPU timer of
245 * the boot cpu.
246 */
247void __init time_init(void)
248{
249    /* Reset time synchronization interfaces. */
250    etr_reset();
251    stp_reset();
252
253    /* request the clock comparator external interrupt */
254    if (register_external_interrupt(0x1004, clock_comparator_interrupt))
255        panic("Couldn't request external interrupt 0x1004");
256
257    /* request the timing alert external interrupt */
258    if (register_external_interrupt(0x1406, timing_alert_interrupt))
259        panic("Couldn't request external interrupt 0x1406");
260
261    if (clocksource_register(&clocksource_tod) != 0)
262        panic("Could not register TOD clock source");
263
264    /* Enable TOD clock interrupts on the boot cpu. */
265    init_cpu_timer();
266
267    /* Enable cpu timer interrupts on the boot cpu. */
268    vtime_init();
269}
270
271/*
272 * The time is "clock". old is what we think the time is.
273 * Adjust the value by a multiple of jiffies and add the delta to ntp.
274 * "delay" is an approximation of how long the synchronization took. If
275 * the time correction is positive, then "delay" is subtracted from
276 * the time difference and only the remaining part is passed to ntp.
277 */
278static unsigned long long adjust_time(unsigned long long old,
279                      unsigned long long clock,
280                      unsigned long long delay)
281{
282    unsigned long long delta, ticks;
283    struct timex adjust;
284
285    if (clock > old) {
286        /* It is later than we thought. */
287        delta = ticks = clock - old;
288        delta = ticks = (delta < delay) ? 0 : delta - delay;
289        delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
290        adjust.offset = ticks * (1000000 / HZ);
291    } else {
292        /* It is earlier than we thought. */
293        delta = ticks = old - clock;
294        delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
295        delta = -delta;
296        adjust.offset = -ticks * (1000000 / HZ);
297    }
298    sched_clock_base_cc += delta;
299    if (adjust.offset != 0) {
300        pr_notice("The ETR interface has adjusted the clock "
301              "by %li microseconds\n", adjust.offset);
302        adjust.modes = ADJ_OFFSET_SINGLESHOT;
303        do_adjtimex(&adjust);
304    }
305    return delta;
306}
307
308static DEFINE_PER_CPU(atomic_t, clock_sync_word);
309static DEFINE_MUTEX(clock_sync_mutex);
310static unsigned long clock_sync_flags;
311
312#define CLOCK_SYNC_HAS_ETR 0
313#define CLOCK_SYNC_HAS_STP 1
314#define CLOCK_SYNC_ETR 2
315#define CLOCK_SYNC_STP 3
316
317/*
318 * The synchronous get_clock function. It will write the current clock
319 * value to the clock pointer and return 0 if the clock is in sync with
320 * the external time source. It returns -ENOSYS if neither ETR nor STP
321 * is available, -EACCES if no synchronization mode is enabled, and
322 * -EAGAIN if the clock is not (yet) in sync with the external reference.
323 */
324int get_sync_clock(unsigned long long *clock)
325{
326    atomic_t *sw_ptr;
327    unsigned int sw0, sw1;
328
329    sw_ptr = &get_cpu_var(clock_sync_word);
330    sw0 = atomic_read(sw_ptr);
331    *clock = get_clock();
332    sw1 = atomic_read(sw_ptr);
333    put_cpu_var(clock_sync_word);
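    /*
     * The clock value is only trusted if the in-sync bit was set and the
     * sequence word did not change around the read, i.e. no concurrent
     * disable_sync_clock()/enable_sync_clock() happened in between.
     */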
334    if (sw0 == sw1 && (sw0 & 0x80000000U))
335        /* Success: time is in sync. */
336        return 0;
337    if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
338        !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
339        return -ENOSYS;
340    if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
341        !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
342        return -EACCES;
343    return -EAGAIN;
344}
345EXPORT_SYMBOL(get_sync_clock);
346
347/*
348 * Make get_sync_clock return -EAGAIN.
349 */
350static void disable_sync_clock(void *dummy)
351{
352    atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
353    /*
354     * Clear the in-sync bit 2^31. All get_sync_clock calls will
355     * fail until the sync bit is turned back on. In addition
356     * increase the "sequence" counter so that a get_sync_clock racing
357     * with an etr event and the subsequent recovery does not succeed.
358     */
359    atomic_clear_mask(0x80000000, sw_ptr);
360    atomic_inc(sw_ptr);
361}
362
363/*
364 * Make get_sync_clock return 0 again.
365 * Needs to be called from a context disabled for preemption.
366 */
367static void enable_sync_clock(void)
368{
369    atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
370    atomic_set_mask(0x80000000, sw_ptr);
371}
372
373/*
374 * Function to check if the clock is in sync.
375 */
376static inline int check_sync_clock(void)
377{
378    atomic_t *sw_ptr;
379    int rc;
380
381    sw_ptr = &get_cpu_var(clock_sync_word);
382    rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
383    put_cpu_var(clock_sync_word);
384    return rc;
385}
386
387/* Single threaded workqueue used for etr and stp sync events */
388static struct workqueue_struct *time_sync_wq;
389
390static void __init time_init_wq(void)
391{
392    if (time_sync_wq)
393        return;
394    time_sync_wq = create_singlethread_workqueue("timesync");
395}
396
397/*
398 * External Time Reference (ETR) code.
399 */
400static int etr_port0_online;
401static int etr_port1_online;
402static int etr_steai_available;
403
404static int __init early_parse_etr(char *p)
405{
406    if (strncmp(p, "off", 3) == 0)
407        etr_port0_online = etr_port1_online = 0;
408    else if (strncmp(p, "port0", 5) == 0)
409        etr_port0_online = 1;
410    else if (strncmp(p, "port1", 5) == 0)
411        etr_port1_online = 1;
412    else if (strncmp(p, "on", 2) == 0)
413        etr_port0_online = etr_port1_online = 1;
414    return 0;
415}
416early_param("etr", early_parse_etr);
417
418enum etr_event {
419    ETR_EVENT_PORT0_CHANGE,
420    ETR_EVENT_PORT1_CHANGE,
421    ETR_EVENT_PORT_ALERT,
422    ETR_EVENT_SYNC_CHECK,
423    ETR_EVENT_SWITCH_LOCAL,
424    ETR_EVENT_UPDATE,
425};
426
427/*
428 * Valid bit combinations of the eacr register are (x = don't care):
429 * e0 e1 dp p0 p1 ea es sl
430 *  0  0  x  0  0  0  0  0  initial, disabled state
431 *  0  0  x  0  1  1  0  0  port 1 online
432 *  0  0  x  1  0  1  0  0  port 0 online
433 *  0  0  x  1  1  1  0  0  both ports online
434 *  0  1  x  0  1  1  0  0  port 1 online and usable, ETR or PPS mode
435 *  0  1  x  0  1  1  0  1  port 1 online, usable and ETR mode
436 *  0  1  x  0  1  1  1  0  port 1 online, usable, PPS mode, in-sync
437 *  0  1  x  0  1  1  1  1  port 1 online, usable, ETR mode, in-sync
438 *  0  1  x  1  1  1  0  0  both ports online, port 1 usable
439 *  0  1  x  1  1  1  1  0  both ports online, port 1 usable, PPS mode, in-sync
440 *  0  1  x  1  1  1  1  1  both ports online, port 1 usable, ETR mode, in-sync
441 *  1  0  x  1  0  1  0  0  port 0 online and usable, ETR or PPS mode
442 *  1  0  x  1  0  1  0  1  port 0 online, usable and ETR mode
443 *  1  0  x  1  0  1  1  0  port 0 online, usable, PPS mode, in-sync
444 *  1  0  x  1  0  1  1  1  port 0 online, usable, ETR mode, in-sync
445 *  1  0  x  1  1  1  0  0  both ports online, port 0 usable
446 *  1  0  x  1  1  1  1  0  both ports online, port 0 usable, PPS mode, in-sync
447 *  1  0  x  1  1  1  1  1  both ports online, port 0 usable, ETR mode, in-sync
448 *  1  1  x  1  1  1  1  0  both ports online & usable, ETR, in-sync
449 *  1  1  x  1  1  1  1  1  both ports online & usable, ETR, in-sync
450 */
451static struct etr_eacr etr_eacr;
452static u64 etr_tolec; /* time of last eacr update */
453static struct etr_aib etr_port0;
454static int etr_port0_uptodate;
455static struct etr_aib etr_port1;
456static int etr_port1_uptodate;
457static unsigned long etr_events;
458static struct timer_list etr_timer;
459
460static void etr_timeout(unsigned long dummy);
461static void etr_work_fn(struct work_struct *work);
462static DEFINE_MUTEX(etr_work_mutex);
463static DECLARE_WORK(etr_work, etr_work_fn);
464
465/*
466 * Reset ETR attachment.
467 */
468static void etr_reset(void)
469{
470    etr_eacr = (struct etr_eacr) {
471        .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
472        .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
473        .es = 0, .sl = 0 };
474    if (etr_setr(&etr_eacr) == 0) {
475        etr_tolec = get_clock();
476        set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
477        if (etr_port0_online && etr_port1_online)
478            set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
479    } else if (etr_port0_online || etr_port1_online) {
480        pr_warning("The real or virtual hardware system does "
481               "not provide an ETR interface\n");
482        etr_port0_online = etr_port1_online = 0;
483    }
484}
485
486static int __init etr_init(void)
487{
488    struct etr_aib aib;
489
490    if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
491        return 0;
492    time_init_wq();
493    /* Check if this machine has the steai instruction. */
494    if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
495        etr_steai_available = 1;
496    setup_timer(&etr_timer, etr_timeout, 0UL);
497    if (etr_port0_online) {
498        set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
499        queue_work(time_sync_wq, &etr_work);
500    }
501    if (etr_port1_online) {
502        set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
503        queue_work(time_sync_wq, &etr_work);
504    }
505    return 0;
506}
507
508arch_initcall(etr_init);
509
510/*
511 * Two sorts of ETR machine checks. The architecture reads:
512 * "When a machine-check interruption occurs and if a switch-to-local or
513 * ETR-sync-check interrupt request is pending but disabled, this pending
514 * disabled interruption request is indicated and is cleared".
515 * Which means that we can get etr_switch_to_local events from the machine
516 * check handler although the interruption condition is disabled. Lovely..
517 */
518
519/*
520 * Switch to local machine check. This is called when the last usable
521 * ETR port goes inactive. After switch to local the clock is not in sync.
522 */
523void etr_switch_to_local(void)
524{
525    if (!etr_eacr.sl)
526        return;
527    disable_sync_clock(NULL);
528    if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
529        etr_eacr.es = etr_eacr.sl = 0;
530        etr_setr(&etr_eacr);
531        queue_work(time_sync_wq, &etr_work);
532    }
533}
534
535/*
536 * ETR sync check machine check. This is called when the ETR OTE and the
537 * local clock OTE are farther apart than the ETR sync check tolerance.
538 * After an ETR sync check the clock is not in sync. The machine check
539 * is broadcast to all cpus at the same time.
540 */
541void etr_sync_check(void)
542{
543    if (!etr_eacr.es)
544        return;
545    disable_sync_clock(NULL);
546    if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
547        etr_eacr.es = 0;
548        etr_setr(&etr_eacr);
549        queue_work(time_sync_wq, &etr_work);
550    }
551}
552
553/*
554 * ETR timing alert. There are two causes:
555 * 1) port state change, check the usability of the port
556 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
557 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
558 * or ETR-data word 4 (edf4) has changed.
559 */
560static void etr_timing_alert(struct etr_irq_parm *intparm)
561{
562    if (intparm->pc0)
563        /* ETR port 0 state change. */
564        set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
565    if (intparm->pc1)
566        /* ETR port 1 state change. */
567        set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
568    if (intparm->eai)
569        /*
570         * ETR port alert on either port 0, 1 or both.
571         * Both ports are not up-to-date now.
572         */
573        set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
574    queue_work(time_sync_wq, &etr_work);
575}
576
577static void etr_timeout(unsigned long dummy)
578{
579    set_bit(ETR_EVENT_UPDATE, &etr_events);
580    queue_work(time_sync_wq, &etr_work);
581}
582
583/*
584 * Check if the etr mode is pps.
585 */
586static inline int etr_mode_is_pps(struct etr_eacr eacr)
587{
588    return eacr.es && !eacr.sl;
589}
590
591/*
592 * Check if the etr mode is etr.
593 */
594static inline int etr_mode_is_etr(struct etr_eacr eacr)
595{
596    return eacr.es && eacr.sl;
597}
598
599/*
600 * Check if the port can be used for TOD synchronization.
601 * For PPS mode the port has to receive OTEs. For ETR mode
602 * the port has to receive OTEs, the ETR stepping bit has to
603 * be zero and the validity bits for data frame 1, 2, and 3
604 * have to be 1.
605 */
606static int etr_port_valid(struct etr_aib *aib, int port)
607{
608    unsigned int psc;
609
610    /* Check that this port is receiving OTEs. */
611    if (aib->tsp == 0)
612        return 0;
613
614    psc = port ? aib->esw.psc1 : aib->esw.psc0;
615    if (psc == etr_lpsc_pps_mode)
616        return 1;
617    if (psc == etr_lpsc_operational_step)
618        return !aib->esw.y && aib->slsw.v1 &&
619            aib->slsw.v2 && aib->slsw.v3;
620    return 0;
621}
622
623/*
624 * Check if two ports are on the same network.
625 */
626static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
627{
628    // FIXME: any other fields we have to compare?
629    return aib1->edf1.net_id == aib2->edf1.net_id;
630}
631
632/*
633 * Wrapper for etr_steai that converts physical port states
634 * to logical port states to be consistent with the output
635 * of stetr (see etr_psc vs. etr_lpsc).
636 */
637static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
638{
639    BUG_ON(etr_steai(aib, func) != 0);
640    /* Convert port state to logical port state. */
641    if (aib->esw.psc0 == 1)
642        aib->esw.psc0 = 2;
643    else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
644        aib->esw.psc0 = 1;
645    if (aib->esw.psc1 == 1)
646        aib->esw.psc1 = 2;
647    else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
648        aib->esw.psc1 = 1;
649}
650
651/*
652 * Check if the aib a2 is still connected to the same attachment as
653 * aib a1, that the etv values differ by one and that a2 is valid.
654 */
655static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
656{
657    int state_a1, state_a2;
658
659    /* Paranoia check: e0/e1 should better be the same. */
660    if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
661        a1->esw.eacr.e1 != a2->esw.eacr.e1)
662        return 0;
663
664    /* Still connected to the same etr ? */
665    state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
666    state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
667    if (state_a1 == etr_lpsc_operational_step) {
668        if (state_a2 != etr_lpsc_operational_step ||
669            a1->edf1.net_id != a2->edf1.net_id ||
670            a1->edf1.etr_id != a2->edf1.etr_id ||
671            a1->edf1.etr_pn != a2->edf1.etr_pn)
672            return 0;
673    } else if (state_a2 != etr_lpsc_pps_mode)
674        return 0;
675
676    /* The ETV value of a2 needs to be ETV of a1 + 1. */
677    if (a1->edf2.etv + 1 != a2->edf2.etv)
678        return 0;
679
680    if (!etr_port_valid(a2, p))
681        return 0;
682
683    return 1;
684}
685
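/*
 * State shared between the cpu that drives a clock synchronization and
 * the other cpus spinning in clock_sync_cpu(): the count of cpus still
 * missing, the outcome (0 = pending, 1 = success, < 0 = error), the
 * clock comparator fixup to apply and the ETR port data if ETR is used.
 */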
686struct clock_sync_data {
687    atomic_t cpus;
688    int in_sync;
689    unsigned long long fixup_cc;
690    int etr_port;
691    struct etr_aib *etr_aib;
692};
693
694static void clock_sync_cpu(struct clock_sync_data *sync)
695{
696    atomic_dec(&sync->cpus);
697    enable_sync_clock();
698    /*
699     * This looks like a busy wait loop but it isn't. clock_sync_cpu
700     * is called on all other cpus while the TOD clock is stopped.
701     * __udelay will stop the cpu on an enabled wait psw until the
702     * TOD is running again.
703     */
704    while (sync->in_sync == 0) {
705        __udelay(1);
706        /*
707         * A different cpu changes sync->in_sync. Therefore use
708         * barrier() to force memory access.
709         */
710        barrier();
711    }
712    if (sync->in_sync != 1)
713        /* Didn't work. Clear per-cpu in sync bit again. */
714        disable_sync_clock(NULL);
715    /*
716     * This round of TOD syncing is done. Set the clock comparator
717     * to the next tick and let the processor continue.
718     */
719    fixup_clock_comparator(sync->fixup_cc);
720}
721
722/*
723 * Sync the TOD clock using the port referred to by aibp. This port
724 * has to be enabled and the other port has to be disabled. The
725 * last eacr update has to be more than 1.6 seconds in the past.
726 */
727static int etr_sync_clock(void *data)
728{
729    static int first;
730    unsigned long long clock, old_clock, delay, delta;
731    struct clock_sync_data *etr_sync;
732    struct etr_aib *sync_port, *aib;
733    int port;
734    int rc;
735
736    etr_sync = data;
737
738    if (xchg(&first, 1) == 1) {
739        /* Slave */
740        clock_sync_cpu(etr_sync);
741        return 0;
742    }
743
744    /* Wait until all other cpus entered the sync function. */
745    while (atomic_read(&etr_sync->cpus) != 0)
746        cpu_relax();
747
748    port = etr_sync->etr_port;
749    aib = etr_sync->etr_aib;
750    sync_port = (port == 0) ? &etr_port0 : &etr_port1;
751    enable_sync_clock();
752
753    /* Set clock to next OTE. */
754    __ctl_set_bit(14, 21);
755    __ctl_set_bit(0, 29);
756    clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
757    old_clock = get_clock();
758    if (set_clock(clock) == 0) {
759        __udelay(1); /* Wait for the clock to start. */
760        __ctl_clear_bit(0, 29);
761        __ctl_clear_bit(14, 21);
762        etr_stetr(aib);
763        /* Adjust Linux timing variables. */
764        delay = (unsigned long long)
765            (aib->edf2.etv - sync_port->edf2.etv) << 32;
766        delta = adjust_time(old_clock, clock, delay);
767        etr_sync->fixup_cc = delta;
768        fixup_clock_comparator(delta);
769        /* Verify that the clock is properly set. */
770        if (!etr_aib_follows(sync_port, aib, port)) {
771            /* Didn't work. */
772            disable_sync_clock(NULL);
773            etr_sync->in_sync = -EAGAIN;
774            rc = -EAGAIN;
775        } else {
776            etr_sync->in_sync = 1;
777            rc = 0;
778        }
779    } else {
780        /* Could not set the clock ?!? */
781        __ctl_clear_bit(0, 29);
782        __ctl_clear_bit(14, 21);
783        disable_sync_clock(NULL);
784        etr_sync->in_sync = -EAGAIN;
785        rc = -EAGAIN;
786    }
787    xchg(&first, 0);
788    return rc;
789}
790
791static int etr_sync_clock_stop(struct etr_aib *aib, int port)
792{
793    struct clock_sync_data etr_sync;
794    struct etr_aib *sync_port;
795    int follows;
796    int rc;
797
798    /* Check if the current aib is adjacent to the sync port aib. */
799    sync_port = (port == 0) ? &etr_port0 : &etr_port1;
800    follows = etr_aib_follows(sync_port, aib, port);
801    memcpy(sync_port, aib, sizeof(*aib));
802    if (!follows)
803        return -EAGAIN;
804    memset(&etr_sync, 0, sizeof(etr_sync));
805    etr_sync.etr_aib = aib;
806    etr_sync.etr_port = port;
807    get_online_cpus();
808    atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
809    rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
810    put_online_cpus();
811    return rc;
812}
813
814/*
815 * Handle the immediate effects of the different events.
816 * The port change event is used for online/offline changes.
817 */
818static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
819{
820    if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
821        eacr.es = 0;
822    if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
823        eacr.es = eacr.sl = 0;
824    if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
825        etr_port0_uptodate = etr_port1_uptodate = 0;
826
827    if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
828        if (eacr.e0)
829            /*
830             * Port change of an enabled port. We have to
831             * assume that this may have caused a stepping
832             * port switch.
833             */
834            etr_tolec = get_clock();
835        eacr.p0 = etr_port0_online;
836        if (!eacr.p0)
837            eacr.e0 = 0;
838        etr_port0_uptodate = 0;
839    }
840    if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
841        if (eacr.e1)
842            /*
843             * Port change of an enabled port. We have to
844             * assume that this may have caused a stepping
845             * port switch.
846             */
847            etr_tolec = get_clock();
848        eacr.p1 = etr_port1_online;
849        if (!eacr.p1)
850            eacr.e1 = 0;
851        etr_port1_uptodate = 0;
852    }
853    clear_bit(ETR_EVENT_UPDATE, &etr_events);
854    return eacr;
855}
856
857/*
858 * Set up a timer that expires 1.6 seconds after etr_tolec if one of
859 * the ports needs an update.
860 */
861static void etr_set_tolec_timeout(unsigned long long now)
862{
863    unsigned long micros;
864
865    if ((!etr_eacr.p0 || etr_port0_uptodate) &&
866        (!etr_eacr.p1 || etr_port1_uptodate))
867        return;
868    micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
869    micros = (micros > 1600000) ? 0 : 1600000 - micros;
870    mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
871}
872
873/*
874 * Set up a timer that expires after 1/2 second.
875 */
876static void etr_set_sync_timeout(void)
877{
878    mod_timer(&etr_timer, jiffies + HZ/2);
879}
880
881/*
882 * Update the aib information for one or both ports.
883 */
884static struct etr_eacr etr_handle_update(struct etr_aib *aib,
885                     struct etr_eacr eacr)
886{
887    /* With both ports disabled the aib information is useless. */
888    if (!eacr.e0 && !eacr.e1)
889        return eacr;
890
891    /* Update port0 or port1 with aib stored in etr_work_fn. */
892    if (aib->esw.q == 0) {
893        /* Information for port 0 stored. */
894        if (eacr.p0 && !etr_port0_uptodate) {
895            etr_port0 = *aib;
896            if (etr_port0_online)
897                etr_port0_uptodate = 1;
898        }
899    } else {
900        /* Information for port 1 stored. */
901        if (eacr.p1 && !etr_port1_uptodate) {
902            etr_port1 = *aib;
903            if (etr_port0_online)
904                etr_port1_uptodate = 1;
905        }
906    }
907
908    /*
909     * Do not try to get the alternate port aib if the clock
910     * is not in sync yet.
911     */
912    if (!eacr.es || !check_sync_clock())
913        return eacr;
914
915    /*
916     * If steai is available we can get the information about
917     * the other port immediately. If only stetr is available the
918     * data-port bit toggle has to be used.
919     */
920    if (etr_steai_available) {
921        if (eacr.p0 && !etr_port0_uptodate) {
922            etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
923            etr_port0_uptodate = 1;
924        }
925        if (eacr.p1 && !etr_port1_uptodate) {
926            etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
927            etr_port1_uptodate = 1;
928        }
929    } else {
930        /*
931         * One port was updated above; if the other
932         * port is not uptodate, toggle the dp bit.
933         */
934        if ((eacr.p0 && !etr_port0_uptodate) ||
935            (eacr.p1 && !etr_port1_uptodate))
936            eacr.dp ^= 1;
937        else
938            eacr.dp = 0;
939    }
940    return eacr;
941}
942
943/*
944 * Write new etr control register if it differs from the current one.
945 * etr_tolec is refreshed if the update could have changed the data port.
946 */
947static void etr_update_eacr(struct etr_eacr eacr)
948{
949    int dp_changed;
950
951    if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
952        /* No change, return. */
953        return;
954    /*
955     * The disable of an active port or the change of the data port
956     * bit can/will cause a change in the data port.
957     */
958    dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
959        (etr_eacr.dp ^ eacr.dp) != 0;
960    etr_eacr = eacr;
961    etr_setr(&etr_eacr);
962    if (dp_changed)
963        etr_tolec = get_clock();
964}
965
966/*
967 * ETR work. In this function you'll find the main logic. In
968 * particular this is the only function that calls etr_update_eacr();
969 * it "controls" the etr control register.
970 */
971static void etr_work_fn(struct work_struct *work)
972{
973    unsigned long long now;
974    struct etr_eacr eacr;
975    struct etr_aib aib;
976    int sync_port;
977
978    /* prevent multiple execution. */
979    mutex_lock(&etr_work_mutex);
980
981    /* Create working copy of etr_eacr. */
982    eacr = etr_eacr;
983
984    /* Check for the different events and their immediate effects. */
985    eacr = etr_handle_events(eacr);
986
987    /* Check if ETR is supposed to be active. */
988    eacr.ea = eacr.p0 || eacr.p1;
989    if (!eacr.ea) {
990        /* Both ports offline. Reset everything. */
991        eacr.dp = eacr.es = eacr.sl = 0;
992        on_each_cpu(disable_sync_clock, NULL, 1);
993        del_timer_sync(&etr_timer);
994        etr_update_eacr(eacr);
995        goto out_unlock;
996    }
997
998    /* Store aib to get the current ETR status word. */
999    BUG_ON(etr_stetr(&aib) != 0);
1000    etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1001    now = get_clock();
1002
1003    /*
1004     * Update the port information if the last stepping port change
1005     * or data port change is older than 1.6 seconds.
1006     */
1007    if (now >= etr_tolec + (1600000 << 12))
1008        eacr = etr_handle_update(&aib, eacr);
1009
1010    /*
1011     * Select ports to enable. The preferred synchronization mode is PPS.
1012     * Whether a port can be enabled depends on a number of things:
1013     * 1) The port needs to be online and uptodate. A port is not
1014     * disabled just because it is not uptodate, but it is only
1015     * enabled if it is uptodate.
1016     * 2) The port needs to have the same mode (pps / etr).
1017     * 3) The port needs to be usable -> etr_port_valid() == 1
1018     * 4) To enable the second port the clock needs to be in sync.
1019     * 5) If both ports are usable and are ETR ports, the network id
1020     * has to be the same.
1021     * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1022     */
1023    if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1024        eacr.sl = 0;
1025        eacr.e0 = 1;
1026        if (!etr_mode_is_pps(etr_eacr))
1027            eacr.es = 0;
1028        if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1029            eacr.e1 = 0;
1030        // FIXME: uptodate checks ?
1031        else if (etr_port0_uptodate && etr_port1_uptodate)
1032            eacr.e1 = 1;
1033        sync_port = (etr_port0_uptodate &&
1034                 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1035    } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1036        eacr.sl = 0;
1037        eacr.e0 = 0;
1038        eacr.e1 = 1;
1039        if (!etr_mode_is_pps(etr_eacr))
1040            eacr.es = 0;
1041        sync_port = (etr_port1_uptodate &&
1042                 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1043    } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1044        eacr.sl = 1;
1045        eacr.e0 = 1;
1046        if (!etr_mode_is_etr(etr_eacr))
1047            eacr.es = 0;
1048        if (!eacr.es || !eacr.p1 ||
1049            aib.esw.psc1 != etr_lpsc_operational_alt)
1050            eacr.e1 = 0;
1051        else if (etr_port0_uptodate && etr_port1_uptodate &&
1052             etr_compare_network(&etr_port0, &etr_port1))
1053            eacr.e1 = 1;
1054        sync_port = (etr_port0_uptodate &&
1055                 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1056    } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1057        eacr.sl = 1;
1058        eacr.e0 = 0;
1059        eacr.e1 = 1;
1060        if (!etr_mode_is_etr(etr_eacr))
1061            eacr.es = 0;
1062        sync_port = (etr_port1_uptodate &&
1063                 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1064    } else {
1065        /* Both ports not usable. */
1066        eacr.es = eacr.sl = 0;
1067        sync_port = -1;
1068    }
1069
1070    /*
1071     * If the clock is in sync just update the eacr and return.
1072     * If there is no valid sync port wait for a port update.
1073     */
1074    if ((eacr.es && check_sync_clock()) || sync_port < 0) {
1075        etr_update_eacr(eacr);
1076        etr_set_tolec_timeout(now);
1077        goto out_unlock;
1078    }
1079
1080    /*
1081     * Prepare control register for clock syncing
1082     * (reset data port bit, set sync check control).
1083     */
1084    eacr.dp = 0;
1085    eacr.es = 1;
1086
1087    /*
1088     * Update eacr and try to synchronize the clock. If the update
1089     * of eacr caused a stepping port switch (or if we have to
1090     * assume that a stepping port switch has occurred) or the
1091     * clock syncing failed, reset the sync check control bit
1092     * and set up a timer to try again after 0.5 seconds.
1093     */
1094    etr_update_eacr(eacr);
1095    if (now < etr_tolec + (1600000 << 12) ||
1096        etr_sync_clock_stop(&aib, sync_port) != 0) {
1097        /* Sync failed. Try again in 1/2 second. */
1098        eacr.es = 0;
1099        etr_update_eacr(eacr);
1100        etr_set_sync_timeout();
1101    } else
1102        etr_set_tolec_timeout(now);
1103out_unlock:
1104    mutex_unlock(&etr_work_mutex);
1105}
1106
1107/*
1108 * Sysfs interface functions
1109 */
1110static struct sysdev_class etr_sysclass = {
1111    .name = "etr",
1112};
1113
1114static struct sys_device etr_port0_dev = {
1115    .id = 0,
1116    .cls = &etr_sysclass,
1117};
1118
1119static struct sys_device etr_port1_dev = {
1120    .id = 1,
1121    .cls = &etr_sysclass,
1122};
1123
1124/*
1125 * ETR class attributes
1126 */
1127static ssize_t etr_stepping_port_show(struct sysdev_class *class,
1128                    struct sysdev_class_attribute *attr,
1129                    char *buf)
1130{
1131    return sprintf(buf, "%i\n", etr_port0.esw.p);
1132}
1133
1134static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1135
1136static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
1137                          struct sysdev_class_attribute *attr,
1138                    char *buf)
1139{
1140    char *mode_str;
1141
1142    if (etr_mode_is_pps(etr_eacr))
1143        mode_str = "pps";
1144    else if (etr_mode_is_etr(etr_eacr))
1145        mode_str = "etr";
1146    else
1147        mode_str = "local";
1148    return sprintf(buf, "%s\n", mode_str);
1149}
1150
1151static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1152
1153/*
1154 * ETR port attributes
1155 */
1156static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1157{
1158    if (dev == &etr_port0_dev)
1159        return etr_port0_online ? &etr_port0 : NULL;
1160    else
1161        return etr_port1_online ? &etr_port1 : NULL;
1162}
1163
1164static ssize_t etr_online_show(struct sys_device *dev,
1165                struct sysdev_attribute *attr,
1166                char *buf)
1167{
1168    unsigned int online;
1169
1170    online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1171    return sprintf(buf, "%i\n", online);
1172}
1173
1174static ssize_t etr_online_store(struct sys_device *dev,
1175                struct sysdev_attribute *attr,
1176                const char *buf, size_t count)
1177{
1178    unsigned int value;
1179
1180    value = simple_strtoul(buf, NULL, 0);
1181    if (value != 0 && value != 1)
1182        return -EINVAL;
1183    if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1184        return -EOPNOTSUPP;
1185    mutex_lock(&clock_sync_mutex);
1186    if (dev == &etr_port0_dev) {
1187        if (etr_port0_online == value)
1188            goto out; /* Nothing to do. */
1189        etr_port0_online = value;
1190        if (etr_port0_online && etr_port1_online)
1191            set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1192        else
1193            clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1194        set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1195        queue_work(time_sync_wq, &etr_work);
1196    } else {
1197        if (etr_port1_online == value)
1198            goto out; /* Nothing to do. */
1199        etr_port1_online = value;
1200        if (etr_port0_online && etr_port1_online)
1201            set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1202        else
1203            clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1204        set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1205        queue_work(time_sync_wq, &etr_work);
1206    }
1207out:
1208    mutex_unlock(&clock_sync_mutex);
1209    return count;
1210}
1211
1212static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1213
1214static ssize_t etr_stepping_control_show(struct sys_device *dev,
1215                    struct sysdev_attribute *attr,
1216                    char *buf)
1217{
1218    return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1219               etr_eacr.e0 : etr_eacr.e1);
1220}
1221
1222static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1223
1224static ssize_t etr_mode_code_show(struct sys_device *dev,
1225                struct sysdev_attribute *attr, char *buf)
1226{
1227    if (!etr_port0_online && !etr_port1_online)
1228        /* Status word is not uptodate if both ports are offline. */
1229        return -ENODATA;
1230    return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1231               etr_port0.esw.psc0 : etr_port0.esw.psc1);
1232}
1233
1234static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1235
1236static ssize_t etr_untuned_show(struct sys_device *dev,
1237                struct sysdev_attribute *attr, char *buf)
1238{
1239    struct etr_aib *aib = etr_aib_from_dev(dev);
1240
1241    if (!aib || !aib->slsw.v1)
1242        return -ENODATA;
1243    return sprintf(buf, "%i\n", aib->edf1.u);
1244}
1245
1246static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1247
1248static ssize_t etr_network_id_show(struct sys_device *dev,
1249                struct sysdev_attribute *attr, char *buf)
1250{
1251    struct etr_aib *aib = etr_aib_from_dev(dev);
1252
1253    if (!aib || !aib->slsw.v1)
1254        return -ENODATA;
1255    return sprintf(buf, "%i\n", aib->edf1.net_id);
1256}
1257
1258static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1259
1260static ssize_t etr_id_show(struct sys_device *dev,
1261            struct sysdev_attribute *attr, char *buf)
1262{
1263    struct etr_aib *aib = etr_aib_from_dev(dev);
1264
1265    if (!aib || !aib->slsw.v1)
1266        return -ENODATA;
1267    return sprintf(buf, "%i\n", aib->edf1.etr_id);
1268}
1269
1270static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1271
1272static ssize_t etr_port_number_show(struct sys_device *dev,
1273            struct sysdev_attribute *attr, char *buf)
1274{
1275    struct etr_aib *aib = etr_aib_from_dev(dev);
1276
1277    if (!aib || !aib->slsw.v1)
1278        return -ENODATA;
1279    return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1280}
1281
1282static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1283
1284static ssize_t etr_coupled_show(struct sys_device *dev,
1285            struct sysdev_attribute *attr, char *buf)
1286{
1287    struct etr_aib *aib = etr_aib_from_dev(dev);
1288
1289    if (!aib || !aib->slsw.v3)
1290        return -ENODATA;
1291    return sprintf(buf, "%i\n", aib->edf3.c);
1292}
1293
1294static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1295
1296static ssize_t etr_local_time_show(struct sys_device *dev,
1297            struct sysdev_attribute *attr, char *buf)
1298{
1299    struct etr_aib *aib = etr_aib_from_dev(dev);
1300
1301    if (!aib || !aib->slsw.v3)
1302        return -ENODATA;
1303    return sprintf(buf, "%i\n", aib->edf3.blto);
1304}
1305
1306static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1307
1308static ssize_t etr_utc_offset_show(struct sys_device *dev,
1309            struct sysdev_attribute *attr, char *buf)
1310{
1311    struct etr_aib *aib = etr_aib_from_dev(dev);
1312
1313    if (!aib || !aib->slsw.v3)
1314        return -ENODATA;
1315    return sprintf(buf, "%i\n", aib->edf3.buo);
1316}
1317
1318static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1319
1320static struct sysdev_attribute *etr_port_attributes[] = {
1321    &attr_online,
1322    &attr_stepping_control,
1323    &attr_state_code,
1324    &attr_untuned,
1325    &attr_network,
1326    &attr_id,
1327    &attr_port,
1328    &attr_coupled,
1329    &attr_local_time,
1330    &attr_utc_offset,
1331    NULL
1332};
1333
1334static int __init etr_register_port(struct sys_device *dev)
1335{
1336    struct sysdev_attribute **attr;
1337    int rc;
1338
1339    rc = sysdev_register(dev);
1340    if (rc)
1341        goto out;
1342    for (attr = etr_port_attributes; *attr; attr++) {
1343        rc = sysdev_create_file(dev, *attr);
1344        if (rc)
1345            goto out_unreg;
1346    }
1347    return 0;
1348out_unreg:
1349    for (; attr >= etr_port_attributes; attr--)
1350        sysdev_remove_file(dev, *attr);
1351    sysdev_unregister(dev);
1352out:
1353    return rc;
1354}
1355
1356static void __init etr_unregister_port(struct sys_device *dev)
1357{
1358    struct sysdev_attribute **attr;
1359
1360    for (attr = etr_port_attributes; *attr; attr++)
1361        sysdev_remove_file(dev, *attr);
1362    sysdev_unregister(dev);
1363}
1364
1365static int __init etr_init_sysfs(void)
1366{
1367    int rc;
1368
1369    rc = sysdev_class_register(&etr_sysclass);
1370    if (rc)
1371        goto out;
1372    rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1373    if (rc)
1374        goto out_unreg_class;
1375    rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1376    if (rc)
1377        goto out_remove_stepping_port;
1378    rc = etr_register_port(&etr_port0_dev);
1379    if (rc)
1380        goto out_remove_stepping_mode;
1381    rc = etr_register_port(&etr_port1_dev);
1382    if (rc)
1383        goto out_remove_port0;
1384    return 0;
1385
1386out_remove_port0:
1387    etr_unregister_port(&etr_port0_dev);
1388out_remove_stepping_mode:
1389    sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1390out_remove_stepping_port:
1391    sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1392out_unreg_class:
1393    sysdev_class_unregister(&etr_sysclass);
1394out:
1395    return rc;
1396}
1397
1398device_initcall(etr_init_sysfs);
1399
1400/*
1401 * Server Time Protocol (STP) code.
1402 */
1403static int stp_online;
1404static struct stp_sstpi stp_info;
1405static void *stp_page;
1406
1407static void stp_work_fn(struct work_struct *work);
1408static DEFINE_MUTEX(stp_work_mutex);
1409static DECLARE_WORK(stp_work, stp_work_fn);
1410static struct timer_list stp_timer;
1411
1412static int __init early_parse_stp(char *p)
1413{
1414    if (strncmp(p, "off", 3) == 0)
1415        stp_online = 0;
1416    else if (strncmp(p, "on", 2) == 0)
1417        stp_online = 1;
1418    return 0;
1419}
1420early_param("stp", early_parse_stp);
1421
1422/*
1423 * Reset STP attachment.
1424 */
1425static void __init stp_reset(void)
1426{
1427    int rc;
1428
1429    stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
1430    rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1431    if (rc == 0)
1432        set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
1433    else if (stp_online) {
1434        pr_warning("The real or virtual hardware system does "
1435               "not provide an STP interface\n");
1436        free_page((unsigned long) stp_page);
1437        stp_page = NULL;
1438        stp_online = 0;
1439    }
1440}
1441
1442static void stp_timeout(unsigned long dummy)
1443{
1444    queue_work(time_sync_wq, &stp_work);
1445}
1446
1447static int __init stp_init(void)
1448{
1449    if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1450        return 0;
1451    setup_timer(&stp_timer, stp_timeout, 0UL);
1452    time_init_wq();
1453    if (!stp_online)
1454        return 0;
1455    queue_work(time_sync_wq, &stp_work);
1456    return 0;
1457}
1458
1459arch_initcall(stp_init);
1460
1461/*
1462 * STP timing alert. There are three causes:
1463 * 1) timing status change
1464 * 2) link availability change
1465 * 3) time control parameter change
1466 * In all three cases we are only interested in the clock source state.
1467 * If an STP clock source is now available, use it.
1468 */
1469static void stp_timing_alert(struct stp_irq_parm *intparm)
1470{
1471    if (intparm->tsc || intparm->lac || intparm->tcpc)
1472        queue_work(time_sync_wq, &stp_work);
1473}
1474
1475/*
1476 * STP sync check machine check. This is called when the timing state
1477 * changes from the synchronized state to the unsynchronized state.
1478 * After an STP sync check the clock is not in sync. The machine check
1479 * is broadcast to all cpus at the same time.
1480 */
1481void stp_sync_check(void)
1482{
1483    disable_sync_clock(NULL);
1484    queue_work(time_sync_wq, &stp_work);
1485}
1486
1487/*
1488 * STP island condition machine check. This is called when an attached
1489 * server attempts to communicate over an STP link and the servers
1490 * have matching CTN ids and have a valid stratum-1 configuration
1491 * but the configurations do not match.
1492 */
1493void stp_island_check(void)
1494{
1495    disable_sync_clock(NULL);
1496    queue_work(time_sync_wq, &stp_work);
1497}
1498
1499
1500static int stp_sync_clock(void *data)
1501{
1502    static int first;
1503    unsigned long long old_clock, delta;
1504    struct clock_sync_data *stp_sync;
1505    int rc;
1506
1507    stp_sync = data;
1508
1509    if (xchg(&first, 1) == 1) {
1510        /* Slave */
1511        clock_sync_cpu(stp_sync);
1512        return 0;
1513    }
1514
1515    /* Wait until all other cpus entered the sync function. */
1516    while (atomic_read(&stp_sync->cpus) != 0)
1517        cpu_relax();
1518
1519    enable_sync_clock();
1520
1521    rc = 0;
1522    if (stp_info.todoff[0] || stp_info.todoff[1] ||
1523        stp_info.todoff[2] || stp_info.todoff[3] ||
1524        stp_info.tmd != 2) {
1525        old_clock = get_clock();
1526        rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1527        if (rc == 0) {
1528            delta = adjust_time(old_clock, get_clock(), 0);
1529            fixup_clock_comparator(delta);
1530            rc = chsc_sstpi(stp_page, &stp_info,
1531                    sizeof(struct stp_sstpi));
1532            if (rc == 0 && stp_info.tmd != 2)
1533                rc = -EAGAIN;
1534        }
1535    }
1536    if (rc) {
1537        disable_sync_clock(NULL);
1538        stp_sync->in_sync = -EAGAIN;
1539    } else
1540        stp_sync->in_sync = 1;
1541    xchg(&first, 0);
1542    return 0;
1543}
1544
1545/*
1546 * STP work. Check for the STP state and take over the clock
1547 * synchronization if the STP clock source is usable.
1548 */
1549static void stp_work_fn(struct work_struct *work)
1550{
1551    struct clock_sync_data stp_sync;
1552    int rc;
1553
1554    /* prevent multiple execution. */
1555    mutex_lock(&stp_work_mutex);
1556
1557    if (!stp_online) {
1558        chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1559        del_timer_sync(&stp_timer);
1560        goto out_unlock;
1561    }
1562
1563    rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
1564    if (rc)
1565        goto out_unlock;
1566
1567    rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
1568    if (rc || stp_info.c == 0)
1569        goto out_unlock;
1570
1571    /* Skip synchronization if the clock is already in sync. */
1572    if (check_sync_clock())
1573        goto out_unlock;
1574
1575    memset(&stp_sync, 0, sizeof(stp_sync));
1576    get_online_cpus();
1577    atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
1578    stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
1579    put_online_cpus();
1580
1581    if (!check_sync_clock())
1582        /*
1583         * There is a usable clock but the synchronization failed.
1584         * Retry after a second.
1585         */
1586        mod_timer(&stp_timer, jiffies + HZ);
1587
1588out_unlock:
1589    mutex_unlock(&stp_work_mutex);
1590}
1591
1592/*
1593 * STP class sysfs interface functions
1594 */
1595static struct sysdev_class stp_sysclass = {
1596    .name = "stp",
1597};
1598
1599static ssize_t stp_ctn_id_show(struct sysdev_class *class,
1600                struct sysdev_class_attribute *attr,
1601                char *buf)
1602{
1603    if (!stp_online)
1604        return -ENODATA;
1605    return sprintf(buf, "%016llx\n",
1606               *(unsigned long long *) stp_info.ctnid);
1607}
1608
1609static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1610
1611static ssize_t stp_ctn_type_show(struct sysdev_class *class,
1612                struct sysdev_class_attribute *attr,
1613                char *buf)
1614{
1615    if (!stp_online)
1616        return -ENODATA;
1617    return sprintf(buf, "%i\n", stp_info.ctn);
1618}
1619
1620static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1621
1622static ssize_t stp_dst_offset_show(struct sysdev_class *class,
1623                   struct sysdev_class_attribute *attr,
1624                   char *buf)
1625{
1626    if (!stp_online || !(stp_info.vbits & 0x2000))
1627        return -ENODATA;
1628    return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1629}
1630
1631static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1632
1633static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
1634                    struct sysdev_class_attribute *attr,
1635                    char *buf)
1636{
1637    if (!stp_online || !(stp_info.vbits & 0x8000))
1638        return -ENODATA;
1639    return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1640}
1641
1642static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1643
1644static ssize_t stp_stratum_show(struct sysdev_class *class,
1645                struct sysdev_class_attribute *attr,
1646                char *buf)
1647{
1648    if (!stp_online)
1649        return -ENODATA;
1650    return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1651}
1652
1653static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1654
1655static ssize_t stp_time_offset_show(struct sysdev_class *class,
1656                struct sysdev_class_attribute *attr,
1657                char *buf)
1658{
1659    if (!stp_online || !(stp_info.vbits & 0x0800))
1660        return -ENODATA;
1661    return sprintf(buf, "%i\n", (int) stp_info.tto);
1662}
1663
1664static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1665
1666static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
1667                struct sysdev_class_attribute *attr,
1668                char *buf)
1669{
1670    if (!stp_online || !(stp_info.vbits & 0x4000))
1671        return -ENODATA;
1672    return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1673}
1674
1675static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1676             stp_time_zone_offset_show, NULL);
1677
1678static ssize_t stp_timing_mode_show(struct sysdev_class *class,
1679                struct sysdev_class_attribute *attr,
1680                char *buf)
1681{
1682    if (!stp_online)
1683        return -ENODATA;
1684    return sprintf(buf, "%i\n", stp_info.tmd);
1685}
1686
1687static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1688
1689static ssize_t stp_timing_state_show(struct sysdev_class *class,
1690                struct sysdev_class_attribute *attr,
1691                char *buf)
1692{
1693    if (!stp_online)
1694        return -ENODATA;
1695    return sprintf(buf, "%i\n", stp_info.tst);
1696}
1697
1698static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1699
1700static ssize_t stp_online_show(struct sysdev_class *class,
1701                struct sysdev_class_attribute *attr,
1702                char *buf)
1703{
1704    return sprintf(buf, "%i\n", stp_online);
1705}
1706
1707static ssize_t stp_online_store(struct sysdev_class *class,
1708                struct sysdev_class_attribute *attr,
1709                const char *buf, size_t count)
1710{
1711    unsigned int value;
1712
1713    value = simple_strtoul(buf, NULL, 0);
1714    if (value != 0 && value != 1)
1715        return -EINVAL;
1716    if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1717        return -EOPNOTSUPP;
1718    mutex_lock(&clock_sync_mutex);
1719    stp_online = value;
1720    if (stp_online)
1721        set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1722    else
1723        clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1724    queue_work(time_sync_wq, &stp_work);
1725    mutex_unlock(&clock_sync_mutex);
1726    return count;
1727}
1728
1729/*
1730 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
1731 * stp/online but attr_online already exists in this file ..
1732 */
1733static struct sysdev_class_attribute attr_stp_online = {
1734    .attr = { .name = "online", .mode = 0600 },
1735    .show = stp_online_show,
1736    .store = stp_online_store,
1737};
1738
1739static struct sysdev_class_attribute *stp_attributes[] = {
1740    &attr_ctn_id,
1741    &attr_ctn_type,
1742    &attr_dst_offset,
1743    &attr_leap_seconds,
1744    &attr_stp_online,
1745    &attr_stratum,
1746    &attr_time_offset,
1747    &attr_time_zone_offset,
1748    &attr_timing_mode,
1749    &attr_timing_state,
1750    NULL
1751};
1752
1753static int __init stp_init_sysfs(void)
1754{
1755    struct sysdev_class_attribute **attr;
1756    int rc;
1757
1758    rc = sysdev_class_register(&stp_sysclass);
1759    if (rc)
1760        goto out;
1761    for (attr = stp_attributes; *attr; attr++) {
1762        rc = sysdev_class_create_file(&stp_sysclass, *attr);
1763        if (rc)
1764            goto out_unreg;
1765    }
1766    return 0;
1767out_unreg:
1768    for (; attr >= stp_attributes; attr--)
1769        sysdev_class_remove_file(&stp_sysclass, *attr);
1770    sysdev_class_unregister(&stp_sysclass);
1771out:
1772    return rc;
1773}
1774
1775device_initcall(stp_init_sysfs);
1776
