/*
 * Copyright (C) 2004-2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/cpu.h>

#include <asm/sysreg.h>

#include <mach/pm.h>

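/*
 * Read the architectural cycle counter (the COUNT system register);
 * this is the raw tick value for the clocksource registered below.
 */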
static cycle_t read_cycle_count(struct clocksource *cs)
{
	return (cycle_t)sysreg_read(COUNT);
}

/*
 * The architectural cycle count registers are a fine clocksource unless
 * the system idle loop uses sleep states like "idle": the CPU cycles
 * measured by COUNT (and COMPARE) don't happen during sleep states.
 * Their duration also changes if cpufreq changes the CPU clock rate.
 * So we rate the clocksource using COUNT as very low quality.
 */
static struct clocksource counter = {
	.name		= "avr32_counter",
	.rating		= 50,
	.read		= read_cycle_count,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

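/*
 * The timer shares IRQ 0 with oprofile, so bail out with IRQ_NONE if our
 * interrupt isn't actually pending and let the other handler run.
 */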
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = dev_id;

	if (unlikely(!(intc_get_pending(0) & 1)))
		return IRQ_NONE;

	/*
	 * Disable the interrupt until the clockevent subsystem
	 * reprograms it.
	 */
	sysreg_write(COMPARE, 0);

	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler	= timer_interrupt,
	/* Oprofile uses the same irq as the timer, so allow it to be shared */
	.flags		= IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
	.name		= "avr32_comparator",
};

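/*
 * Program the next event 'delta' cycles from now by writing COUNT + delta
 * to COMPARE.  Writing 0 to COMPARE would disable the interrupt, so fall
 * back to 1 if the sum happens to wrap to 0.
 */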
static int comparator_next_event(unsigned long delta,
		struct clock_event_device *evdev)
{
	unsigned long	flags;

	raw_local_irq_save(flags);

	/* The time to read COUNT then update COMPARE must be less
	 * than the min_delta_ns value for this clockevent source.
	 */
	sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);

	raw_local_irq_restore(flags);

	return 0;
}

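/*
 * COUNT stops during CPU sleep states, so while the comparator is in use
 * as a oneshot clockevent the idle loop must poll instead of sleeping;
 * drop back to normal idle when the clockevent is shut down.
 */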
static void comparator_mode(enum clock_event_mode mode,
		struct clock_event_device *evdev)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		pr_debug("%s: start\n", evdev->name);
		/* FALLTHROUGH */
	case CLOCK_EVT_MODE_RESUME:
		/*
		 * If we're using the COUNT and COMPARE registers we
		 * need to force idle poll.
		 */
		cpu_idle_poll_ctrl(true);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		sysreg_write(COMPARE, 0);
		pr_debug("%s: stop\n", evdev->name);
		cpu_idle_poll_ctrl(false);
		break;
	default:
		BUG();
	}
}

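/*
 * One-shot clockevent backed by the COMPARE register.  The mult, delta
 * limits and cpumask fields are filled in at runtime by time_init().
 */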
static struct clock_event_device comparator = {
	.name		= "avr32_comparator",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 16,
	.rating		= 50,
	.set_next_event	= comparator_next_event,
	.set_mode	= comparator_mode,
};

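/*
 * No persistent clock is read here; the boot-time wall clock simply
 * starts at a fixed date (2007-01-01 00:00:00).
 */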
void read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
	ts->tv_nsec = 0;
}

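/*
 * Register the cycle counter as a (low-rated) clocksource and set up the
 * COMPARE comparator as the one-shot clockevent device on IRQ 0.
 */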
void __init time_init(void)
{
	unsigned long counter_hz;
	int ret;

	/* figure rate for counter */
	counter_hz = clk_get_rate(boot_cpu_data.clk);
	ret = clocksource_register_hz(&counter, counter_hz);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	/* setup COMPARE clockevent */
	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
	comparator.cpumask = cpumask_of(0);

	sysreg_write(COMPARE, 0);
	timer_irqaction.dev_id = &comparator;

	ret = setup_irq(0, &timer_irqaction);
	if (ret)
		pr_debug("timer: could not request IRQ 0: %d\n", ret);
	else {
		clockevents_register_device(&comparator);

		pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
				((counter_hz + 500) / 1000) / 1000,
				((counter_hz + 500) / 1000) % 1000);
	}
}