// SPDX-License-Identifier: GPL-2.0+
//
//  Copyright (C) 2000-2001 Deep Blue Solutions
//  Copyright (C) 2002 Shane Nay (shane@minirl.com)
//  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
//  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
 *  - MX1/MXL
 *  - MX21, MX27.
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
	struct irqaction act;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
				imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

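/*
 * Address of the free-running counter register (TCN), saved at clocksource
 * init time; read by the sched_clock callback below and, on ARM, by the
 * delay-timer callback.
 */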
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}
#endif

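/*
 * Register the GPT counter as a 32-bit up-counting mmio clocksource
 * ("mxc_timer1", rating 200), as the sched_clock source and, on ARM, as the
 * timer-based delay implementation, all clocked by clk_per.
 */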
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);
#endif

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
			clocksource_mmio_readl_up);
}

/* clock event */

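/*
 * Program the compare register relative to the current counter value and
 * report -ETIME if the counter has already passed the new compare value,
 * i.e. the requested expiry was too close to be armed in time.
 */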
static int mx1_2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

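/*
 * Same as above for the v2 register layout.  The extra "evt < 0x7fffffff"
 * check means the signed wrap-around comparison is only trusted for deltas
 * below half the counter range; larger deltas cannot be a missed event.
 */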
static int v2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

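/*
 * Shutdown: mask the compare interrupt and park the compare register just
 * behind the current count ("tcn - 3"), so the next match is almost a full
 * 32-bit counter wrap away, then clear anything already pending.
 */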
static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tcn;

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put overhead of interrupt enable/disable into
	 * mxc_set_next_event(), the core has about 4 minutes
	 * to call mxc_set_next_event() or shutdown clock after
	 * mode switching
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	uint32_t tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

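/*
 * Register a oneshot-only clock event device driven by clk_per and bound to
 * CPU0, with a minimum delta of 0xff and a maximum of 0xfffffffe counter
 * ticks, and hook up the timer interrupt.
 */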
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;
	struct irqaction *act = &imxtm->act;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	act->name = "i.MX Timer Tick";
	act->flags = IRQF_TIMER | IRQF_IRQPOLL;
	act->handler = mxc_timer_interrupt;
	act->dev_id = ced;

	return setup_irq(imxtm->irq, act);
}

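/*
 * TCTL setup: enable the timer in free-running mode and select the input
 * clock.  The v2 variants pick the 24 MHz oscillator /8 input when clk_per
 * runs at exactly 3 MHz, otherwise the peripheral clock; on i.MX6DL-class
 * parts the /8 prescaler for the 24 MHz input must also be programmed in
 * TPRER and the 24M enable bit set.
 */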
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx21_gpt_irq_enable,
	.gpt_irq_disable = imx21_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx21_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx6dl_gpt_irq_enable,
	.gpt_irq_disable = imx6dl_gpt_irq_disable,
	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

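/*
 * Common init path: pick the per-SoC ops table, enable the clocks, reset the
 * timer to a known state, program TCTL, and register the clocksource and
 * clock event device.
 */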
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

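/*
 * Non-DT entry point: callers pass the physical base address and IRQ
 * directly, and the clocks are looked up via clk_get_sys().
 */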
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	BUG_ON(!imxtm);

	imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
	imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	imxtm->base = ioremap(pbase, SZ_4K);
	BUG_ON(!imxtm->base);

	imxtm->type = type;
	imxtm->irq = irq;

	_mxc_timer_init(imxtm);
}

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * The same compatible string used to be used for both the i.MX6Q/D
	 * and i.MX6DL/S GPT devices, although they actually have different
	 * programming models.  This is a workaround to keep existing
	 * i.MX6DL/S DTBs working with new kernels.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

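/*
 * DT match table: SoCs that share a GPT programming model reuse the same
 * init function (e.g. i.MX25/50/51/53/6Q use the i.MX31 flavour).
 */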
TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);