1 /* linux/arch/arm/mach-s5pv310/tmu.c
\r
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
\r
4 * http://www.samsung.com/
\r
6 * S5PV310 - TMU driver
\r
8 * This program is free software; you can redistribute it and/or modify
\r
9 * it under the terms of the GNU General Public License version 2 as
\r
10 * published by the Free Software Foundation.
\r
13 #include <linux/module.h>
\r
14 #include <linux/fs.h>
\r
15 #include <linux/string.h>
\r
16 #include <linux/types.h>
\r
17 #include <linux/kernel.h>
\r
18 #include <linux/init.h>
\r
19 #include <linux/delay.h>
\r
20 #include <linux/platform_device.h>
\r
21 #include <linux/power_supply.h>
\r
22 #include <linux/interrupt.h>
\r
23 #include <linux/err.h>
\r
24 #include <linux/io.h>
\r
25 #include <linux/irq.h>
\r
26 #include <linux/gpio.h>
\r
27 #include <linux/slab.h>
\r
28 #include <linux/clk.h>
30 #include <asm/irq.h>
\r
32 #include <mach/regs-tmu.h>
\r
33 #include <mach/cpufreq.h>
\r
34 #include <mach/map.h>
\r
35 #include <plat/s5p-tmu.h>
\r
36 #include <plat/gpio-cfg.h>
\r
/* Support Bootloader parameter setting by SBL interface */
#undef CONFIG_TMU_DEBUG_ENABLE

/* Selectable one room temperature among 3 kinds */
#undef OPERATION_TEMP_BASE_78
#define OPERATION_TEMP_BASE_61

/* DMC auto-refresh timing register offset and period codes */
#define TIMMING_AREF			0x30
#define AUTO_REFRESH_PERIOD_TQ0		0x2E	/* auto refresh period 1.95us */
#define AUTO_REFRESH_PERIOD_NORMAL	0x5D	/* auto refresh period 3.9us */

/*
 * test value for room temperature
 * base operation temp : OPERATION_TEMP_BASE_78
 */
#ifdef OPERATION_TEMP_BASE_78
/* TMU register setting value */
#define THD_TEMP	0x80	/* 78 degree : threshold temp */
#define TRIGGER_LEV0	0x9	/* 87 degree : throttling temperature */
#define TRIGGER_LEV1	0x19	/* 103 degree : Warning temperature */
#define TRIGGER_LEV2	0x20	/* 110 degree : Tripping temperature */
#define TRIGGER_LEV3	0xFF	/* Reserved */

/* interrupt level by celsius degree */
#define TEMP_MIN_CELCIUS	25
#define TEMP_TROTTLED_CELCIUS	87
#define TEMP_TQ0_CELCIUS	85
#define TEMP_WARNING_CELCIUS	103
#define TEMP_TRIPPED_CELCIUS	110
#define TEMP_MAX_CELCIUS	125
#endif

#ifdef OPERATION_TEMP_BASE_61
/* test on 35 celsius base */
#define THD_TEMP	0x6F	/* 61 degree: threshold temp */
#define TRIGGER_LEV0	0x3	/* 64 degree: Throttling temperature */
#define TRIGGER_LEV1	0x2A	/* 103 degree: Warning temperature */
#define TRIGGER_LEV2	0x31	/* 110 degree: Tripping temperature */
#define TRIGGER_LEV3	0xFF	/* Reserved */

#define TEMP_TROTTLED_CELCIUS	64
#define TEMP_WARNING_CELCIUS	103
#define TEMP_TQ0_CELCIUS	85
#define TEMP_TRIPPED_CELCIUS	110
#define TEMP_MIN_CELCIUS	25
#define TEMP_MAX_CELCIUS	125
#endif

#define TMU_SAVE_NUM	10		/* number of registers saved over suspend */
#define VREF_SLOPE	0x07000F02	/* [28:23] vref, [11:8] slope tuning */

#define TMU_DC_VALUE		25	/* temp (C) matching the trim code below */
#define TMU_CODE_25_DEGREE	0x4B
#define EFUSE_MIN_VALUE		60	/* valid range for the e-fuse trim value */
#define EFUSE_AVG_VALUE		80
#define EFUSE_MAX_VALUE		100
#define FIN			(24*1000*1000)
97 static struct workqueue_struct *tmu_monitor_wq;
\r
98 unsigned int tmu_save[TMU_SAVE_NUM];
\r
/*
 * TMU state machine states; the numeric values are passed as-is to the
 * battery driver through POWER_SUPPLY_PROP_TMU_STATUS.
 */
enum tmu_status_t {
	TMU_STATUS_NORMAL = 0,
	TMU_STATUS_THROTTLED,
	TMU_STATUS_WARNING,
	TMU_STATUS_TRIPPED,
	TMU_STATUS_INIT,	/* initial state until first poll classifies temp */
};
108 struct tmu_data_band {
\r
117 struct tmu_data_band tmu_temp_band = {
\r
118 #ifdef OPERATION_TEMP_BASE_61
\r
119 /* 61 : low temp of throttling */
\r
120 .thr_low = TEMP_TROTTLED_CELCIUS - 3,
\r
122 /* 83 : low temp of throttling */
\r
123 .thr_low = TEMP_TROTTLED_CELCIUS - 4,
\r
125 /* 90 : hith temp of warning */
\r
126 .thr_high = TEMP_WARNING_CELCIUS - 5,
\r
127 /* 97 : low temp of warning */
\r
128 .warn_low = TEMP_WARNING_CELCIUS - 6,
\r
129 /* 105 : high temp of warning */
\r
130 .warn_high = TEMP_WARNING_CELCIUS + 3,
\r
131 /* 113 : trip re-try */
\r
132 .trip_retry = TEMP_TRIPPED_CELCIUS + 3,
\r
134 .tq0_temp = TEMP_TQ0_CELCIUS,
\r
137 static DEFINE_MUTEX(tmu_lock);
\r
139 struct s5p_tmu_info {
\r
140 struct device *dev;
\r
143 struct s5p_tmu *ctz;
\r
144 struct tmu_data_band *temp;
\r
146 struct delayed_work monitor_work;
\r
147 struct delayed_work polling_work;
\r
149 unsigned int monitor_period;
\r
150 unsigned int sampling_rate;
\r
152 struct resource *ioarea;
\r
154 unsigned int reg_save[TMU_SAVE_NUM];
\r
159 struct s5p_tmu_info *tmu_info;
\r
161 #ifdef CONFIG_TMU_DEBUG_ENABLE
\r
162 static int set_tmu_test;
\r
163 #ifdef OPERATION_TEMP_BASE_61
\r
164 static int set_thr_stop = (TEMP_TROTTLED_CELCIUS - 3);
\r
166 static int set_thr_stop = (TEMP_TROTTLED_CELCIUS - 4);
\r
168 static int set_thr_temp = TEMP_TROTTLED_CELCIUS;
\r
169 static int set_warn_stop = (TEMP_WARNING_CELCIUS - 6);
\r
170 static int set_warn_temp = TEMP_WARNING_CELCIUS;
\r
171 static int set_trip_temp = TEMP_TRIPPED_CELCIUS;
\r
172 static int set_tq0_temp = TEMP_TQ0_CELCIUS;
\r
174 static int set_sampling_rate;
\r
175 static int set_cpu_level = 3;
\r
177 static int __init tmu_test_param(char *str)
\r
179 int tmu_temp[7] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL,};
\r
181 get_options(str, 7, tmu_temp);
\r
183 set_tmu_test = tmu_temp[0];
\r
184 printk(KERN_INFO "@@@tmu_test enable = %d\n", set_tmu_test);
\r
187 set_thr_stop = tmu_temp[1];
\r
188 printk(KERN_INFO "@@@1st throttling stop temp = %d\n", set_thr_stop);
\r
191 set_thr_temp = tmu_temp[2];
\r
192 printk(KERN_INFO "@@@1st throttling start temp = %d\n", set_thr_temp);
\r
195 set_warn_stop = tmu_temp[3];
\r
196 printk(KERN_INFO "@@@2nd throttling stop temp = %d\n", set_warn_stop);
\r
199 set_warn_temp = tmu_temp[4];
\r
200 printk(KERN_INFO "@@@2nd throttling start temp = %d\n", set_warn_temp);
\r
203 set_trip_temp = tmu_temp[5];
\r
204 printk(KERN_INFO "@@@tripping temp = %d\n", set_trip_temp);
\r
207 set_tq0_temp = tmu_temp[6];
\r
208 printk(KERN_INFO "@@@memory throttling temp = %d\n", set_tq0_temp);
\r
212 early_param("tmu_test", tmu_test_param);
\r
214 static int __init limit_param(char *str)
\r
216 get_option(&str, &set_cpu_level);
\r
217 if (set_cpu_level < 0)
\r
222 early_param("max_cpu_level", limit_param);
\r
224 static int __init sampling_rate_param(char *str)
\r
226 get_option(&str, &set_sampling_rate);
\r
227 if (set_sampling_rate < 0)
\r
228 set_sampling_rate = 0;
\r
232 early_param("tmu_sampling_rate", sampling_rate_param);
\r
234 static void tmu_start_testmode(struct platform_device *pdev)
\r
236 struct s5p_tmu *tz = platform_get_drvdata(pdev);
\r
238 unsigned int thresh_temp_adc, thr_temp_adc, trip_temp_adc;
\r
239 unsigned int warn_temp_adc = 0xFF;
\r
241 clk_enable(tmu_info->clk);
243 /* To use handling routine, change temperature date of tmu info */
\r
244 tmu_info->temp->thr_low = set_thr_stop;
\r
245 tmu_info->temp->thr_high = set_warn_temp - 5;
\r
246 tmu_info->temp->warn_low = set_warn_stop;
\r
247 tmu_info->temp->warn_high = set_warn_temp + 5;
\r
248 tmu_info->temp->trip_retry = set_trip_temp + 3;
\r
249 tmu_info->temp->tq0_temp = set_tq0_temp;
\r
251 pr_info("1st throttling stop_temp = %d, start_temp = %d\n,\
\r
252 2nd throttling stop_temp = %d, start_tmep = %d\n,\
\r
253 tripping temp = %d, tripping retry_temp = %d\n,\
\r
254 memory throttling stop_temp = %d, start_temp = %d\n",
\r
255 tmu_info->temp->thr_low, tmu_info->temp->thr_high - 4,
\r
256 tmu_info->temp->warn_low, tmu_info->temp->warn_high - 3,
\r
257 tmu_info->temp->trip_retry - 3, tmu_info->temp->trip_retry,
\r
258 tmu_info->temp->tq0_temp -5, tmu_info->temp->tq0_temp);
\r
260 /* Compensation temperature THD_TEMP */
\r
261 thresh_temp_adc = set_thr_stop + tz->data.te1 - TMU_DC_VALUE;
\r
262 thr_temp_adc = set_thr_temp + tz->data.te1 - TMU_DC_VALUE
\r
264 warn_temp_adc = set_warn_temp + tz->data.te1 - TMU_DC_VALUE
\r
266 trip_temp_adc = set_trip_temp + tz->data.te1 - TMU_DC_VALUE
\r
268 pr_info("Compensated Threshold: 0x%2x\n", thresh_temp_adc);
\r
270 /* Set interrupt trigger level */
\r
271 __raw_writel(thresh_temp_adc, tz->tmu_base + THRESHOLD_TEMP);
\r
272 __raw_writel(thr_temp_adc, tz->tmu_base + TRG_LEV0);
\r
273 __raw_writel(warn_temp_adc, tz->tmu_base + TRG_LEV1);
\r
274 __raw_writel(trip_temp_adc, tz->tmu_base + TRG_LEV2);
\r
275 __raw_writel(TRIGGER_LEV3, tz->tmu_base + TRG_LEV3);
\r
277 pr_info("Cooling: %dc THD_TEMP:0x%02x: TRIG_LEV0: 0x%02x\
\r
278 TRIG_LEV1: 0x%02x TRIG_LEV2: 0x%02x, TRIG_LEV3: 0x%02x\n",
\r
280 __raw_readl(tz->tmu_base + THRESHOLD_TEMP),
\r
281 __raw_readl(tz->tmu_base + TRG_LEV0),
\r
282 __raw_readl(tz->tmu_base + TRG_LEV1),
\r
283 __raw_readl(tz->tmu_base + TRG_LEV2),
\r
284 __raw_readl(tz->tmu_base + TRG_LEV3));
\r
287 /* TMU core enable */
\r
288 con = __raw_readl(tz->tmu_base + TMU_CON0);
\r
291 __raw_writel(con, tz->tmu_base + TMU_CON0);
\r
293 /*LEV0 LEV1 LEV2 interrupt enable */
\r
294 __raw_writel(INTEN0 | INTEN1 | INTEN2, tz->tmu_base + INTEN);
\r
296 clk_disable(tmu_info->clk);
302 static void set_refresh_rate(unsigned int auto_refresh)
\r
305 * uRlk = FIN / 100000;
\r
306 * refresh_usec = (unsigned int)(fMicrosec * 10);
\r
307 * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1;
\r
309 clk_enable(tmu_info->clk);
311 /* change auto refresh period of dmc0 */
\r
312 __raw_writel(auto_refresh, S5P_VA_DMC0 + TIMMING_AREF);
\r
314 /* change auto refresh period of dmc1 */
\r
315 __raw_writel(auto_refresh, S5P_VA_DMC1 + TIMMING_AREF);
\r
317 clk_disable(tmu_info->clk);
320 static int tmu_tripped_cb(int state)
\r
322 struct power_supply *psy = power_supply_get_by_name("battery");
\r
323 union power_supply_propval value;
\r
326 pr_err("%s:fail to get batter ps\n", __func__);
\r
329 pr_info("%s:is pass, state %d.\n", __func__, state);
\r
332 case TMU_STATUS_NORMAL:
\r
333 value.intval = TMU_STATUS_NORMAL;
\r
335 case TMU_STATUS_THROTTLED:
\r
336 value.intval = TMU_STATUS_THROTTLED;
\r
338 case TMU_STATUS_WARNING:
\r
339 value.intval = TMU_STATUS_WARNING;
\r
341 case TMU_STATUS_TRIPPED:
\r
342 value.intval = TMU_STATUS_TRIPPED;
\r
345 pr_warn("value is not correct.\n");
\r
349 return psy->set_property(psy, POWER_SUPPLY_PROP_TMU_STATUS, &value);
\r
352 #ifdef CONFIG_TMU_DEBUG_ENABLE
\r
353 static void tmu_mon_timer(struct work_struct *work)
\r
355 unsigned char cur_temp_adc;
\r
358 clk_enable(tmu_info->clk);
360 /* Compensation temperature */
\r
362 __raw_readl(tmu_info->ctz->tmu_base + CURRENT_TEMP) & 0xff;
\r
363 cur_temp = cur_temp_adc - tmu_info->ctz->data.te1 + TMU_DC_VALUE;
\r
364 if (cur_temp < 25) {
\r
365 /* temperature code range is from 25 to 125 */
\r
366 pr_info("current temp is under 25 celsius degree!\n");
\r
370 pr_info("cur temp = %d, adc_value = 0x%02x\n", cur_temp, cur_temp_adc);
\r
372 if (set_tmu_test) {
\r
373 pr_info("Current: %d c, Cooling: %d c, Throttling: %d c, \
\r
374 Warning: %d c, Tripping: %d c\n",
\r
375 cur_temp, tmu_info->temp->thr_low,
\r
376 set_thr_temp, set_warn_temp, set_trip_temp);
\r
378 pr_info("Current: %d c, Cooling: %d c Throttling: %d c \
\r
379 Warning: %d c Tripping: %d c\n",
\r
380 cur_temp, tmu_info->temp->thr_low,
\r
381 TEMP_TROTTLED_CELCIUS,
\r
382 TEMP_WARNING_CELCIUS,
\r
383 TEMP_TRIPPED_CELCIUS);
\r
386 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->monitor_work,
\r
387 tmu_info->monitor_period);
\r
389 clk_disable(tmu_info->clk);
392 static void tmu_poll_testmode(void)
\r
394 unsigned char cur_temp_adc;
\r
396 int thr_temp, trip_temp, warn_temp;
\r
397 static int cpufreq_limited_thr = 0;
\r
398 static int cpufreq_limited_warn = 0;
\r
399 static int send_msg_battery = 0;
\r
400 static int auto_refresh_changed = 0;
\r
402 clk_enable(tmu_info->clk);
404 thr_temp = set_thr_temp;
\r
405 warn_temp = set_warn_temp;
\r
406 trip_temp = set_trip_temp;
\r
408 /* Compensation temperature */
\r
410 __raw_readl(tmu_info->ctz->tmu_base + CURRENT_TEMP) & 0xff;
\r
411 cur_temp = cur_temp_adc - tmu_info->ctz->data.te1 + TMU_DC_VALUE;
\r
412 if (cur_temp < 25) {
\r
413 /* temperature code range is from 25 to 125 */
\r
414 pr_info("current temp is under 25 celsius degree!\n");
\r
418 pr_info("current temp = %d, tmu_state = %d\n",
\r
419 cur_temp, tmu_info->ctz->data.tmu_flag);
\r
421 switch (tmu_info->ctz->data.tmu_flag) {
\r
422 case TMU_STATUS_NORMAL:
\r
423 if (cur_temp <= tmu_info->temp->thr_low) {
\r
424 //cancel_delayed_work(&tmu_info->polling_work);
\r
425 if (tmu_tripped_cb(TMU_STATUS_NORMAL) < 0)
\r
426 pr_err("Error inform to battery driver !\n");
\r
428 pr_info("normal: interrupt enable.\n");
\r
430 /* To prevent from interrupt by current pending bit */
\r
431 __raw_writel(INTCLEARALL,
\r
432 tmu_info->ctz->tmu_base + INTCLEAR);
\r
433 enable_irq(tmu_info->irq);
\r
435 clk_disable(tmu_info->clk);
440 if (cur_temp >= set_thr_temp) { /* 85 */
\r
441 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
442 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L2);
\r
443 cpufreq_limited_thr = 1;
\r
444 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
445 pr_err("Error inform to battery driver !\n");
\r
447 pr_info("normal->throttle:\
\r
448 set cpufreq upper limit.\n");
\r
452 case TMU_STATUS_THROTTLED:
\r
453 if (cur_temp >= set_thr_temp && !(cpufreq_limited_thr)) {
\r
454 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L2);
\r
455 cpufreq_limited_thr = 1;
\r
456 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
457 pr_err("Error inform to battery driver !\n");
\r
459 pr_info("throttling:\
\r
460 set cpufreq upper limit.\n");
\r
463 if (cur_temp <= tmu_info->temp->thr_low) {
\r
464 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
465 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
466 cpufreq_limited_thr = 0;
\r
467 pr_info("throttling->normal: free cpufreq upper limit.\n");
\r
470 if (cur_temp >= set_warn_temp) { /* 100 */
\r
471 tmu_info->ctz->data.tmu_flag = TMU_STATUS_WARNING;
\r
472 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
473 cpufreq_limited_thr = 0;
\r
474 if (set_cpu_level == 3)
\r
475 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
\r
476 CPU_L3); /* CPU_L3 */
\r
478 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
\r
479 CPU_L4); /* CPU_L4 */
\r
481 cpufreq_limited_warn = 1;
\r
482 if (tmu_tripped_cb(TMU_STATUS_WARNING) < 0)
\r
483 pr_err("Error inform to battery driver !\n");
\r
485 pr_info("throttling->warning:\
\r
486 up cpufreq upper limit.\n");
\r
490 case TMU_STATUS_WARNING:
\r
491 if (cur_temp >= set_warn_temp && !(cpufreq_limited_warn)) { /* 100 */
\r
492 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
493 cpufreq_limited_thr = 0;
\r
494 if (set_cpu_level == 3)
\r
495 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
\r
496 CPU_L3); /* CPU_L3 */
\r
498 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
\r
499 CPU_L4); /* CPU_L4 */
\r
501 cpufreq_limited_warn = 1;
\r
502 if (tmu_tripped_cb(TMU_STATUS_WARNING) < 0)
\r
503 pr_err("Error inform to battery driver !\n");
\r
505 pr_info("warning: set cpufreq upper limit.\n");
\r
508 /* if (cur_temp < tmu_info->band->warn_low) { */
\r
509 if (cur_temp < set_warn_stop) {
\r
510 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
511 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
512 cpufreq_limited_warn = 0;
\r
513 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
\r
514 CPU_L2); /* CPU_L2 */
\r
515 cpufreq_limited_thr = 1;
\r
516 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
517 pr_err("Error inform to battery driver !\n");
\r
519 pr_info("warning->throttling:\
\r
520 down cpufreq upper limit.\n");
\r
523 if (cur_temp >= set_trip_temp) { /* 110 */
\r
524 tmu_info->ctz->data.tmu_flag = TMU_STATUS_TRIPPED;
\r
525 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
526 pr_err("Error inform to battery driver !\n");
\r
528 pr_info("warning->tripping:\
\r
529 waiting shutdown !!!\n");
\r
533 case TMU_STATUS_INIT: /* sned tmu initial status to battery drvier */
\r
534 disable_irq(tmu_info->irq);
\r
536 if (cur_temp <= tmu_info->temp->thr_low)
\r
537 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
539 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
542 case TMU_STATUS_TRIPPED:
\r
543 if (cur_temp >= set_trip_temp && !(send_msg_battery)) {
\r
544 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
545 pr_err("Error inform to battery driver !\n");
\r
547 pr_info("tripping: waiting shutdown.\n");
\r
548 send_msg_battery = 1;
\r
552 if (cur_temp >= (set_trip_temp + 5)) {
\r
553 panic("Emergency!!!!\
\r
554 tmu tripping event is not treated! \n");
\r
557 if (cur_temp >= tmu_info->temp->trip_retry) {
\r
558 pr_warn("WARNING!!: try to send msg to\
\r
559 battery driver again\n");
\r
560 send_msg_battery = 0;
\r
564 if (cur_temp <= tmu_info->temp->thr_low) {
\r
565 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
566 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
567 cpufreq_limited_thr = 0;
\r
568 pr_info("tripping->normal:\
\r
569 check! occured only test mode.\n");
\r
574 pr_warn("bug: checked tmu_state.\n");
\r
575 if (cur_temp < tmu_info->temp->warn_high) {
\r
576 tmu_info->ctz->data.tmu_flag = TMU_STATUS_WARNING;
\r
578 tmu_info->ctz->data.tmu_flag = TMU_STATUS_TRIPPED;
\r
579 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
580 pr_err("Error inform to battery driver !\n");
\r
585 if (cur_temp >= tmu_info->temp->tq0_temp) {
\r
586 if (!(auto_refresh_changed)) {
\r
587 pr_info("set auto_refresh 1.95us\n");
\r
588 set_refresh_rate(AUTO_REFRESH_PERIOD_TQ0);
\r
589 auto_refresh_changed = 1;
\r
591 } else if (cur_temp <= (tmu_info->temp->tq0_temp - 5)) {
\r
592 if (auto_refresh_changed) {
\r
593 pr_info("set auto_refresh 3.9us\n");
\r
594 set_refresh_rate(AUTO_REFRESH_PERIOD_NORMAL);
\r
595 auto_refresh_changed = 0;
\r
599 /* rescheduling next work */
\r
600 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->polling_work,
\r
601 tmu_info->sampling_rate);
\r
603 clk_disable(tmu_info->clk);
607 static void tmu_poll_timer(struct work_struct *work)
\r
610 static int cpufreq_limited_thr = 0;
\r
611 static int cpufreq_limited_warn = 0;
\r
612 static int send_msg_battery = 0;
\r
613 static int auto_refresh_changed = 0;
\r
615 #ifdef CONFIG_TMU_DEBUG_ENABLE
\r
616 if (set_tmu_test) {
\r
617 tmu_poll_testmode();
\r
621 mutex_lock(&tmu_lock);
\r
623 clk_enable(tmu_info->clk);
625 /* Compensation temperature */
\r
626 cur_temp = (__raw_readl(tmu_info->ctz->tmu_base + CURRENT_TEMP) & 0xff)
\r
627 - tmu_info->ctz->data.te1 + TMU_DC_VALUE;
\r
628 if (cur_temp < 25) {
\r
629 /* temperature code range is from 25 to 125 */
\r
630 pr_info("current temp is under 25 celsius degree!\n");
\r
633 pr_info("current temp = %d, tmu_state = %d\n",
\r
634 cur_temp, tmu_info->ctz->data.tmu_flag);
\r
636 switch (tmu_info->ctz->data.tmu_flag) {
\r
637 case TMU_STATUS_NORMAL:
\r
638 if (cur_temp <= tmu_info->temp->thr_low) {
\r
639 if (tmu_tripped_cb(TMU_STATUS_NORMAL) < 0)
\r
640 pr_err("Error inform to battery driver !\n");
\r
642 pr_info("normal: interrupt enable.\n");
\r
644 /* clear to prevent from interfupt by peindig bit */
\r
645 __raw_writel(INTCLEARALL,
\r
646 tmu_info->ctz->tmu_base + INTCLEAR);
\r
647 enable_irq(tmu_info->irq);
\r
649 clk_disable(tmu_info->clk);
651 mutex_unlock(&tmu_lock);
\r
654 if (cur_temp >= TEMP_TROTTLED_CELCIUS) { /* 87 */
\r
655 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
656 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L2);
\r
657 cpufreq_limited_thr = 1;
\r
658 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
659 pr_err("Error inform to battery driver !\n");
\r
661 pr_info("normal->throttle:\
\r
662 set cpufreq upper limit.\n");
\r
666 case TMU_STATUS_THROTTLED:
\r
667 if (cur_temp >= TEMP_TROTTLED_CELCIUS &&
\r
668 !(cpufreq_limited_thr)) {
\r
669 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L2);
\r
670 cpufreq_limited_thr = 1;
\r
671 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
672 pr_err("Error inform to battery driver !\n");
\r
674 pr_info("throttling:\
\r
675 set cpufreq upper limit.\n");
\r
677 if (cur_temp <= tmu_info->temp->thr_low) {
\r
678 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
679 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
680 cpufreq_limited_thr = 0;
\r
681 pr_info("throttling->normal:\
\r
682 free cpufreq upper limit.\n");
\r
684 if (cur_temp >= TEMP_WARNING_CELCIUS) { /* 103 */
\r
685 tmu_info->ctz->data.tmu_flag = TMU_STATUS_WARNING;
\r
686 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
687 cpufreq_limited_thr = 0;
\r
688 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L4); /* CPU_L4 */
\r
689 cpufreq_limited_warn = 1;
\r
690 if (tmu_tripped_cb(TMU_STATUS_WARNING) < 0)
\r
691 pr_err("Error inform to battery driver !\n");
\r
693 pr_info("throttling->warning:\
\r
694 set cpufreq upper limit.\n");
\r
698 case TMU_STATUS_WARNING:
\r
699 if (cur_temp >= TEMP_WARNING_CELCIUS &&
\r
700 !(cpufreq_limited_warn)) { /* 103 */
\r
701 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
702 cpufreq_limited_thr = 0;
\r
703 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L4);
\r
705 cpufreq_limited_warn = 1;
\r
706 if (tmu_tripped_cb(TMU_STATUS_WARNING) < 0)
\r
707 pr_err("Error inform to battery driver !\n");
\r
709 pr_info("warning: set cpufreq upper limit.\n");
\r
711 if (cur_temp <= tmu_info->temp->warn_low) {
\r
712 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
713 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
714 cpufreq_limited_warn = 0;
\r
715 s5pv310_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, CPU_L2);
\r
716 cpufreq_limited_thr = 1;
\r
717 if (tmu_tripped_cb(TMU_STATUS_THROTTLED) < 0)
\r
718 pr_err("Error inform to battery driver !\n");
\r
720 pr_info("warning->throttling:\
\r
721 up cpufreq upper limit.\n");
\r
723 if (cur_temp >= TEMP_TRIPPED_CELCIUS) { /* 110 */
\r
724 tmu_info->ctz->data.tmu_flag = TMU_STATUS_TRIPPED;
\r
725 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
726 pr_err("Error inform to battery driver !\n");
\r
728 pr_info("warning->tripping:\
\r
729 waiting shutdown !!!\n");
\r
733 case TMU_STATUS_TRIPPED:
\r
734 /* 1st throttling 110 */
\r
735 if (cur_temp >= TEMP_TRIPPED_CELCIUS && !(send_msg_battery)) {
\r
736 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
737 pr_err("Error inform to battery driver !\n");
\r
739 pr_info("tripping: waiting shutdown.\n");
\r
740 send_msg_battery = 1;
\r
743 if (cur_temp >= (TEMP_MAX_CELCIUS - 5)) { /* 120 */
\r
744 panic("Emergency!!!!\
\r
745 tmu tripping event is not treated! \n");
\r
748 if (cur_temp >= tmu_info->temp->trip_retry) {
\r
749 pr_warn("WARNING!!: try to send msg to\
\r
750 battery driver again\n");
\r
751 send_msg_battery = 0;
\r
755 if (cur_temp <= tmu_info->temp->thr_low) {
\r
756 s5pv310_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
\r
757 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
758 cpufreq_limited_thr = 0;
\r
759 pr_info("tripping->normal:\
\r
760 Check! occured only test mode.\n");
\r
764 case TMU_STATUS_INIT: /* sned tmu initial status to battery drvier */
\r
765 disable_irq(tmu_info->irq);
\r
767 if (cur_temp <= tmu_info->temp->thr_low)
\r
768 tmu_info->ctz->data.tmu_flag = TMU_STATUS_NORMAL;
\r
770 tmu_info->ctz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
774 pr_warn("Bug: checked tmu_state.\n");
\r
775 if (cur_temp < tmu_info->temp->warn_high) {
\r
776 tmu_info->ctz->data.tmu_flag = TMU_STATUS_WARNING;
\r
778 tmu_info->ctz->data.tmu_flag = TMU_STATUS_TRIPPED;
\r
779 if (tmu_tripped_cb(TMU_STATUS_TRIPPED) < 0)
\r
780 pr_err("Error inform to battery driver !\n");
\r
785 if (cur_temp >= TEMP_TQ0_CELCIUS) { /* 85 */
\r
786 if (!(auto_refresh_changed)) {
\r
787 pr_info("set auto_refresh 1.95us\n");
\r
788 set_refresh_rate(AUTO_REFRESH_PERIOD_TQ0);
\r
789 auto_refresh_changed = 1;
\r
791 } else if (cur_temp <= (TEMP_TQ0_CELCIUS - 5)) { /* 80 */
\r
792 if (auto_refresh_changed) {
\r
793 pr_info("set auto_refresh 3.9us\n");
\r
794 set_refresh_rate(AUTO_REFRESH_PERIOD_NORMAL);
\r
795 auto_refresh_changed = 0;
\r
799 /* rescheduling next work */
\r
800 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->polling_work,
\r
801 tmu_info->sampling_rate);
\r
803 clk_disable(tmu_info->clk);
805 mutex_unlock(&tmu_lock);
\r
810 static int tmu_initialize(struct platform_device *pdev)
\r
812 struct s5p_tmu *tz = platform_get_drvdata(pdev);
\r
814 unsigned int te_temp;
\r
816 clk_enable(tmu_info->clk);
818 __raw_writel(INTCLEAR2, tz->tmu_base + INTCLEAR);
\r
820 en = (__raw_readl(tz->tmu_base + TMU_STATUS) & 0x1);
\r
823 dev_err(&pdev->dev, "failed to start tmu drvier\n");
\r
825 clk_disable(tmu_info->clk);
830 /* get the compensation parameter */
\r
831 te_temp = __raw_readl(tz->tmu_base + TRIMINFO);
\r
832 tz->data.te1 = te_temp & TRIM_TEMP_MASK;
\r
833 tz->data.te2 = ((te_temp >> 8) & TRIM_TEMP_MASK);
\r
835 pr_info("%s: te_temp = 0x%08x, low 8bit = %d, high 24 bit = %d\n",
\r
836 __func__, te_temp, tz->data.te1, tz->data.te2);
\r
838 if ((EFUSE_MIN_VALUE > tz->data.te1) || (tz->data.te1 > EFUSE_MAX_VALUE)
\r
839 || (tz->data.te2 != 0))
\r
840 tz->data.te1 = EFUSE_AVG_VALUE;
\r
842 /* Need to initial regsiter setting after getting parameter info */
\r
843 /* [28:23] vref [11:8] slope - Tunning parameter */
\r
844 __raw_writel(VREF_SLOPE, tz->tmu_base + TMU_CON0);
\r
846 clk_disable(tmu_info->clk);
851 static void tmu_start(struct platform_device *pdev)
\r
853 struct s5p_tmu *tz = platform_get_drvdata(pdev);
\r
855 unsigned int thresh_temp_adc;
\r
857 clk_enable(tmu_info->clk);
859 __raw_writel(INTCLEARALL, tz->tmu_base + INTCLEAR);
\r
861 #ifdef CONFIG_TMU_DEBUG_ENABLE
\r
862 if (set_tmu_test) {
\r
863 tmu_start_testmode(pdev);
\r
868 pr_info("1st throttling stop_temp = %d, start_temp = %d\n\
\r
869 2nd throttling stop_temp = %d, start_tmep = %d\n\
\r
870 tripping temp = %d, tripping retry_temp = %d\n\
\r
871 memory throttling stop_temp = %d, start_temp = %d\n",
\r
872 tmu_info->temp->thr_low, tmu_info->temp->thr_high - 4,
\r
873 tmu_info->temp->warn_low, tmu_info->temp->warn_high - 3,
\r
874 tmu_info->temp->trip_retry - 3, tmu_info->temp->trip_retry,
\r
875 tmu_info->temp->tq0_temp -5, tmu_info->temp->tq0_temp);
\r
877 /* Compensation temperature THD_TEMP */
\r
878 thresh_temp_adc = THD_TEMP + tz->data.te1 - TMU_CODE_25_DEGREE;
\r
879 pr_info("Compensated Threshold: 0x%2x\n", thresh_temp_adc);
\r
881 /* Set interrupt trigger level */
\r
882 __raw_writel(thresh_temp_adc, tz->tmu_base + THRESHOLD_TEMP);
\r
883 __raw_writel(TRIGGER_LEV0, tz->tmu_base + TRG_LEV0);
\r
884 __raw_writel(TRIGGER_LEV1, tz->tmu_base + TRG_LEV1);
\r
885 __raw_writel(TRIGGER_LEV2, tz->tmu_base + TRG_LEV2);
\r
886 __raw_writel(TRIGGER_LEV3, tz->tmu_base + TRG_LEV3);
\r
889 /* TMU core enable */
\r
890 con = __raw_readl(tz->tmu_base + TMU_CON0);
\r
893 __raw_writel(con, tz->tmu_base + TMU_CON0);
\r
895 /*LEV0 LEV1 LEV2 interrupt enable */
\r
896 __raw_writel(INTEN0 | INTEN1 | INTEN2, tz->tmu_base + INTEN);
\r
898 clk_disable(tmu_info->clk);
900 pr_info("Cooling: %dc THD_TEMP:0x%02x: TRIG_LEV0: 0x%02x\
\r
901 TRIG_LEV1: 0x%02x TRIG_LEV2: 0x%02x\n",
\r
904 THD_TEMP + TRIGGER_LEV0,
\r
905 THD_TEMP + TRIGGER_LEV1,
\r
906 THD_TEMP + TRIGGER_LEV2);
\r
909 static irqreturn_t s5p_tmu_irq(int irq, void *id)
\r
911 struct s5p_tmu *tz = id;
\r
912 unsigned int status;
\r
914 disable_irq_nosync(irq);
\r
916 clk_enable(tmu_info->clk);
918 status = __raw_readl(tz->tmu_base + INTSTAT);
\r
920 pr_info("TMU interrupt occured : status = 0x%08x\n", status);
\r
922 if (status & INTSTAT2) {
\r
923 tz->data.tmu_flag = TMU_STATUS_TRIPPED;
\r
924 __raw_writel(INTCLEAR2, tz->tmu_base + INTCLEAR);
\r
925 } else if (status & INTSTAT1) {
\r
926 tz->data.tmu_flag = TMU_STATUS_WARNING;
\r
927 __raw_writel(INTCLEAR1, tz->tmu_base + INTCLEAR);
\r
928 } else if (status & INTSTAT0) {
\r
929 tz->data.tmu_flag = TMU_STATUS_THROTTLED;
\r
930 __raw_writel(INTCLEAR0, tz->tmu_base + INTCLEAR);
\r
932 pr_err("%s: TMU interrupt error\n", __func__);
\r
933 __raw_writel(INTCLEARALL, tz->tmu_base + INTCLEAR);
\r
935 queue_delayed_work_on(0, tmu_monitor_wq,
\r
936 &tmu_info->polling_work, tmu_info->sampling_rate / 2);
\r
938 clk_disable(tmu_info->clk);
943 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->polling_work,
\r
944 tmu_info->sampling_rate);
\r
946 clk_disable(tmu_info->clk);
948 return IRQ_HANDLED;
\r
951 static int __devinit s5p_tmu_probe(struct platform_device *pdev)
\r
953 struct s5p_tmu *tz = platform_get_drvdata(pdev);
\r
954 struct resource *res;
\r
957 pr_debug("%s: probe=%p\n", __func__, pdev);
\r
959 tmu_info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL);
\r
961 dev_err(&pdev->dev, "failed to alloc memory!\n");
\r
965 tmu_info->dev = &pdev->dev;
\r
966 tmu_info->ctz = tz;
\r
967 tmu_info->ctz->data.tmu_flag = TMU_STATUS_INIT;
\r
968 tmu_info->temp = &tmu_temp_band;
\r
969 /* To poll current temp, set sampling rate to ONE second sampling */
\r
970 tmu_info->sampling_rate = usecs_to_jiffies(1000 * 1000);
\r
971 tmu_info->clk = clk_get(NULL, "tmu_apbif");
972 if (IS_ERR(tmu_info->clk)) {
973 ret = PTR_ERR(tmu_info->clk);
974 dev_err(&pdev->dev, "failed to get clock\n");
978 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
980 dev_err(&pdev->dev, "failed to get memory region resource\n");
\r
985 tmu_info->ioarea = request_mem_region(res->start,
\r
986 res->end-res->start+1,
\r
988 if (!(tmu_info->ioarea)) {
\r
989 dev_err(&pdev->dev, "failed to reserve memory region\n");
\r
994 tz->tmu_base = ioremap(res->start, (res->end - res->start) + 1);
\r
995 if (!(tz->tmu_base)) {
\r
996 dev_err(&pdev->dev, "failed ioremap()\n");
\r
1001 tmu_monitor_wq = create_freezeable_workqueue(dev_name(&pdev->dev));
\r
1002 if (!tmu_monitor_wq) {
\r
1003 pr_info("Creation of tmu_monitor_wq failed\n");
\r
1007 #ifdef CONFIG_TMU_DEBUG_ENABLE
\r
1008 if (set_sampling_rate) {
\r
1009 tmu_info->sampling_rate =
\r
1010 usecs_to_jiffies(set_sampling_rate * 1000);
\r
1011 tmu_info->monitor_period =
\r
1012 usecs_to_jiffies(set_sampling_rate * 10 * 1000);
\r
1014 /* 10sec monitroing */
\r
1015 tmu_info->monitor_period = usecs_to_jiffies(10000 * 1000);
\r
1018 INIT_DELAYED_WORK_DEFERRABLE(&tmu_info->monitor_work, tmu_mon_timer);
\r
1019 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->monitor_work,
\r
1020 tmu_info->monitor_period);
\r
1023 INIT_DELAYED_WORK_DEFERRABLE(&tmu_info->polling_work, tmu_poll_timer);
\r
1025 /* rescheduling next work to inform tmu status to battery driver */
\r
1026 queue_delayed_work_on(0, tmu_monitor_wq, &tmu_info->polling_work,
\r
1027 tmu_info->sampling_rate * 10);
\r
1029 tmu_info->irq = platform_get_irq(pdev, 0);
\r
1030 if (tmu_info->irq < 0) {
\r
1031 dev_err(&pdev->dev, "no irq for thermal\n");
\r
1032 ret = tmu_info->irq;
\r
1036 ret = request_irq(tmu_info->irq, s5p_tmu_irq,
\r
1037 IRQF_DISABLED, "s5p-tmu interrupt", tz);
\r
1039 dev_err(&pdev->dev, "IRQ%d error %d\n", tmu_info->irq, ret);
\r
1043 ret = tmu_initialize(pdev);
\r
1052 cancel_delayed_work(&tmu_info->polling_work);
\r
1053 if (tmu_info->irq >= 0)
\r
1054 free_irq(tmu_info->irq, tz);
\r
1057 iounmap(tz->tmu_base);
\r
1060 release_resource(tmu_info->ioarea);
\r
1061 kfree(tmu_info->ioarea);
\r
1063 clk_put(tmu_info->clk);
1069 dev_err(&pdev->dev, "initialization failed.\n");
\r
1074 static int __devinit s5p_tmu_remove(struct platform_device *pdev)
\r
1076 struct s5p_tmu *tz = platform_get_drvdata(pdev);
\r
1078 cancel_delayed_work(&tmu_info->polling_work);
\r
1080 clk_put(tmu_info->clk);
1082 if (tmu_info->irq >= 0)
\r
1083 free_irq(tmu_info->irq, tz);
\r
1085 iounmap(tz->tmu_base);
\r
1087 release_resource(tmu_info->ioarea);
\r
1088 kfree(tmu_info->ioarea);
\r
1093 pr_info("%s is removed\n", dev_name(&pdev->dev));
\r
1098 static int s5p_tmu_suspend(struct device *dev)
\r
1100 struct s5p_tmu *tz = dev_get_drvdata(dev);
\r
1102 clk_enable(tmu_info->clk);
1104 /* save tmu register value */
\r
1105 tmu_info->reg_save[0] = __raw_readl(tz->tmu_base + TMU_CON0);
\r
1106 tmu_info->reg_save[1] = __raw_readl(tz->tmu_base + SAMPLING_INTERNAL);
\r
1107 tmu_info->reg_save[2] = __raw_readl(tz->tmu_base + CNT_VALUE0);
\r
1108 tmu_info->reg_save[3] = __raw_readl(tz->tmu_base + CNT_VALUE1);
\r
1109 tmu_info->reg_save[4] = __raw_readl(tz->tmu_base + THRESHOLD_TEMP);
\r
1110 tmu_info->reg_save[5] = __raw_readl(tz->tmu_base + INTEN);
\r
1111 tmu_info->reg_save[6] = __raw_readl(tz->tmu_base + TRG_LEV0);
\r
1112 tmu_info->reg_save[7] = __raw_readl(tz->tmu_base + TRG_LEV1);
\r
1113 tmu_info->reg_save[8] = __raw_readl(tz->tmu_base + TRG_LEV2);
\r
1114 tmu_info->reg_save[9] = __raw_readl(tz->tmu_base + TRG_LEV3);
\r
1116 disable_irq(tmu_info->irq);
\r
1118 clk_disable(tmu_info->clk);
1123 static int s5p_tmu_resume(struct device *dev)
\r
1125 struct s5p_tmu *tz = dev_get_drvdata(dev);
\r
1127 clk_enable(tmu_info->clk);
1129 /* save tmu register value */
\r
1130 __raw_writel(tmu_info->reg_save[0], tz->tmu_base + TMU_CON0);
\r
1131 __raw_writel(tmu_info->reg_save[1], tz->tmu_base + SAMPLING_INTERNAL);
\r
1132 __raw_writel(tmu_info->reg_save[2], tz->tmu_base + CNT_VALUE0);
\r
1133 __raw_writel(tmu_info->reg_save[3], tz->tmu_base + CNT_VALUE1);
\r
1134 __raw_writel(tmu_info->reg_save[4], tz->tmu_base + THRESHOLD_TEMP);
\r
1135 __raw_writel(tmu_info->reg_save[5], tz->tmu_base + INTEN);
\r
1136 __raw_writel(tmu_info->reg_save[6], tz->tmu_base + TRG_LEV0);
\r
1137 __raw_writel(tmu_info->reg_save[7], tz->tmu_base + TRG_LEV1);
\r
1138 __raw_writel(tmu_info->reg_save[8], tz->tmu_base + TRG_LEV2);
\r
1139 __raw_writel(tmu_info->reg_save[9], tz->tmu_base + TRG_LEV3);
\r
1141 enable_irq(tmu_info->irq);
\r
1143 clk_disable(tmu_info->clk);
1148 static int s5p_tmu_freeze(struct device *dev)
\r
1150 disable_irq(tmu_info->irq);
\r
1154 static int s5p_tmu_restore(struct device *dev)
\r
1156 struct platform_device *pdev = to_platform_device(dev);
\r
1158 tmu_initialize(pdev);
\r
1160 enable_irq(tmu_info->irq);
\r
1165 static struct dev_pm_ops s5p_tmu_dev_pm_ops = {
\r
1166 .suspend = s5p_tmu_suspend,
\r
1167 .resume = s5p_tmu_resume,
\r
1168 .freeze = s5p_tmu_freeze,
\r
1169 .restore = s5p_tmu_restore,
\r
1172 #define TMU_DEV_PM_OPS (&s5p_tmu_dev_pm_ops)
\r
1174 #define TMU_DEV_PM_OPS NULL
\r
1175 #endif /* CONFIG_PM */
\r
1177 static struct platform_driver s5p_tmu_driver = {
\r
1178 .probe = s5p_tmu_probe,
\r
1179 .remove = s5p_tmu_remove,
\r
1181 .name = "s5p-tmu",
\r
1182 .owner = THIS_MODULE,
\r
1183 .pm = TMU_DEV_PM_OPS,
\r
1187 static int __init s5p_tmu_driver_init(void)
\r
1189 pr_info("init: %s\n", __func__);
\r
1190 return platform_driver_register(&s5p_tmu_driver);
\r
1193 arch_initcall(s5p_tmu_driver_init);
\r