1 /* linux/arch/arm/mach-exynos/tmu.c
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
6 * EXYNOS4 - Thermal Management support
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
13 #include <linux/module.h>
15 #include <linux/string.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/delay.h>
20 #include <linux/platform_device.h>
21 #include <linux/power_supply.h>
22 #include <linux/interrupt.h>
23 #include <linux/err.h>
25 #include <linux/irq.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/kobject.h>
32 #include <mach/regs-tmu.h>
33 #include <mach/cpufreq.h>
36 #include <plat/s5p-tmu.h>
37 #include <plat/map-s5p.h>
38 #include <plat/gpio-cfg.h>
42 #ifdef CONFIG_BUSFREQ_OPP
43 #include <mach/busfreq_exynos4.h>
48 ENABLE_TEMP_MON = 0x1,
49 ENABLE_TEST_MODE = 0x2,
50 } enable_mask = ENABLE_TEMP_MON | ENABLE_TEST_MODE;
51 module_param_named(enable_mask, enable_mask, uint, 0644);
52 #define ENABLE_DBGMASK (ENABLE_TEMP_MON | ENABLE_TEST_MODE)
54 /* for factory mode */
55 #define CONFIG_TMU_SYSFS
57 /* flags recording whether throttling or tripping has been handled */
58 #define THROTTLE_FLAG (0x1 << 0)
59 #define WARNING_FLAG (0x1 << 1)
60 #define TRIPPING_FLAG (0x1 << 2)
61 #define MEM_THROTTLE_FLAG (0x1 << 4)
63 #define TIMING_AREF_OFFSET 0x30
65 static struct workqueue_struct *tmu_monitor_wq;
67 static DEFINE_MUTEX(tmu_lock);
70 #if (defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412))
71 #if defined(CONFIG_VIDEO_MALI400MP)
72 extern int mali_voltage_lock_init(void);
73 extern int mali_voltage_lock_push(int lock_vol);
74 extern int mali_voltage_lock_pop(void);
76 #define CONFIG_TC_VOLTAGE /* Temperature compensated voltage */
/*
 * get_curr_temp - read the raw temperature code from the TMU and convert
 * it to a celsius value using the per-chip trim value (info->te1).
 * Returns the temperature clamped to a minimum of 0 degrees celsius.
 * Caller is expected to hold tmu_lock (all callers in this file do).
 */
79 static unsigned int get_curr_temp(struct s5p_tmu_info *info)
81 unsigned char curr_temp_code;
87 /* After reading the temperature code from the register, compensate
88 * its value and calculate the celsius temperature,
89 * to obtain the current temperature.
92 __raw_readl(info->tmu_base + EXYNOS4_TMU_CURRENT_TEMP) & 0xff;
94 /* Check range of temperature code with curr_temp_code & e-fuse info */
95 pr_debug("CURRENT_TEMP = 0x%02x\n", curr_temp_code);
96 #if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
97 /* temperature code range is between min 10 and 125 */
98 if ((info->te1 - curr_temp_code) > 15
99 || (curr_temp_code - info->te1) > 100)
101 /* temperature code range is between min 25 and 125 */
102 if ((curr_temp_code - info->te1) < 0
103 || (curr_temp_code - info->te1) > 100)
105 pr_warning("temperature code is in inaccurate -->"
106 "check if vdd_18_ts is on\n"
107 "or surrounding temp is low.\n");
109 /* compensate and calculate current temperature */
110 temperature = curr_temp_code - info->te1 + TMU_DC_VALUE;
111 if (temperature < 0) {
112 /* if temperature is lower than 0 degrees, report 0 degrees */
113 pr_info("current temp is %d celsius degree.\n"
114 "so, set to 0 celsius degree!\n", temperature);
117 return (unsigned int)temperature;
/* sysfs "temperature" show: current temperature in celsius, under tmu_lock. */
120 static ssize_t show_temperature(struct device *dev,
121 struct device_attribute *attr, char *buf)
123 struct s5p_tmu_info *info = dev_get_drvdata(dev);
124 unsigned int temperature;
129 mutex_lock(&tmu_lock);
131 temperature = get_curr_temp(info);
133 mutex_unlock(&tmu_lock);
135 return sprintf(buf, "%u\n", temperature);
/* sysfs "tmu_state" show: current driver state machine value (int). */
138 static ssize_t show_tmu_state(struct device *dev,
139 struct device_attribute *attr, char *buf)
141 struct s5p_tmu_info *info = dev_get_drvdata(dev);
146 return sprintf(buf, "%d\n", info->tmu_state);
/* sysfs "lot_id" show: chip lot id read from the CHIPID block (two words). */
149 static ssize_t show_lot_id(struct device *dev,
150 struct device_attribute *attr, char *buf)
154 id1 = __raw_readl(S5P_VA_CHIPID + 0x14);
155 id2 = __raw_readl(S5P_VA_CHIPID + 0x18);
157 return sprintf(buf, "%08x-%08x\n", id1, id2);
159 static DEVICE_ATTR(temperature, 0444, show_temperature, NULL);
160 static DEVICE_ATTR(tmu_state, 0444, show_tmu_state, NULL);
161 static DEVICE_ATTR(lot_id, 0444, show_lot_id, NULL);
/* Dump the configured trip/throttle temperature set points to the log. */
163 static void print_temperature_params(struct s5p_tmu_info *info)
165 struct s5p_platform_tmu *pdata = info->dev->platform_data;
167 pr_info("** temperature set value **\n"
168 "1st throttling stop_temp = %u, start_temp = %u\n"
169 "2nd throttling stop_temp = %u, start_tmep = %u\n"
170 "tripping temp = %u, s/w emergency temp = %u\n"
171 "mem throttling stop_temp = %u, start_temp = %u\n",
172 pdata->ts.stop_1st_throttle,
173 pdata->ts.start_1st_throttle,
174 pdata->ts.stop_2nd_throttle,
175 pdata->ts.start_2nd_throttle,
176 pdata->ts.start_tripping,
177 pdata->ts.start_emergency,
178 pdata->ts.stop_mem_throttle,
179 pdata->ts.start_mem_throttle);
180 #if defined(CONFIG_TC_VOLTAGE)
181 pr_info("tc_voltage stop_temp = %u, start_temp = %u\n",
182 pdata->ts.stop_tc, pdata->ts.start_tc);
/*
 * get_refresh_interval - compute the DRAM auto-refresh register value for a
 * given reference clock (Hz) and refresh period (ns):
 * refresh = (freq_ref / 1MHz) * refresh_nsec / 1000.
 */
186 unsigned int get_refresh_interval(unsigned int freq_ref,
187 unsigned int refresh_nsec)
189 unsigned int uRlk, refresh = 0;
192 * uRlk = FIN / 100000;
193 * refresh_usec = (unsigned int)(fMicrosec * 10);
194 * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1;
196 * (unsigned int)(freq_ref * (unsigned int)(refresh_usec * 10) / 100) - 1;
198 uRlk = freq_ref / 1000000;
199 refresh = ((unsigned int)(uRlk * refresh_nsec / 1000));
201 pr_info("@@@ get_refresh_interval = 0x%02x\n", refresh);
/* Test-mode overrides collected from kernel command-line early params. */
205 struct tmu_early_param {
207 struct temperature_params ts;
209 unsigned cpufreq_level_1st_throttle;
210 unsigned cpufreq_level_2nd_throttle;
212 unsigned int sampling_rate;
213 unsigned int monitor_rate;
215 static struct tmu_early_param tmu_in;
/* when non-zero, the polling worker logs the temperature at pr_info level */
217 static int tmu_print_temp_on_off;
/*
 * early_param "tmu_test": parse override temperatures from the command line.
 * Ignored unless ENABLE_TEST_MODE is set in enable_mask.
 */
219 static int __init get_temperature_params(char *str)
223 unsigned int mask = (enable_mask & ENABLE_DBGMASK);
225 if (!(mask & ENABLE_TEST_MODE))
228 get_options(str, ARRAY_SIZE(ints), ints);
230 /* output the input value */
231 pr_info("tmu_test=%s\n", str);
236 tmu_in.ts.stop_1st_throttle = (unsigned int)ints[1];
238 tmu_in.ts.start_1st_throttle = (unsigned int)ints[2];
240 tmu_in.ts.stop_2nd_throttle = (unsigned int)ints[3];
242 tmu_in.ts.start_2nd_throttle = (unsigned int)ints[4];
244 tmu_in.ts.start_tripping = (unsigned int)ints[5];
246 tmu_in.ts.start_emergency = (unsigned int)ints[6];
248 tmu_in.ts.stop_mem_throttle = (unsigned int)ints[7];
250 tmu_in.ts.start_mem_throttle = (unsigned int)ints[8];
252 /* output the input value */
253 pr_info("-->1st throttling temp: start[%u], stop[%u]\n"
254 "-->2nd throttling temp: start[%u], stop[%u]\n"
255 "-->trpping temp[%u], emergency temp[%u]\n"
256 "-->mem throttling temp: start[%u], stop[%u]\n",
257 tmu_in.ts.start_1st_throttle, tmu_in.ts.stop_1st_throttle,
258 tmu_in.ts.start_2nd_throttle, tmu_in.ts.stop_2nd_throttle,
259 tmu_in.ts.start_tripping, tmu_in.ts.start_emergency,
260 tmu_in.ts.start_mem_throttle, tmu_in.ts.stop_mem_throttle);
261 #ifdef CONFIG_TC_VOLTAGE
263 tmu_in.ts.stop_tc = (unsigned int)ints[9];
265 tmu_in.ts.start_tc = (unsigned int)ints[10];
266 pr_info("-->temp compensate : start[%u], stop[%u]\n",
267 tmu_in.ts.start_tc, tmu_in.ts.stop_tc);
271 early_param("tmu_test", get_temperature_params);
/*
 * early_param "cpu_level": override the cpufreq limit levels used for
 * 1st/2nd throttling. Ignored unless ENABLE_TEST_MODE is set.
 */
273 static int __init get_cpufreq_limit_param(char *str)
276 unsigned int mask = (enable_mask & ENABLE_DBGMASK);
278 if (!(mask & ENABLE_TEST_MODE))
281 get_options(str, ARRAY_SIZE(ints), ints);
282 /* output the input value */
283 pr_info("cpu_level=%s\n", str);
288 tmu_in.cpufreq_level_1st_throttle = (unsigned int)ints[1];
290 tmu_in.cpufreq_level_2nd_throttle = (unsigned int)ints[2];
292 pr_info("--> cpufreq_limit: 1st cpu_level = %u, 2nd cpu_level = %u\n",
293 tmu_in.cpufreq_level_1st_throttle,
294 tmu_in.cpufreq_level_2nd_throttle);
298 early_param("cpu_level", get_cpufreq_limit_param);
/*
 * early_param "tmu_sampling_rate": override polling/monitor periods (ms).
 * Ignored unless ENABLE_TEST_MODE is set.
 */
300 static int __init get_sampling_rate_param(char *str)
303 unsigned int mask = (enable_mask & ENABLE_DBGMASK);
305 if (!(mask & ENABLE_TEST_MODE))
308 get_options(str, ARRAY_SIZE(ints), ints);
309 /* output the input value */
310 pr_info("tmu_sampling_rate=%s\n", str);
315 tmu_in.sampling_rate = (unsigned int)ints[1];
317 tmu_in.monitor_rate = (unsigned int)ints[2];
319 pr_info("--> sampling_rate = %u ms, monitor_rate = %u ms\n",
320 tmu_in.sampling_rate, tmu_in.monitor_rate);
324 early_param("tmu_sampling_rate", get_sampling_rate_param);
/*
 * Periodic monitor work: log the current temperature and re-queue itself
 * every info->monitor_period jiffies. Runs on the tmu_monitor_wq.
 */
326 static void exynos4_poll_cur_temp(struct work_struct *work)
328 unsigned int cur_temp;
329 struct delayed_work *delayed_work = to_delayed_work(work);
330 struct s5p_tmu_info *info =
331 container_of(delayed_work, struct s5p_tmu_info, monitor);
332 unsigned int mask = (enable_mask & ENABLE_DBGMASK);
334 mutex_lock(&tmu_lock);
336 if (mask & ENABLE_TEMP_MON) {
337 cur_temp = get_curr_temp(info);
339 if (tmu_print_temp_on_off)
340 pr_info("curr temp in polling_interval = %u state = %d\n",
341 cur_temp, info->tmu_state);
343 pr_debug("curr temp in polling_interval = %u\n", cur_temp);
345 queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
346 info->monitor_period);
348 mutex_unlock(&tmu_lock);
/* sysfs "print_state" show: whether temperature logging is enabled. */
351 static ssize_t tmu_show_print_state(struct device *dev,
352 struct device_attribute *attr, char *buf)
356 ret = sprintf(buf, "[TMU] tmu_print_temp_on_off=%d\n"
357 , tmu_print_temp_on_off);
/* sysfs "print_state" store: accepts "0"/"1" to toggle temperature logging. */
362 static ssize_t tmu_store_print_state(struct device *dev,
363 struct device_attribute *attr, const char *buf, size_t count)
367 if (!strncmp(buf, "0", 1)) {
368 tmu_print_temp_on_off = 0;
370 } else if (!strncmp(buf, "1", 1)) {
371 tmu_print_temp_on_off = 1;
374 dev_err(dev, "Invalid cmd !!\n");
380 static DEVICE_ATTR(print_state, S_IRUGO | S_IWUSR,\
381 tmu_show_print_state, tmu_store_print_state);
/*
 * set_refresh_rate - program the DRAM auto-refresh period (TIMING_AREF) on
 * all memory controllers. Goes through the secure monitor (exynos_smc) when
 * TrustZone is enabled, otherwise writes the DMC registers directly.
 */
383 void set_refresh_rate(unsigned int auto_refresh)
386 * uRlk = FIN / 100000;
387 * refresh_usec = (unsigned int)(fMicrosec * 10);
388 * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1;
390 pr_debug("set_auto_refresh = 0x%02x\n", auto_refresh);
392 #ifdef CONFIG_ARCH_EXYNOS4
393 #ifdef CONFIG_ARM_TRUSTZONE
394 exynos_smc(SMC_CMD_REG,
395 SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC0_4212 + TIMING_AREF_OFFSET)),
397 exynos_smc(SMC_CMD_REG,
398 SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC1_4212 + TIMING_AREF_OFFSET)),
401 /* change auto refresh period in TIMING_AREF register of dmc0 */
402 __raw_writel(auto_refresh, S5P_VA_DMC0 + TIMING_AREF_OFFSET);
404 /* change auto refresh period in TIMING_AREF register of dmc1 */
405 __raw_writel(auto_refresh, S5P_VA_DMC1 + TIMING_AREF_OFFSET);
407 #else /* CONFIG_ARCH_EXYNOS4 */
408 #ifdef CONFIG_ARM_TRUSTZONE
409 exynos_smc(SMC_CMD_REG,
410 SMC_REG_ID_SFR_W((EXYNOS5_PA_DMC + TIMING_AREF_OFFSET)),
413 /* change auto refresh period in TIMING_AREF register of dmc */
414 __raw_writel(auto_refresh, S5P_VA_DMC0 + TIMING_AREF_OFFSET);
416 #endif /* CONFIG_ARCH_EXYNOS4 */
/*
 * Apply any test-mode overrides (from early params) to the platform data
 * and to the driver's cpufreq levels / polling rates, then log the result.
 */
419 static void set_temperature_params(struct s5p_tmu_info *info)
421 struct s5p_platform_tmu *data = info->dev->platform_data;
423 /* In the tmu_test mode, change temperature_params value
427 data->ts = tmu_in.ts;
428 if (tmu_in.set_lock) {
429 info->cpufreq_level_1st_throttle =
430 tmu_in.cpufreq_level_1st_throttle;
431 info->cpufreq_level_2nd_throttle =
432 tmu_in.cpufreq_level_2nd_throttle;
434 if (tmu_in.set_rate) {
435 info->sampling_rate =
436 usecs_to_jiffies(tmu_in.sampling_rate * 1000);
437 info->monitor_period =
438 usecs_to_jiffies(tmu_in.monitor_rate * 1000);
440 print_temperature_params(info);
/*
 * Send a "TMUSTATE=<n>" uevent to user space so the platform can react to
 * throttle/trip state changes. Returns the kobject_uevent_env() result.
 */
443 static int notify_change_of_tmu_state(struct s5p_tmu_info *info)
449 snprintf(temp_buf, sizeof(temp_buf), "TMUSTATE=%d", info->tmu_state);
450 envp[env_offset++] = temp_buf;
451 envp[env_offset] = NULL;
453 pr_info("%s: uevent: %d, name = %s\n",
454 __func__, info->tmu_state, temp_buf);
456 return kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp);
/*
 * Disable (enable == 0) or restore (enable != 0) the TMU interrupt enables.
 * The first call latches INTEN into the function-local 'save' so a later
 * enable restores the original mask.
 */
459 static void exynos_interrupt_enable(struct s5p_tmu_info *info, int enable)
461 static unsigned int save;
464 save = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);
467 __raw_writel(save, info->tmu_base + EXYNOS4_TMU_INTEN);
469 __raw_writel(0x0, info->tmu_base + EXYNOS4_TMU_INTEN);
472 #if defined(CONFIG_TC_VOLTAGE)
474 * exynos_tc_volt - locks or frees vdd_arm, vdd_mif/int and vdd_g3d for
475 * temperature compensation.
477 * This function limits or frees the voltage of the cpufreq, busfreq and
478 * mali drivers according to the second argument (1 = lock, 0 = unlock).
* Idempotent: returns early if the requested state is already in effect.
480 static int exynos_tc_volt(struct s5p_tmu_info *info, int enable)
482 struct s5p_platform_tmu *data;
489 data = info->dev->platform_data;
491 if (enable == usage) {
492 pr_debug("TMU: already is %s.\n",
493 enable ? "locked" : "unlocked");
498 ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TMU, info->cpulevel_tc);
501 #ifdef CONFIG_BUSFREQ_OPP
502 ret = dev_lock(info->bus_dev, info->dev, info->busfreq_tc);
506 #if defined(CONFIG_VIDEO_MALI400MP)
507 ret = mali_voltage_lock_push(data->temp_compensate.g3d_volt);
509 pr_err("TMU: g3d_push error: %u uV\n",
510 data->temp_compensate.g3d_volt);
515 exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
516 #ifdef CONFIG_BUSFREQ_OPP
517 ret = dev_unlock(info->bus_dev, info->dev);
521 #if defined(CONFIG_VIDEO_MALI400MP)
522 ret = mali_voltage_lock_pop();
524 pr_err("TMU: g3d_pop error\n");
530 pr_info("TMU: %s is ok!\n", enable ? "lock" : "unlock");
535 pr_err("TMU: %s is fail.\n", enable ? "lock" : "unlock");
/*
 * exynos4_handler_tmu_state - main TMU polling state machine (delayed work).
 * Reads the current temperature, transitions between NORMAL / THROTTLED /
 * WARNING / TRIPPED / TC states, applies or releases cpufreq upper limits,
 * sends uevents, manages DRAM refresh throttling, and re-queues itself.
 * All state handling runs under tmu_lock.
 *
 * Fix: the INTCLEAR write in the TRIPPED branch (orig. line 713) used
 * info->tmu_state (an int) as the register base instead of info->tmu_base,
 * producing a bogus address for __raw_writel. Every other INTCLEAR write in
 * this file uses info->tmu_base; corrected to match.
 */
540 static void exynos4_handler_tmu_state(struct work_struct *work)
542 struct delayed_work *delayed_work = to_delayed_work(work);
543 struct s5p_tmu_info *info =
544 container_of(delayed_work, struct s5p_tmu_info, polling);
545 struct s5p_platform_tmu *data = info->dev->platform_data;
546 unsigned int cur_temp;
547 static int auto_refresh_changed;
548 static int check_handle;
551 mutex_lock(&tmu_lock);
553 cur_temp = get_curr_temp(info);
554 trend = cur_temp - info->last_temperature;
555 pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);
557 switch (info->tmu_state) {
558 #if defined(CONFIG_TC_VOLTAGE)
560 /* lock has priority than unlock */
561 if (cur_temp <= data->ts.start_tc) {
562 if (exynos_tc_volt(info, 1) < 0)
563 pr_err("TMU: lock error!\n");
564 } else if (cur_temp >= data->ts.stop_tc) {
565 if (exynos_tc_volt(info, 0) < 0) {
566 pr_err("TMU: unlock error!\n");
568 info->tmu_state = TMU_STATUS_NORMAL;
569 pr_info("change state: tc -> normal.\n");
572 /* free if upper limit is locked */
574 exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
579 case TMU_STATUS_NORMAL:
580 /* 1. change state: 1st-throttling */
581 if (cur_temp >= data->ts.start_1st_throttle) {
582 info->tmu_state = TMU_STATUS_THROTTLED;
583 pr_info("change state: normal->throttle.\n");
584 #if defined(CONFIG_TC_VOLTAGE)
585 /* check whether temp compesation need or not */
586 } else if (cur_temp <= data->ts.start_tc) {
587 if (exynos_tc_volt(info, 1) < 0) {
588 pr_err("TMU: lock error!\n");
590 info->tmu_state = TMU_STATUS_TC;
591 pr_info("change state: normal->tc.\n");
594 /* 2. polling end and uevent */
595 } else if ((cur_temp <= data->ts.stop_1st_throttle)
596 && (cur_temp <= data->ts.stop_mem_throttle)) {
597 if (check_handle & THROTTLE_FLAG) {
598 exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
599 check_handle &= ~(THROTTLE_FLAG);
601 pr_debug("check_handle = %d\n", check_handle);
602 notify_change_of_tmu_state(info);
603 pr_info("normal: free cpufreq_limit & interrupt enable.\n");
605 /* clear to prevent from interfupt by peindig bit */
606 __raw_writel(INTCLEARALL,
607 info->tmu_base + EXYNOS4_TMU_INTCLEAR);
608 exynos_interrupt_enable(info, 1);
609 enable_irq(info->irq);
610 mutex_unlock(&tmu_lock);
615 case TMU_STATUS_THROTTLED:
616 /* 1. change state: 2nd-throttling or warning */
617 if (cur_temp >= data->ts.start_2nd_throttle) {
618 info->tmu_state = TMU_STATUS_WARNING;
619 pr_info("change state: 1st throttle->2nd throttle.\n");
620 #if defined(CONFIG_TC_VOLTAGE)
621 /* check whether temp compesation need or not */
622 } else if (cur_temp <= data->ts.start_tc) {
623 if (exynos_tc_volt(info, 1) < 0)
624 pr_err("TMU: lock error!\n");
626 info->tmu_state = TMU_STATUS_TC;
628 /* 2. cpufreq limitation and uevent */
629 } else if ((cur_temp >= data->ts.start_1st_throttle) &&
630 !(check_handle & THROTTLE_FLAG)) {
631 if (check_handle & WARNING_FLAG) {
632 exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
633 check_handle &= ~(WARNING_FLAG);
635 exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
636 info->cpufreq_level_1st_throttle);
637 check_handle |= THROTTLE_FLAG;
638 pr_debug("check_handle = %d\n", check_handle);
639 notify_change_of_tmu_state(info);
640 pr_info("throttling: set cpufreq upper limit.\n");
641 /* 3. change state: normal */
642 } else if ((cur_temp <= data->ts.stop_1st_throttle)
644 info->tmu_state = TMU_STATUS_NORMAL;
645 pr_info("change state: 1st throttle->normal.\n");
649 case TMU_STATUS_WARNING:
650 /* 1. change state: tripping */
651 if (cur_temp >= data->ts.start_tripping) {
652 info->tmu_state = TMU_STATUS_TRIPPED;
653 pr_info("change state: 2nd throttle->trip\n");
654 #if defined(CONFIG_TC_VOLTAGE)
655 /* check whether temp compesation need or not */
656 } else if (cur_temp <= data->ts.start_tc) {
657 if (exynos_tc_volt(info, 1) < 0)
658 pr_err("TMU: lock error!\n");
660 info->tmu_state = TMU_STATUS_TC;
662 /* 2. cpufreq limitation and uevent */
663 } else if ((cur_temp >= data->ts.start_2nd_throttle) &&
664 !(check_handle & WARNING_FLAG)) {
665 if (check_handle & THROTTLE_FLAG) {
666 exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
667 check_handle &= ~(THROTTLE_FLAG);
669 exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
670 info->cpufreq_level_2nd_throttle);
672 check_handle |= WARNING_FLAG;
673 pr_debug("check_handle = %d\n", check_handle);
674 notify_change_of_tmu_state(info);
675 pr_info("2nd throttle: cpufreq is limited.\n");
676 /* 3. change state: 1st-throttling */
677 } else if ((cur_temp <= data->ts.stop_2nd_throttle)
679 info->tmu_state = TMU_STATUS_THROTTLED;
680 pr_info("change state: 2nd throttle->1st throttle, "
681 "and release cpufreq upper limit.\n");
685 case TMU_STATUS_TRIPPED:
686 /* 1. call uevent to shut-down */
687 if ((cur_temp >= data->ts.start_tripping) &&
688 (trend > 0) && !(check_handle & TRIPPING_FLAG)) {
689 notify_change_of_tmu_state(info);
690 pr_info("tripping: on waiting shutdown.\n");
691 check_handle |= TRIPPING_FLAG;
692 pr_debug("check_handle = %d\n", check_handle);
693 #if defined(CONFIG_TC_VOLTAGE)
694 /* check whether temp compesation need or not */
695 } else if (cur_temp <= data->ts.start_tc) {
696 if (exynos_tc_volt(info, 1) < 0)
697 pr_err("TMU: lock error!\n");
699 info->tmu_state = TMU_STATUS_TC;
701 /* 2. change state: 2nd-throttling or warning */
702 } else if ((cur_temp <= data->ts.stop_2nd_throttle)
704 info->tmu_state = TMU_STATUS_WARNING;
705 pr_info("change state: trip->2nd throttle, "
706 "Check! occured only test mode.\n");
708 /* 3. chip protection: kernel panic as SW workaround */
709 if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
710 panic("Emergency!!!! tripping is not treated!\n");
711 /* clear to prevent from interfupt by peindig bit */
712 __raw_writel(INTCLEARALL,
713 info->tmu_base + EXYNOS4_TMU_INTCLEAR); /* was: info->tmu_state (wrong base) */
714 enable_irq(info->irq);
715 mutex_unlock(&tmu_lock);
720 case TMU_STATUS_INIT:
721 /* sned tmu initial status to platform */
722 disable_irq(info->irq);
723 if (cur_temp >= data->ts.start_tripping)
724 info->tmu_state = TMU_STATUS_TRIPPED;
725 #if defined(CONFIG_TC_VOLTAGE)
726 /* check whether temp compesation need or not */
727 else if (cur_temp <= data->ts.start_tc) {
728 if (exynos_tc_volt(info, 1) < 0)
729 pr_err("TMU: lock error!\n");
731 info->tmu_state = TMU_STATUS_TC;
734 else if (cur_temp >= data->ts.start_2nd_throttle)
735 info->tmu_state = TMU_STATUS_WARNING;
736 else if (cur_temp >= data->ts.start_1st_throttle)
737 info->tmu_state = TMU_STATUS_THROTTLED;
738 else if (cur_temp <= data->ts.stop_1st_throttle)
739 info->tmu_state = TMU_STATUS_NORMAL;
741 notify_change_of_tmu_state(info);
742 pr_info("%s: inform to init state to platform.\n", __func__);
746 pr_warn("Bug: checked tmu_state.\n");
747 if (cur_temp >= data->ts.start_tripping)
748 info->tmu_state = TMU_STATUS_TRIPPED;
749 #if defined(CONFIG_TC_VOLTAGE)
750 /* check whether temp compesation need or not */
751 else if (cur_temp <= data->ts.start_tc) {
752 if (exynos_tc_volt(info, 1) < 0)
753 pr_err("TMU: lock error!\n");
755 info->tmu_state = TMU_STATUS_TC;
759 info->tmu_state = TMU_STATUS_WARNING;
763 /* memory throttling */
764 if (cur_temp >= data->ts.start_mem_throttle) {
765 if (!(auto_refresh_changed) && (trend > 0)) {
766 pr_info("set auto_refresh 1.95us\n");
767 set_refresh_rate(info->auto_refresh_tq0);
768 auto_refresh_changed = 1;
770 } else if (cur_temp <= (data->ts.stop_mem_throttle)) {
771 if ((auto_refresh_changed) && (trend < 0)) {
772 pr_info("set auto_refresh 3.9us\n");
773 set_refresh_rate(info->auto_refresh_normal);
774 auto_refresh_changed = 0;
778 info->last_temperature = cur_temp;
780 /* reschedule the next work */
781 queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
782 info->sampling_rate);
784 mutex_unlock(&tmu_lock);
/*
 * exynos4210_tmu_init - program the 4210-flavour TMU: read/validate trim
 * info, set VREF/slope tuning, convert the celsius set points to trigger
 * levels (offsets relative to the threshold temperature), enable the core
 * and unmask the LEV0/1/2 interrupts.
 */
789 static int exynos4210_tmu_init(struct s5p_tmu_info *info)
791 struct s5p_platform_tmu *data = info->dev->platform_data;
793 unsigned int temp_code_threshold;
794 unsigned int temp_code_throttle, temp_code_warning, temp_code_trip;
796 /* To compensate temperature sensor
797 * get trim information and save to struct tmu_info
799 tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_TRIMINFO);
800 info->te1 = tmp & TMU_TRIMINFO_MASK;
801 info->te2 = ((tmp >> 8) & TMU_TRIMINFO_MASK);
803 /* check boundary the triminfo */
804 if ((EFUSE_MIN_VALUE > info->te1)
805 || (info->te1 > EFUSE_MAX_VALUE) || (info->te2 != 0))
806 info->te1 = EFUSE_AVG_VALUE;
808 pr_info("%s: triminfo = 0x%08x, low 8bit = 0x%02x, high 24 bit = 0x%06x\n",
809 __func__, tmp, info->te1, info->te2);
811 /* Need to initial register setting after getting parameter info */
812 /* [28:23] vref [11:8] slope - Tuning parameter */
813 __raw_writel(VREF_SLOPE, info->tmu_base + EXYNOS4_TMU_CONTROL);
815 /* Convert celsius temperature value to temperature code value
816 * such as threshold_level, 1st throttle, 2nd throttle,
817 * tripping temperature.
819 temp_code_threshold = data->ts.stop_1st_throttle
820 + info->te1 - TMU_DC_VALUE;
821 temp_code_throttle = data->ts.start_1st_throttle
822 - data->ts.stop_1st_throttle;
823 temp_code_warning = data->ts.start_2nd_throttle
824 - data->ts.stop_1st_throttle;
825 temp_code_trip = data->ts.start_tripping
826 - data->ts.stop_1st_throttle;
828 /* Set interrupt trigger level */
829 __raw_writel(temp_code_threshold, info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
830 __raw_writel(temp_code_throttle, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
831 __raw_writel(temp_code_warning, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
832 __raw_writel(temp_code_trip, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
833 __raw_writel(TRIGGER_LEV_MAX, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
835 pr_info("THD_TEMP:0x%02x: TRIG_LEV0: 0x%02x\n"
836 "TRIG_LEV1: 0x%02x TRIG_LEV2: 0x%02x, TRIG_LEV3: 0x%02x\n",
837 __raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP),
838 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0),
839 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1),
840 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2),
841 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3));
845 /* Need to initial register setting after getting parameter info */
846 /* [28:23] vref [11:8] slope - Tuning parameter */
847 __raw_writel(VREF_SLOPE, info->tmu_base + EXYNOS4_TMU_CONTROL);
848 /* TMU core enable */
849 tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
850 tmp |= TMUCORE_ENABLE;
851 __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_CONTROL);
853 /* check interrupt status register */
854 pr_debug("tmu interrupt status: 0x%02x\n",
855 __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT));
857 /* LEV0 LEV1 LEV2 interrupt enable */
858 __raw_writel(INTEN0 | INTEN1 | INTEN2, info->tmu_base + EXYNOS4_TMU_INTEN);
/*
 * exynos4x12_tmu_init - program the 4x12-flavour TMU: load trim info,
 * pack the rising trigger thresholds into TRESHOLD_TEMP_RISE (and the
 * falling threshold for temperature compensation when CONFIG_TC_VOLTAGE),
 * enable the core with MUX_ADDR 110b, then unmask the RISE0/1/2 interrupts.
 */
862 static int exynos4x12_tmu_init(struct s5p_tmu_info *info)
864 struct s5p_platform_tmu *data = info->dev->platform_data;
866 unsigned char temp_code_throttle, temp_code_warning, temp_code_trip;
868 /* To compensate temperature sensor,
869 * set triminfo control register & get trim information
870 * and save to struct tmu_info
872 tmp = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRIMINFO_CONROL);
874 __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRIMINFO_CONROL);
878 tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_TRIMINFO);
879 info->te1 = tmp & TMU_TRIMINFO_MASK;
881 /* In case of non e-fusing chip, s/w workaround */
885 pr_debug("%s: triminfo reg = 0x%08x, value = %u\n", __func__,
888 /* Convert celsius temperature value to temperature code value
889 * such as 1st throttle, 2nd throttle, tripping temperature.
890 * its range is between 25 celsius(0x32) and 125 celsius(0x96)
892 temp_code_throttle = data->ts.start_1st_throttle
893 + info->te1 - TMU_DC_VALUE;
894 temp_code_warning = data->ts.start_2nd_throttle
895 + info->te1 - TMU_DC_VALUE;
896 temp_code_trip = data->ts.start_tripping
897 + info->te1 - TMU_DC_VALUE;
899 pr_debug("temp_code_throttle: %u, temp_code_warning: %u\n"
900 "temp_code_trip: %u, info->te1 = %u\n",
901 temp_code_throttle, temp_code_warning,
902 temp_code_trip, info->te1);
904 /* Set interrupt trigger level */
905 tmp = ((0xFF << 24) | (temp_code_trip << 16) |
906 (temp_code_warning << 8) | (temp_code_throttle << 0));
907 __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
909 pr_debug("THD_TEMP_RISE: 0x%08x\n",
910 __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE));
912 #if defined(CONFIG_TC_VOLTAGE)
913 /* Get set temperature for tc_voltage and set falling interrupt
916 tmp = (data->ts.start_tc + info->te1 - TMU_DC_VALUE) << 0;
917 __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
918 pr_debug("THD_TEMP_FALL: 0x%08x\n",
919 __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL));
922 /* TMU core enable */
923 tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
924 tmp |= (TMUCORE_ENABLE | (0x6 << 20)); /* MUX_ADDR : 110b */
925 __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_CONTROL);
927 /* Because temperature sensing time is appro 940us,
928 * tmu is enabled and 1st valid sample can get 1ms after.
931 /* check interrupt status register */
932 pr_debug("tmu interrupt status: 0x%08x\n",
933 __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT));
935 /* THRESHOLD_TEMP_RISE0, RISE1, RISE2 interrupt enable */
936 __raw_writel(INTEN_RISE0 | INTEN_RISE1 | INTEN_RISE2,
937 info->tmu_base + EXYNOS4_TMU_INTEN);
939 #if defined(CONFIG_TC_VOLTAGE)
940 tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);
942 __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_INTEN);
/*
 * tmu_initialize - verify the sensor is idle, then dispatch to the
 * SoC-specific init routine (4210 vs 4x12).
 */
948 static int tmu_initialize(struct platform_device *pdev)
950 struct s5p_tmu_info *info = platform_get_drvdata(pdev);
954 /* check if sensing is idle */
955 tmp = (__raw_readl(info->tmu_base + EXYNOS4_TMU_STATUS) & 0x1);
957 pr_err("failed to start tmu driver\n");
961 if (soc_is_exynos4210())
962 ret = exynos4210_tmu_init(info);
964 ret = exynos4x12_tmu_init(info);
/*
 * 4x12 TMU IRQ handler: decode INTSTAT (highest temperature first), set the
 * new driver state, clear the serviced pending bit, and hand off the rest
 * of the processing to the polling work. The IRQ stays disabled until the
 * state machine re-enables it.
 */
969 static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id)
971 struct s5p_tmu_info *info = id;
974 disable_irq_nosync(irq);
976 status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF;
977 pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status);
979 /* To handle multiple interrupt pending,
980 * interrupt by high temperature are serviced with priority.
982 #if defined(CONFIG_TC_VOLTAGE)
983 if (status & INTSTAT_FALL0) {
984 info->tmu_state = TMU_STATUS_TC;
986 __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
987 exynos_interrupt_enable(info, 0);
988 } else if (status & INTSTAT_RISE2) {
989 info->tmu_state = TMU_STATUS_TRIPPED;
990 __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
992 if (status & INTSTAT_RISE2) {
993 info->tmu_state = TMU_STATUS_TRIPPED;
994 __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
996 } else if (status & INTSTAT_RISE1) {
997 info->tmu_state = TMU_STATUS_WARNING;
998 __raw_writel(INTCLEAR_RISE1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
999 } else if (status & INTSTAT_RISE0) {
1000 info->tmu_state = TMU_STATUS_THROTTLED;
1001 __raw_writel(INTCLEAR_RISE0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1003 pr_err("%s: interrupt error\n", __func__);
1004 __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1005 queue_delayed_work_on(0, tmu_monitor_wq,
1006 &info->polling, info->sampling_rate / 2);
1010 /* read current temperature & save */
1011 info->last_temperature = get_curr_temp(info);
1013 queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
1014 info->sampling_rate);
/*
 * 4210 TMU IRQ handler: same shape as the 4x12 handler but with the older
 * INTSTAT0-2 bit layout. Sets the new state, clears the serviced pending
 * bit, and schedules the polling work to do the heavy lifting.
 */
1019 static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id)
1021 struct s5p_tmu_info *info = id;
1022 unsigned int status;
1024 disable_irq_nosync(irq);
1026 status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT);
1027 pr_info("EXYNOS4212_tmu interrupt: INTSTAT = 0x%08x\n", status);
1029 /* To handle multiple interrupt pending,
1030 * interrupt by high temperature are serviced with priority.
1032 if (status & TMU_INTSTAT2) {
1033 info->tmu_state = TMU_STATUS_TRIPPED;
1034 __raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1035 } else if (status & TMU_INTSTAT1) {
1036 info->tmu_state = TMU_STATUS_WARNING;
1037 __raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1038 } else if (status & TMU_INTSTAT0) {
1039 info->tmu_state = TMU_STATUS_THROTTLED;
1040 __raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1042 pr_err("%s: interrupt error\n", __func__);
1043 __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
1044 queue_delayed_work_on(0, tmu_monitor_wq,
1045 &info->polling, info->sampling_rate / 2);
1049 /* read current temperature & save */
1050 info->last_temperature = get_curr_temp(info);
1052 queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
1053 info->sampling_rate);
1058 #ifdef CONFIG_TMU_SYSFS
/* sysfs "curr_temp" show (factory mode): current temperature, no locking. */
/* NOTE(review): curr_temp is unsigned but printed with %d — harmless for
 * the valid range (< INT_MAX), but %u would be the correct specifier. */
1059 static ssize_t s5p_tmu_show_curr_temp(struct device *dev,
1060 struct device_attribute *attr, char *buf)
1062 struct s5p_tmu_info *info = dev_get_drvdata(dev);
1063 unsigned int curr_temp;
1065 curr_temp = get_curr_temp(info);
1067 pr_info("curr temp = %d\n", curr_temp);
1069 return sprintf(buf, "%d\n", curr_temp);
1071 static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL);
/*
 * s5p_tmu_probe - probe/initialise the Exynos4/4x12 TMU platform device.
 *
 * Sets up, in order: driver state allocation, cpufreq throttle levels from
 * platform data, (optionally) temperature-compensated voltage targets,
 * DRAM auto-refresh intervals, the monitor workqueue, MMIO mapping, the TMU
 * interrupt handler, sysfs attributes, and finally kicks off the state
 * polling work.
 *
 * NOTE(review): this listing has lines elided (if-checks, gotos, returns
 * and the error-label names of the goto-cleanup chain at the bottom are
 * missing). Only comments were added here; the visible statements are
 * unchanged.
 */
static int __devinit s5p_tmu_probe(struct platform_device *pdev)
	struct s5p_tmu_info *info;
	struct s5p_platform_tmu *pdata;
	struct resource *res;
	/* only the debug features actually enabled via the module parameter */
	unsigned int mask = (enable_mask & ENABLE_DBGMASK);

	pr_debug("%s: probe=%p\n", __func__, pdev);

	/* driver state; NOTE(review): the !info check line is elided here */
	info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL);
	dev_err(&pdev->dev, "failed to alloc memory!\n");

	platform_set_drvdata(pdev, info);

	info->dev = &pdev->dev;
	info->tmu_state = TMU_STATUS_INIT;

	/* set cpufreq limit level at 1st_throttle & 2nd throttle */
	pdata = info->dev->platform_data;
	if (pdata->cpufreq.limit_1st_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle,
				&info->cpufreq_level_1st_throttle);

	if (pdata->cpufreq.limit_2nd_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle,
				&info->cpufreq_level_2nd_throttle);

	pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n",
			__func__, info->cpufreq_level_1st_throttle,
			info->cpufreq_level_2nd_throttle);

#if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */
	/* map the compensation ARM voltage to a cpufreq level */
	if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt,
			&info->cpulevel_tc) < 0) {
		dev_err(&pdev->dev, "cpufreq_get_level error\n");
#ifdef CONFIG_BUSFREQ_OPP
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (info->bus_dev < 0) {
		dev_err(&pdev->dev, "Failed to get_dev\n");
	/* map the compensation bus voltage to a busfreq value */
	if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt,
			&info->busfreq_tc)) {
		dev_err(&pdev->dev, "get_busfreq_value error\n");
	pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n",
			__func__, info->cpulevel_tc, info->busfreq_tc);

	/* Map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* To poll current temp, set sampling rate to ONE second sampling */
	info->sampling_rate = usecs_to_jiffies(1000 * 1000);
	/* 10sec monitoring */
	info->monitor_period = usecs_to_jiffies(10000 * 1000);

	/* support test mode */
	if (mask & ENABLE_TEST_MODE)
		set_temperature_params(info);
	print_temperature_params(info);

	/* MMIO region; NOTE(review): the !res check line is elided */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev_err(&pdev->dev, "failed to get memory region resource\n");

	info->ioarea = request_mem_region(res->start,
			res->end-res->start + 1, pdev->name);
	if (!(info->ioarea)) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");

	info->tmu_base = ioremap(res->start, (res->end - res->start) + 1);
	if (!(info->tmu_base)) {
		dev_err(&pdev->dev, "failed ioremap()\n");

	/* freezable so monitoring pauses across suspend */
	tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev));
	if (!tmu_monitor_wq) {
		pr_info("Creation of tmu_monitor_wq failed\n");

	/* To support periodic temperature monitoring */
	if (mask & ENABLE_TEMP_MON) {
		INIT_DELAYED_WORK_DEFERRABLE(&info->monitor,
				exynos4_poll_cur_temp);
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				info->monitor_period);
	INIT_DELAYED_WORK_DEFERRABLE(&info->polling, exynos4_handler_tmu_state);

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq);

	/* 4210 and 4x12 have different interrupt layouts */
	if (soc_is_exynos4210())
		ret = request_irq(info->irq, exynos4210_tmu_irq_handler,
				IRQF_DISABLED, "s5p-tmu interrupt", info);
		ret = request_irq(info->irq, exynos4x12_tmu_irq_handler,
				IRQF_DISABLED, "s5p-tmu interrupt", info);
		dev_err(&pdev->dev, "request_irq is failed. %d\n", ret);

	ret = device_create_file(&pdev->dev, &dev_attr_temperature);
		pr_err("Failed to create temperatue file: %d\n", ret);
		goto err_sysfs_file1;

	ret = device_create_file(&pdev->dev, &dev_attr_tmu_state);
		pr_err("Failed to create tmu_state file: %d\n", ret);
		goto err_sysfs_file2;

	ret = device_create_file(&pdev->dev, &dev_attr_lot_id);
		pr_err("Failed to create lot id file: %d\n", ret);
		goto err_sysfs_file3;

	/* program the TMU hardware (thresholds, enable) */
	ret = tmu_initialize(pdev);

#ifdef CONFIG_TMU_SYSFS
	ret = device_create_file(&pdev->dev, &dev_attr_curr_temp);
		dev_err(&pdev->dev, "Failed to create sysfs group\n");

#ifdef CONFIG_TMU_DEBUG
	ret = device_create_file(&pdev->dev, &dev_attr_print_state);
		dev_err(&pdev->dev, "Failed to create tmu sysfs group\n\n");

#if defined(CONFIG_TC_VOLTAGE)
	/* s/w workaround for fast service when interrupt is not occurred,
	 * such as current temp is lower than tc interrupt temperature
	 * or current temp is continuously increased.
	if (get_curr_temp(info) <= pdata->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
#if defined(CONFIG_VIDEO_MALI400MP)
	if (mali_voltage_lock_init())
		pr_err("Failed to initialize mail voltage lock.\n");

	/* initialize tmu_state */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	/* error unwind chain -- labels elided in this listing; each stage
	 * undoes one acquisition from above, in reverse order */
	device_remove_file(&pdev->dev, &dev_attr_lot_id);
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);
	device_remove_file(&pdev->dev, &dev_attr_temperature);
	free_irq(info->irq, info);
	destroy_workqueue(tmu_monitor_wq);
	iounmap(info->tmu_base);
	release_resource(info->ioarea);
	kfree(info->ioarea);
	dev_err(&pdev->dev, "initialization failed.\n");
1298 static int __devinit s5p_tmu_remove(struct platform_device *pdev)
1300 struct s5p_tmu_info *info = platform_get_drvdata(pdev);
1302 cancel_delayed_work(&info->polling);
1303 destroy_workqueue(tmu_monitor_wq);
1305 device_remove_file(&pdev->dev, &dev_attr_temperature);
1306 device_remove_file(&pdev->dev, &dev_attr_tmu_state);
1309 free_irq(info->irq, info);
1311 iounmap(info->tmu_base);
1313 release_resource(info->ioarea);
1314 kfree(info->ioarea);
1319 pr_info("%s is removed\n", dev_name(&pdev->dev));
/*
 * s5p_tmu_suspend - save TMU register state and mask the interrupt
 * before the SoC powers down (registers are lost across suspend).
 *
 * reg_save[] layout: [0] CONTROL, [1] SAMPLING_INTERNAL, [2]/[3] COUNTER
 * VALUE0/1, [4] INTEN, [5..] SoC-specific threshold/trigger registers.
 *
 * NOTE(review): several "info->reg_save[N] =" lvalue lines in the
 * 4210/4x12 branch are elided in this listing; only the __raw_readl()
 * expressions are visible. Comments only were added here.
 */
static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state)
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	/* save register value */
	info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
	info->reg_save[1] = __raw_readl(info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	info->reg_save[2] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	info->reg_save[3] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);
	info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);

	if (soc_is_exynos4210()) {
		/* 4210: one threshold register plus four trigger levels */
		__raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		__raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		__raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		__raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		__raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
		/* 4x12: rising threshold, plus falling threshold for TC mode */
		__raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		info->reg_save[6] = __raw_readl(info->tmu_base
				+ EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);

	/* no TMU interrupts while registers are powered off */
	disable_irq(info->irq);
/*
 * s5p_tmu_resume - restore the TMU registers saved by s5p_tmu_suspend()
 * and re-arm monitoring after wakeup.
 *
 * Restore order mirrors the save: CONTROL, sampling interval, counters,
 * then the SoC-specific threshold/trigger registers, and INTEN last so
 * interrupts are only re-enabled once thresholds are back in place.
 *
 * NOTE(review): closing braces / return lines are elided in this listing;
 * only comments were added.
 */
static int s5p_tmu_resume(struct platform_device *pdev)
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);
	struct s5p_platform_tmu *data;

	data = info->dev->platform_data;

	/* restore tmu register value */
	__raw_writel(info->reg_save[0], info->tmu_base + EXYNOS4_TMU_CONTROL);
	__raw_writel(info->reg_save[1],
			info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	__raw_writel(info->reg_save[2],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	__raw_writel(info->reg_save[3],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);

	if (soc_is_exynos4210()) {
		__raw_writel(info->reg_save[5],
				info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		__raw_writel(info->reg_save[6],
				info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		__raw_writel(info->reg_save[7],
				info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		__raw_writel(info->reg_save[8],
				info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		__raw_writel(info->reg_save[9],
				info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
		__raw_writel(info->reg_save[5],
				info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		__raw_writel(info->reg_save[6],
				info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
	/* INTEN last: thresholds are valid before interrupts fire again */
	__raw_writel(info->reg_save[4],
			info->tmu_base + EXYNOS4_TMU_INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	/* s/w workaround for fast service when interrupt is not occurred,
	 * such as current temp is lower than tc interrupt temperature
	 * or current temp is continuously increased..
	if (get_curr_temp(info) <= data->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");

	/* Find out tmu_state after wakeup */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0);
1420 #define s5p_tmu_suspend NULL
1421 #define s5p_tmu_resume NULL
/*
 * Platform driver glue: binds the probe/remove and PM callbacks above.
 * When CONFIG_PM is off, s5p_tmu_suspend/resume are #defined to NULL.
 *
 * NOTE(review): the ".driver = {" sub-initializer lines (including the
 * driver name string) are elided in this listing; .owner below belongs
 * to that nested struct. Comments only were added.
 */
static struct platform_driver s5p_tmu_driver = {
	.probe		= s5p_tmu_probe,
	.remove		= s5p_tmu_remove,
	.suspend	= s5p_tmu_suspend,
	.resume		= s5p_tmu_resume,
		.owner	= THIS_MODULE,
/* Register the TMU driver. Uses late_initcall rather than module_init --
 * presumably so that subsystems it depends on (cpufreq/busfreq) are
 * initialised first; confirm against the rest of the tree. */
static int __init s5p_tmu_driver_init(void)
	return platform_driver_register(&s5p_tmu_driver);

/* Unregister the driver on module unload. */
static void __exit s5p_tmu_driver_exit(void)
	platform_driver_unregister(&s5p_tmu_driver);
late_initcall(s5p_tmu_driver_init);
module_exit(s5p_tmu_driver_exit);