@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/opp.h>
+#include <linux/delay.h>
#include <plat/omap-pm.h>
#include <plat/omap_device.h>
@@ -23,6 +24,8 @@
#include <plat/clockdomain.h>
#include <plat/voltage.h>
+#include "cm-regbits-34xx.h"
+#include "prm.h"
#include "pm.h"
static struct omap_device_pm_latency *pm_lats;
@@ -32,6 +35,8 @@ static struct device *iva_dev;
static struct device *l3_dev;
static struct device *dsp_dev;
+static struct clk *dpll1_clk, *dpll2_clk, *dpll3_clk;
+
struct device *omap2_get_mpuss_device(void)
{
WARN_ON_ONCE(!mpu_dev);
@@ -57,6 +62,27 @@ struct device *omap4_get_dsp_device(void)
}
EXPORT_SYMBOL(omap4_get_dsp_device);
+static unsigned long compute_lpj(unsigned long ref, u_int div, u_int mult)
+{
+ unsigned long new_jiffy_l, new_jiffy_h;
+
+ /*
+ * Recalculate loops_per_jiffy. We do it this way to
+ * avoid math overflow on 32-bit machines. Maybe we
+ * should make this architecture dependent? If you have
+ * a better way of doing this, please replace!
+ *
+ * new = old * mult / div
+ */
+ new_jiffy_h = ref / div;
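+ /* keep the remainder in hundreds so the multiply below fits in 32 bits */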
+ new_jiffy_l = (ref % div) / 100;
+ new_jiffy_h *= mult;
+ new_jiffy_l = new_jiffy_l * mult / div;
+
+ return new_jiffy_h + new_jiffy_l * 100;
+}
+
/* static int _init_omap_device(struct omap_hwmod *oh, void *user) */
static int _init_omap_device(char *name, struct device **new_dev)
{
@@ -78,6 +104,78 @@ static int _init_omap_device(char *name, struct device **new_dev)
return 0;
}
+static unsigned long omap3_mpu_get_rate(struct device *dev)
+{
+ return dpll1_clk->rate;
+}
+
+static int omap3_mpu_set_rate(struct device *dev, unsigned long rate)
+{
+ unsigned long cur_rate = omap3_mpu_get_rate(dev);
+ int ret;
+
+#ifdef CONFIG_CPU_FREQ
+ struct cpufreq_freqs freqs_notify;
+
+ freqs_notify.old = cur_rate / 1000;
+ freqs_notify.new = rate / 1000;
+ freqs_notify.cpu = 0;
+ /* Send pre notification to CPUFreq */
+ cpufreq_notify_transition(&freqs_notify, CPUFREQ_PRECHANGE);
+#endif
+ ret = clk_set_rate(dpll1_clk, rate);
+ if (ret) {
+ dev_warn(dev, "%s: Unable to set rate to %lu\n",
+ __func__, rate);
+ return ret;
+ }
+
+#ifdef CONFIG_CPU_FREQ
+ /* Send a post notification to CPUFreq */
+ cpufreq_notify_transition(&freqs_notify, CPUFREQ_POSTCHANGE);
+#endif
+
+#ifndef CONFIG_CPU_FREQ
+ /* Update loops_per_jiffy if the processor speed is being changed */
+ loops_per_jiffy = compute_lpj(loops_per_jiffy,
+ cur_rate / 1000, rate / 1000);
+#endif
+ return 0;
+}
+
+static int omap3_iva_set_rate(struct device *dev, unsigned long rate)
+{
+ return clk_set_rate(dpll2_clk, rate);
+}
+
+static unsigned long omap3_iva_get_rate(struct device *dev)
+{
+ return dpll2_clk->rate;
+}
+
+static int omap3_l3_set_rate(struct device *dev, unsigned long rate)
+{
+ int l3_div;
+
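+ /*
+ * L3 is clocked from dpll3_m2 through the CM_CLKSEL L3 divider, so
+ * set dpll3_m2 to (rate * l3_div) to land L3 on the requested rate.
+ */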
+ l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) &
+ OMAP3430_CLKSEL_L3_MASK;
+
+ return clk_set_rate(dpll3_clk, rate * l3_div);
+}
+
+static unsigned long omap3_l3_get_rate(struct device *dev)
+{
+ int l3_div;
+
+ l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) &
+ OMAP3430_CLKSEL_L3_MASK;
+ return dpll3_clk->rate / l3_div;
+}
+
/*
* Build omap_devices for processors and bus.
*/
@@ -91,6 +189,22 @@ static void omap2_init_processor_devices(void)
} else {
_init_omap_device("l3_main", &l3_dev);
}
+
+ if (cpu_is_omap34xx()) {
+ dpll1_clk = clk_get(NULL, "dpll1_ck");
+ dpll2_clk = clk_get(NULL, "dpll2_ck");
+ dpll3_clk = clk_get(NULL, "dpll3_m2_ck");
+
+ if (mpu_dev)
+ omap_device_populate_rate_fns(mpu_dev,
+ omap3_mpu_set_rate, omap3_mpu_get_rate);
+ if (iva_dev)
+ omap_device_populate_rate_fns(iva_dev,
+ omap3_iva_set_rate, omap3_iva_get_rate);
+ if (l3_dev)
+ omap_device_populate_rate_fns(l3_dev,
+ omap3_l3_set_rate, omap3_l3_get_rate);
+ }
}
/*