-
-
Save Ansuel/adc2cf6c81e85ddcfa474e17f307697f to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* Copyright (c) 2012 The Linux Foundation. All rights reserved. */
/* | |
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License version 2 and | |
* only version 2 as published by the Free Software Foundation. | |
* | |
* This program is distributed in the hope that it will be useful, | |
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
* GNU General Public License for more details. | |
*/ | |
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include <mach/socinfo.h>
#include "acpuclock.h"
#include "acpuclock-krait.h"
/*
 * Static register layout and configuration shared by all HFPLL instances
 * on this SoC.  Offsets are relative to each scalable's hfpll_phys_base.
 * config_val and droop_val are opaque hardware programming words taken
 * from the SoC documentation; do not derive meaning from the raw numbers.
 */
static struct hfpll_data hfpll_data __initdata = {
	.mode_offset = 0x00,		/* PLL mode/enable control */
	.l_offset = 0x08,		/* L value (rate multiplier) */
	.m_offset = 0x0C,
	.n_offset = 0x10,
	.config_offset = 0x04,
	.status_offset = 0x1C,		/* polled for lock at enable time */
	.config_val = 0x7845C665,
	.has_droop_ctl = true,
	.droop_offset = 0x14,
	.droop_val = 0x0108C000,
	/* Highest L values usable at the LOW and NOM digital supplies. */
	.low_vdd_l_max = 22,
	.nom_vdd_l_max = 42,
	/* Digital supply requirement (uV) per HFPLL VDD class. */
	.vdd[HFPLL_VDD_NONE] = 0,
	.vdd[HFPLL_VDD_LOW] = 1100000,
	.vdd[HFPLL_VDD_NOM] = 1100000,
	.vdd[HFPLL_VDD_HIGH] = 1150000,
};
/*
 * Per-clock-domain descriptors: one for each Krait CPU and one for the
 * shared L2 cache.  hfpll_phys_base/aux_clk_sel_phys are SoC physical
 * addresses; l2cpmr_iaddr is the indirect L2 register address of the
 * domain's clock-mux/control word.  Voltage values are uV maxima for
 * each regulator.  vddmx_scale_en gates MEM/DIG rail scaling.
 */
static struct scalable scalable[] __initdata = {
	[CPU0] = {
		.hfpll_phys_base = 0x00903200,
		.aux_clk_sel_phys = 0x02088014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x4501,
		.vreg[VREG_CORE] = { "krait0", 1275000 },
		.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1100000 },
		.avs_enabled = false,
		.vddmx_scale_en = true,
	},
	[CPU1] = {
		.hfpll_phys_base = 0x00903240,
		.aux_clk_sel_phys = 0x02098014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x5501,
		.vreg[VREG_CORE] = { "krait1", 1275000 },
		.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1100000 },
		.avs_enabled = false,
		.vddmx_scale_en = true,
	},
	/* The L2 domain has its own HFPLL but no CORE/MEM/DIG rails. */
	[L2] = {
		.hfpll_phys_base = 0x00903300,
		.aux_clk_sel_phys = 0x02011028,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x0500,
		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1100000 },
		.avs_enabled = false,
		.vddmx_scale_en = true,
	},
};
/* | |
* Apps Fabric/DDR BW levels | |
*/ | |
/* Indices here are presumably the bw_level values referenced from the
 * L2 frequency table - confirm against struct l2_level. */
static struct msm_bus_paths bw_level_tbl[] __initdata = {
	[0] = BW_MBPS(640), /* At least 80 MHz on bus. */
	[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
	[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
	[3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
	[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
	[5] = BW_MBPS(4264), /* At least 533 MHz on bus. */
};
/* Bus-scaling client description registered with the msm_bus framework.
 * active_only = 1: votes apply to the active set only and are dropped
 * automatically when the apps processor enters its sleep set. */
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
	.usecase = bw_level_tbl,
	.num_usecases = ARRAY_SIZE(bw_level_tbl),
	.active_only = 1,
	.name = "acpuclk-ipq806x",
};
/*
 * PVS tables for Akronite based platforms
 *
 * These are based on PVS Characterization results for the 19x19 Package across Voltage and Temperature on the PVS Fused parts
 * These are based on +/-5% Margin on the VDD_APCx that is advertised in our Datasheet across Temperature
 */
/*
 * L2 frequency/voltage table, terminated by an empty entry.  Each row
 * pairs a core_speed tuple {khz, src, pri_src_sel, pll_l_val} with two
 * rail voltages (uV) and a bw_level index into bw_level_tbl - exact
 * field order is defined by struct l2_level (declared elsewhere).
 */
static struct l2_level l2_freq_tbl[] __initdata = {
	[0] = { { 384000, PLL_8, 0, 0x00 }, 1100000, 1100000, 4 },
	[1] = { { 1000000, HFPLL, 1, 0x28 }, 1100000, 1100000, 4 },
	[2] = { { 1200000, HFPLL, 1, 0x30 }, 1150000, 1150000, 5 },
	{ }
};
/*
 * CPU frequency/voltage tables per PVS grade.  Row format:
 *   { enabled-flag, {khz, src, pri_src_sel, pll_l_val}, L2(level), vdd_core uV }
 * terminated by a { 0, { 0 } } sentinel.  Faster-binned parts run the
 * same frequencies at lower core voltages.
 */
static struct acpu_level tbl_slow[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 1000000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 1050000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1100000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1150000 },
	{ 1, { 1200000, HFPLL, 1, 0x30 }, L2(2), 1200000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(2), 1250000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_nominal[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 975000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1025000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1075000 },
	{ 1, { 1200000, HFPLL, 1, 0x30 }, L2(2), 1125000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(2), 1175000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_fast[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 925000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 995000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1025000 },
	{ 1, { 1200000, HFPLL, 1, 0x30 }, L2(2), 1075000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(2), 1125000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_superfast[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 800000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 850000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 900000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 950000 },
	{ 1, { 1200000, HFPLL, 1, 0x30 }, L2(2), 1000000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(2), 1050000 },
	{ 0, { 0 } }
};
/* Speed-bin x PVS-grade -> table mapping; only bin 0 is populated. */
static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
	[0][PVS_SLOW] = {tbl_slow, sizeof(tbl_slow), 0},
	[0][PVS_NOMINAL] = {tbl_nominal, sizeof(tbl_nominal), 0},
	[0][PVS_FAST] = {tbl_fast, sizeof(tbl_fast), 0},
	[0][PVS_FASTER] = {tbl_superfast, sizeof(tbl_superfast), 0},
};
/*
 * PVS tables for Akronite-lite based platforms
 *
 * Same row format as the full Akronite tables above, but capped at
 * 1.0 GHz (no 1.2/1.4 GHz rows).
 */
static struct acpu_level tbl_slow_lite[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000},
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 1000000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1050000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1100000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_nominal_lite[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 950000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1000000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1050000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_fast_lite[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 850000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 900000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 950000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1000000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_superfast_lite[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 800000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 850000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 900000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 950000 },
	{ 0, { 0 } }
};
/* Bin/grade mapping for the lite tables; only bin 0 is populated. */
static struct pvs_table pvs_tables_lite[NUM_SPEED_BINS][NUM_PVS] __initdata = {
	[0][PVS_SLOW] = {tbl_slow_lite, sizeof(tbl_slow_lite), 0 },
	[0][PVS_NOMINAL] = {tbl_nominal_lite, sizeof(tbl_nominal_lite), 0},
	[0][PVS_FAST] = {tbl_fast_lite, sizeof(tbl_fast_lite), 0},
	[0][PVS_FASTER] = {tbl_superfast_lite, sizeof(tbl_superfast_lite), 0},
};
/*
 * PVS tables for the TN/3 parts (selected in probe for ipq8065/ipq8069).
 * Same row format as above; these bins reach 1.725 GHz and use a
 * seven-grade PVS_0..PVS_6 fuse encoding instead of slow/nom/fast.
 */
static struct acpu_level tbl_pvs0_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 975000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 1000000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1050000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1100000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1175000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1262500 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs1_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 975000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1025000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1075000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1150000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1225000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs2_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 950000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 1000000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1050000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1125000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1200000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs3_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 925000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 975000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1025000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1100000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1175000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs4_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 900000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 950000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 1000000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1075000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1150000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs5_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 825000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 850000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 900000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 950000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 1025000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1100000 },
	{ 0, { 0 } }
};
static struct acpu_level tbl_pvs6_tn_3[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 775000 },
	{ 1, { 600000, HFPLL, 1, 0x18 }, L2(1), 800000 },
	{ 1, { 800000, HFPLL, 1, 0x20 }, L2(1), 850000 },
	{ 1, { 1000000, HFPLL, 1, 0x28 }, L2(1), 900000 },
	{ 1, { 1400000, HFPLL, 1, 0x38 }, L2(1), 975000 },
	{ 1, { 1725000, HFPLL, 1, 0x45 }, L2(2), 1050000 },
	{ 0, { 0 } }
};
/* Bin/grade mapping for TN/3 parts; only bin 0 is populated. */
static struct pvs_table pvs_tables_tn_3[NUM_SPEED_BINS][NUM_PVS] __initdata = {
	[0][PVS_0] = {tbl_pvs0_tn_3, sizeof(tbl_pvs0_tn_3), 0},
	[0][PVS_1] = {tbl_pvs1_tn_3, sizeof(tbl_pvs1_tn_3), 0},
	[0][PVS_2] = {tbl_pvs2_tn_3, sizeof(tbl_pvs2_tn_3), 0},
	[0][PVS_3] = {tbl_pvs3_tn_3, sizeof(tbl_pvs3_tn_3), 0},
	[0][PVS_4] = {tbl_pvs4_tn_3, sizeof(tbl_pvs4_tn_3), 0},
	[0][PVS_5] = {tbl_pvs5_tn_3, sizeof(tbl_pvs5_tn_3), 0},
	[0][PVS_6] = {tbl_pvs6_tn_3, sizeof(tbl_pvs6_tn_3), 0},
};
/*
 * Aggregate board parameters handed to the common Krait clock driver
 * (acpuclk_krait_init).  pvs_tables defaults to the full Akronite set
 * and may be swapped for a variant-specific set at probe time.
 */
static struct acpuclk_krait_params acpuclk_ipq806x_params __initdata = {
	.scalable = scalable,
	.scalable_size = sizeof(scalable),
	.hfpll_data = &hfpll_data,
	.pvs_tables = pvs_tables,
	.l2_freq_tbl = l2_freq_tbl,
	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
	.bus_scale = &bus_scale_data,
	/* Physical address of the PTE efuse (fused speed/PVS info). */
	.pte_efuse_phys = 0x007000C0,
	/* Rate used while idle/standby - presumably the always-on source. */
	.stby_khz = 384000,
};
/*
 * Probe: pick the PVS table set that matches the detected SoC variant,
 * then hand everything to the common Krait clock driver.
 */
static int __init acpuclk_ipq806x_probe(struct platform_device *pdev)
{
	struct pvs_table (*variant_tables)[NUM_PVS] = NULL;

	/* Akronite-Lite and TN/3 parts need their own voltage tables. */
	if (cpu_is_ipq8062() || cpu_is_ipq8066())
		variant_tables = pvs_tables_lite;
	else if (cpu_is_ipq8065() || cpu_is_ipq8069())
		variant_tables = pvs_tables_tn_3;

	if (variant_tables)
		acpuclk_ipq806x_params.pvs_tables = variant_tables;

	return acpuclk_krait_init(&pdev->dev, &acpuclk_ipq806x_params);
}
/* Platform driver shell; the probe callback is supplied separately via
 * platform_driver_probe() below, so only .driver is populated here. */
static struct platform_driver acpuclk_ipq806x_driver = {
	.driver = {
		.name = "acpuclk-ipq806x",
		.owner = THIS_MODULE,
	},
};

/* Register the driver and run its one-shot (__init) probe. */
static int __init acpuclk_ipq806x_init(void)
{
	return platform_driver_probe(&acpuclk_ipq806x_driver,
				     acpuclk_ipq806x_probe);
}
device_initcall(acpuclk_ipq806x_init);
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* Copyright (c) 2012 The Linux Foundation. All rights reserved. */
/* | |
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | |
* | |
* This program is free software; you can redistribute it and/or modify | |
* it under the terms of the GNU General Public License version 2 and | |
* only version 2 as published by the Free Software Foundation. | |
* | |
* This program is distributed in the hope that it will be useful, | |
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
* GNU General Public License for more details. | |
*/ | |
#include <linux/kernel.h> | |
#include <linux/module.h> | |
#include <linux/init.h> | |
#include <linux/io.h> | |
#include <linux/delay.h> | |
#include <linux/mutex.h> | |
#include <linux/err.h> | |
#include <linux/errno.h> | |
#include <linux/cpufreq.h> | |
#include <linux/cpu.h> | |
#include <linux/regulator/consumer.h> | |
#include <asm/mach-types.h> | |
#include <asm/cpu.h> | |
#include <mach/board.h> | |
#include <mach/msm_iomap.h> | |
#include <mach/socinfo.h> | |
#include <mach/msm-krait-l2-accessors.h> | |
#include <mach/rpm-regulator.h> | |
#include <mach/rpm-regulator-smd.h> | |
#include <mach/msm_bus.h> | |
#include <mach/msm_dcvs.h> | |
#include "acpuclock.h" | |
#include "acpuclock-krait.h" | |
#include "avs.h" | |
/* MUX source selects (values written to the 2-bit primary-mux field). */
#define PRI_SRC_SEL_SEC_SRC 0
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
/* Secondary-source clock auto-gating disable bit in the L2 CPMR word. */
#define SECCLKAGD BIT(4)
/* Serializes full rate changes initiated from cpufreq/hotplug context. */
static DEFINE_MUTEX(driver_lock);
/* Protects L2 vote computation + L2 speed switch; spinlock because
 * set_rate may run in atomic context (power collapse/SWFI). */
static DEFINE_SPINLOCK(l2_lock);
/* Driver-global state, populated during initialization. */
static struct drv_data {
	struct acpu_level *acpu_freq_tbl;	/* CPU freq/voltage table in use */
	const struct l2_level *l2_freq_tbl;	/* L2 freq/voltage table */
	struct scalable *scalable;		/* per-CPU + L2 domain state */
	struct hfpll_data *hfpll_data;		/* HFPLL register layout */
	u32 bus_perf_client;			/* msm_bus scaling client handle */
	struct msm_bus_scale_pdata *bus_scale;
	int boost_uv;				/* extra uV applied when boost on */
	struct device *dev;
} drv;
static unsigned long acpuclk_krait_get_rate(int cpu) | |
{ | |
if (!drv.scalable[cpu].cur_speed) { | |
printk(KERN_ERR "%s: invalid drv data - cur_speed is NULL\n", | |
__FUNCTION__); | |
return 0; | |
} | |
return drv.scalable[cpu].cur_speed->khz; | |
} | |
/*
 * Select a source on the primary MUX.
 *
 * The mux control word lives in an indirectly-addressed L2 register
 * (sc->l2cpmr_iaddr); the primary source select occupies its low two
 * bits.  Called from atomic context, so no sleeping.
 */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	/* Replace only the 2-bit primary source select field. */
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
/*
 * Select a source on the secondary MUX.
 *
 * The secondary source select occupies bits [3:2] of the same indirect
 * L2 register used by set_pri_clk_src().  The write sequence (gating
 * off, program, gating back on) is mandated by an 8064 erratum and must
 * not be reordered.
 */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	/* 8064 Errata: disable sec_src clock gating during switch. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval |= SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Program the MUX */
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* 8064 Errata: re-enabled sec_src clock gating. */
	regval &= ~SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
static int enable_rpm_vreg(struct vreg *vreg) | |
{ | |
int ret = 0; | |
if (vreg->rpm_reg) { | |
ret = rpm_regulator_enable(vreg->rpm_reg); | |
if (ret) | |
dev_err(drv.dev, "%s regulator enable failed (%d)\n", | |
vreg->name, ret); | |
} | |
return ret; | |
} | |
static void disable_rpm_vreg(struct vreg *vreg) | |
{ | |
int rc; | |
if (vreg->rpm_reg) { | |
rc = rpm_regulator_disable(vreg->rpm_reg); | |
if (rc) | |
dev_err(drv.dev, "%s regulator disable failed (%d)\n", | |
vreg->name, rc); | |
} | |
} | |
/* Enable an already-configured HFPLL. */ | |
static void hfpll_enable(struct scalable *sc, bool skip_regulators) | |
{ | |
volatile uint32_t value; | |
uint32_t wait_cycles = 100; | |
if (!skip_regulators) { | |
/* Enable regulators required by the HFPLL. */ | |
enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]); | |
enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]); | |
} | |
/* Disable PLL bypass mode. */ | |
writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset); | |
/* | |
* H/W requires a 5us delay between disabling the bypass and | |
* de-asserting the reset. Delay 10us just to be safe. | |
*/ | |
mb(); | |
udelay(10); | |
/* De-assert active-low PLL reset. */ | |
writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset); | |
/* Wait for PLL to lock. */ | |
mb(); | |
udelay(60); | |
/* Enable PLL output. */ | |
writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset); | |
/* Confirm that PLL is Locked - HW team's recommendation */ | |
do { | |
value = readl_relaxed(sc->hfpll_base + drv.hfpll_data->status_offset); | |
if (value != 0) { | |
break; | |
} | |
mdelay(1); | |
} while (wait_cycles-- > 0); | |
} | |
/*
 * Disable a HFPLL for power-savings or while it's being reprogrammed.
 * When @skip_regulators is false the supply votes are dropped as well;
 * callers in atomic context pass true to avoid sleeping regulator APIs.
 */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}
/*
 * Program the HFPLL rate. Assumes HFPLL is already disabled.
 * Writes the L value for the target speed and, where the PLL exposes a
 * user register, selects the low/high VCO band based on low_vco_l_max.
 */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		/* Pick the VCO band appropriate for the new L value. */
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}
/* Return the L2 speed that should be applied. */ | |
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l) | |
{ | |
unsigned int new_l = 0; | |
int cpu; | |
/* Find max L2 speed vote. */ | |
sc->l2_vote = vote_l; | |
for_each_present_cpu(cpu) | |
new_l = max(new_l, drv.scalable[cpu].l2_vote); | |
return new_l; | |
} | |
/* Update the bus bandwidth request. */ | |
static void set_bus_bw(unsigned int bw) | |
{ | |
int ret; | |
/* Update bandwidth if request has changed. This may sleep. */ | |
ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw); | |
if (ret) | |
dev_err(drv.dev, "bandwidth request failed (%d)\n", ret); | |
} | |
/*
 * Set the CPU or L2 clock speed.
 *
 * When moving between two HFPLL rates, the domain is parked on the
 * always-on secondary source while the PLL is reprogrammed (the HFPLL
 * cannot change rate while in use).  @skip_regulators is forwarded to
 * the HFPLL enable/disable paths so atomic-context callers can avoid
 * sleeping regulator votes.
 */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
		      bool skip_regulators)
{
	const struct core_speed *strt_s = sc->cur_speed;

	/* Nothing to do if we are already at the target speed. */
	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);
		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, skip_regulators);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, skip_regulators);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}
/* Voltage/current requirements computed for one target frequency. */
struct vdd_data {
	int vdd_mem;	/* memory rail vote, uV */
	int vdd_dig;	/* digital rail vote, uV */
	int vdd_core;	/* per-CPU core rail, uV */
	int ua_core;	/* core rail current request, uA */
};
/*
 * Apply any per-cpu voltage increases.
 *
 * Must be called BEFORE raising the clock rate.  Rail ordering matters:
 * vdd_mem is raised before vdd_dig (vdd_mem must stay >= vdd_dig), and
 * the MEM/DIG/current votes are only issued on domains with
 * vddmx_scale_en set.  Returns 0 or the first regulator error.
 */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	if (sc->vddmx_scale_en) {
		/*
		 * Increase vdd_mem active-set before vdd_dig.
		 * vdd_mem should be >= vdd_dig.
		 */
		if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
			rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
			if (rc) {
				dev_err(drv.dev,
					"vdd_mem (cpu%d) increase failed (%d)\n",
					cpu, rc);
				return rc;
			}
			sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
		}

		/* Increase vdd_dig active-set vote. */
		if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
			rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
			if (rc) {
				dev_err(drv.dev,
					"vdd_dig (cpu%d) increase failed (%d)\n",
					cpu, rc);
				return rc;
			}
			sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
		}

		/* Increase current request. */
		if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
			rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
							data->ua_core);
			if (rc < 0) {
				dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
					sc->vreg[VREG_CORE].name, rc);
				return rc;
			}
			sc->vreg[VREG_CORE].cur_ua = data->ua_core;
		}
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
	    && reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		/* Voltage settling Time */
		udelay(10);
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}
/*
 * Apply any per-cpu voltage decreases.
 *
 * Mirror of increase_vdd(), called AFTER lowering the clock rate, with
 * the opposite rail ordering: core first, then current, then vdd_dig,
 * then vdd_mem last (vdd_mem must stay >= vdd_dig).  Errors are logged
 * and abort the remaining decreases - the rails are simply left higher
 * than strictly needed, which is safe.
 */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
	    && reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		/* Voltage settling Time */
		udelay(10);
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* MEM/DIG/current scaling only applies when vddmx_scale_en is set. */
	if (!sc->vddmx_scale_en) {
		return;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}
static int calculate_vdd_mem(const struct acpu_level *tgt) | |
{ | |
return drv.l2_freq_tbl[tgt->l2_level].vdd_mem; | |
} | |
static int get_src_dig(const struct core_speed *s) | |
{ | |
const int *hfpll_vdd = drv.hfpll_data->vdd; | |
const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max; | |
const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max; | |
if (s->src != HFPLL) | |
return hfpll_vdd[HFPLL_VDD_NONE]; | |
else if (s->pll_l_val > nom_vdd_l_max) | |
return hfpll_vdd[HFPLL_VDD_HIGH]; | |
else if (s->pll_l_val > low_vdd_l_max) | |
return hfpll_vdd[HFPLL_VDD_NOM]; | |
else | |
return hfpll_vdd[HFPLL_VDD_LOW]; | |
} | |
static int calculate_vdd_dig(const struct acpu_level *tgt) | |
{ | |
int l2_pll_vdd_dig, cpu_pll_vdd_dig; | |
l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed); | |
cpu_pll_vdd_dig = get_src_dig(&tgt->speed); | |
return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig, | |
max(l2_pll_vdd_dig, cpu_pll_vdd_dig)); | |
} | |
/* Optional voltage boost margin, toggleable at runtime through the
 * "boost" module parameter (root-writable). */
static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

/* Core-rail target: table voltage plus the boost margin when enabled. */
static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
static DEFINE_MUTEX(l2_regulator_lock); | |
static int l2_vreg_count; | |
static int enable_l2_regulators(void) | |
{ | |
int ret = 0; | |
mutex_lock(&l2_regulator_lock); | |
if (l2_vreg_count == 0) { | |
ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]); | |
if (ret) | |
goto out; | |
ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]); | |
if (ret) { | |
disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]); | |
goto out; | |
} | |
} | |
l2_vreg_count++; | |
out: | |
mutex_unlock(&l2_regulator_lock); | |
return ret; | |
} | |
static void disable_l2_regulators(void) | |
{ | |
mutex_lock(&l2_regulator_lock); | |
if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!")) | |
goto out; | |
if (l2_vreg_count == 1) { | |
disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]); | |
disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]); | |
} | |
l2_vreg_count--; | |
out: | |
mutex_unlock(&l2_regulator_lock); | |
} | |
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */ | |
static int acpuclk_krait_set_rate(int cpu, unsigned long rate, | |
enum setrate_reason reason) | |
{ | |
const struct core_speed *strt_acpu_s, *tgt_acpu_s; | |
const struct acpu_level *tgt; | |
int tgt_l2_l; | |
enum src_id prev_l2_src = NUM_SRC_ID; | |
struct vdd_data vdd_data; | |
bool skip_regulators; | |
int rc = 0; | |
if (cpu > num_possible_cpus()) | |
return -EINVAL; | |
if (!drv.scalable[cpu].cur_speed) { | |
printk(KERN_ERR "%s: invalid drv data - cur_speed is NULL\n", | |
__FUNCTION__); | |
return 0; | |
} | |
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) | |
mutex_lock(&driver_lock); | |
strt_acpu_s = drv.scalable[cpu].cur_speed; | |
/* Return early if rate didn't change. */ | |
if (rate == strt_acpu_s->khz) | |
goto out; | |
/* Find target frequency. */ | |
for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) { | |
if (tgt->speed.khz == rate) { | |
tgt_acpu_s = &tgt->speed; | |
break; | |
} | |
} | |
if (tgt->speed.khz == 0) { | |
rc = -EINVAL; | |
goto out; | |
} | |
memset(&vdd_data, 0, sizeof(struct vdd_data)); | |
/* Calculate voltage requirements for the current CPU. */ | |
if (drv.scalable[cpu].vddmx_scale_en) { | |
vdd_data.vdd_mem = calculate_vdd_mem(tgt); | |
vdd_data.vdd_dig = calculate_vdd_dig(tgt); | |
vdd_data.ua_core = tgt->ua_core; | |
} | |
vdd_data.vdd_core = calculate_vdd_core(tgt); | |
/* Disable AVS before voltage switch */ | |
if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) { | |
AVS_DISABLE(cpu); | |
drv.scalable[cpu].avs_enabled = false; | |
} | |
/* Increase VDD levels if needed. */ | |
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) { | |
rc = increase_vdd(cpu, &vdd_data, reason); | |
if (rc) | |
goto out; | |
prev_l2_src = | |
drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src; | |
/* Vote for the L2 regulators here if necessary. */ | |
if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) { | |
rc = enable_l2_regulators(); | |
if (rc) | |
goto out; | |
} | |
} | |
dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n", | |
cpu, strt_acpu_s->khz, tgt_acpu_s->khz); | |
/* | |
* If we are setting the rate as part of power collapse or in the resume | |
* path after power collapse, skip the vote for the HFPLL regulators, | |
* which are active-set-only votes that will be removed when apps enters | |
* its sleep set. This is needed to avoid voting for regulators with | |
* sleeping APIs from an atomic context. | |
*/ | |
skip_regulators = (reason == SETRATE_PC); | |
/* Set the new CPU speed. */ | |
set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators); | |
/* | |
* Update the L2 vote and apply the rate change. A spinlock is | |
* necessary to ensure L2 rate is calculated and set atomically | |
* with the CPU frequency, even if acpuclk_krait_set_rate() is | |
* called from an atomic context and the driver_lock mutex is not | |
* acquired. | |
*/ | |
spin_lock(&l2_lock); | |
tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level); | |
set_speed(&drv.scalable[L2], | |
&drv.l2_freq_tbl[tgt_l2_l].speed, true); | |
spin_unlock(&l2_lock); | |
/* Nothing else to do for power collapse or SWFI. */ | |
if (reason == SETRATE_PC || reason == SETRATE_SWFI) | |
goto out; | |
/* | |
* Remove the vote for the L2 HFPLL regulators only if the L2 | |
* was already on an HFPLL source. | |
*/ | |
if (prev_l2_src == HFPLL) | |
disable_l2_regulators(); | |
/* Update bus bandwith request. */ | |
set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level); | |
/* Drop VDD levels if we can. */ | |
decrease_vdd(cpu, &vdd_data, reason); | |
/* Re-enable AVS */ | |
if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) { | |
AVS_ENABLE(cpu, tgt->avsdscr_setting); | |
drv.scalable[cpu].avs_enabled = true; | |
} | |
dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu); | |
out: | |
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) | |
mutex_unlock(&driver_lock); | |
return rc; | |
} | |
/* Callbacks exported to the generic acpuclock layer. */
static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};
/*
 * Initialize a HFPLL at a given rate and enable it.
 * Programs the static configuration (integer mode: M=0, N=1), the
 * optional user and droop-controller registers, then sets the initial
 * rate and enables the PLL with regulator votes taken.
 */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	/* Index into drv.scalable[] doubles as the PLL instance number. */
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);

	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}
/*
 * Get, program and optionally enable one RPM-managed regulator.
 * A vreg slot with no name configured is unused on this target and
 * succeeds trivially. On error the regulator handle is released again.
 */
static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
			int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
			sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	/* Vote the requested voltage, bounded by the per-vreg maximum. */
	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
			sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}
/* Release an RPM-managed regulator acquired by rpm_regulator_init(). */
static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
			enum vregs vreg)
{
	/* Nothing to do for vreg slots that were never acquired. */
	if (sc->vreg[vreg].rpm_reg) {
		disable_rpm_vreg(&sc->vreg[vreg]);
		rpm_regulator_put(sc->vreg[vreg].rpm_reg);
	}
}
/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
		const struct acpu_level *acpu_level)
{
	int ret, vdd_core;
	int vdd_mem, vdd_dig;

	/* MX/DIG rails are only scaled on targets that enable it. */
	if (sc->vddmx_scale_en) {
		vdd_mem = calculate_vdd_mem(acpu_level);
		ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
		if (ret)
			goto err_mem;

		vdd_dig = calculate_vdd_dig(acpu_level);
		ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
		if (ret)
			goto err_dig;
	}

	/* HFPLL supplies are only voted on here, not yet enabled. */
	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
			sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
			sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
			sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}

	if (sc->vddmx_scale_en) {
		/* Vote the per-level load current on the core supply. */
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
				acpu_level->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			goto err_core_conf;
		}
		sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	}

	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
			sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;

	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	/*
	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
	 * requires a corresponding target L2 frequency that needs the L2 to
	 * run off of an HFPLL.
	 */
	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
		l2_vreg_count++;

	return 0;

	/* Unwind in reverse acquisition order. */
err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	if (sc->vddmx_scale_en) {
		rpm_regulator_cleanup(sc, VREG_DIG);
	}
err_dig:
	if (sc->vddmx_scale_en) {
		rpm_regulator_cleanup(sc, VREG_MEM);
	}
err_mem:
	return ret;
}
/* Undo regulator_init(): release regulators in reverse acquisition order. */
static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	struct regulator *core_reg = sc->vreg[VREG_CORE].reg;

	regulator_disable(core_reg);
	regulator_put(core_reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);

	/* MX/DIG rails are only held on vddmx-scaling targets. */
	if (!sc->vddmx_scale_en)
		return;
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}
/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
		const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}
/* Snapshot the clock configuration a core is currently running with. */
static void __cpuinit fill_cur_core_speed(struct core_speed *s,
			struct scalable *sc)
{
	u32 cpmr_val = get_l2_indirect_reg(sc->l2cpmr_iaddr);

	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
	/* Bits [1:0] of the CP15 mux register select the primary source. */
	s->pri_src_sel = cpmr_val & 0x3;
}
/* Two core speeds match iff both the mux selection and the PLL L-value agree. */
static bool __cpuinit speed_equal(const struct core_speed *s1,
			const struct core_speed *s2)
{
	if (s1->pri_src_sel != s2->pri_src_sel)
		return false;
	return s1->pll_l_val == s2->pll_l_val;
}
/* Find the freq-table entry matching a CPU's current hardware speed, or NULL. */
static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *level;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (level = drv.acpu_freq_tbl; level->speed.khz != 0; level++) {
		if (speed_equal(&level->speed, &cur_speed))
			return level;
	}

	return NULL;
}
/* Find the L2 freq-table entry matching the L2's current speed, or NULL. */
static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *level;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (level = drv.l2_freq_tbl; level->speed.khz != 0; level++) {
		if (speed_equal(&level->speed, &cur_speed))
			return level;
	}

	return NULL;
}
/* Return the lowest-frequency table entry usable for scaling, or NULL. */
static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *level = drv.acpu_freq_tbl;

	/* The table is frequency-ordered; the first scalable entry wins. */
	while (level->speed.khz != 0) {
		if (level->use_for_scaling)
			return level;
		level++;
	}

	return NULL;
}
/*
 * Bring one CPU's clock domain up: map its HFPLL, determine the rate it
 * is currently running at (defaulting to the minimum scalable level when
 * unknown), then program its regulators and clock sources.
 */
static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		/* Bootloader left an unknown rate; fall back to the minimum. */
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	/* Record this CPU's L2 vote and mark it ready for rate changes. */
	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}
/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	/* Without a bus client the driver cannot vote for bandwidth at all. */
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	/* Place an initial bandwidth vote matching the current L2 level. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
			l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}
#ifdef CONFIG_CPU_FREQ_MSM
/* Per-CPU CPUFreq tables; 35 slots covers the largest Krait freq plan. */
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

/*
 * Build one CPUFreq frequency table per possible CPU from the entries of
 * acpu_freq_tbl marked use_for_scaling, then register them with CPUFreq.
 */
static void __init cpufreq_table_init(void)
{
	int cpu;

	if (machine_is_ipq806x_rumi3()) {
		/* pr_info, not bare printk: always supply a log level. */
		pr_info("Skipping %s for rumi\n", __func__);
		return;
	}

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;

		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif
/* Advertise every scalable frequency (and its voltage in mV) to DCVS. */
static void __init dcvs_freq_init(void)
{
	const struct acpu_level *level;

	for (level = drv.acpu_freq_tbl; level->speed.khz != 0; level++) {
		if (!level->use_for_scaling)
			continue;
		/* vdd_core is in uV; DCVS expects mV. */
		msm_dcvs_register_cpu_freq(level->speed.khz,
					   level->vdd_core / 1000);
	}
}
/*
 * CPU hotplug notifier: drop a dying CPU to the power-collapse rate (and
 * its regulator load vote to zero), and restore both when it comes back.
 */
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
			unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	/* hcpu encodes the CPU number; cast via long, not straight to int,
	 * so no bits are lost on platforms where pointers are wider. */
	int rc, cpu = (long)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		/* Remember the rate so CPU_UP_PREPARE can restore it. */
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		break;
	case CPU_UP_PREPARE:
		/* First bring-up of this CPU: run the full per-CPU init. */
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
				sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
/* Hotplug notifier block; see acpuclk_cpu_callback(). */
static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};
/*
 * Early Krait revisions require a raised minimum core voltage; detect
 * them by CPUID (MIDR). Returns 1 when the vmin workaround is needed.
 * Note: the meaningless `const` return qualifier and the stray `;`
 * after the switch were removed.
 */
static int krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}
static void krait_apply_vmin(struct acpu_level *tbl) | |
{ | |
for (; tbl->speed.khz != 0; tbl++) { | |
if (tbl->vdd_core < 1150000) | |
tbl->vdd_core = 1150000; | |
tbl->avsdscr_setting = 0; | |
} | |
} | |
/*
 * Decode the speed bin from the PTE efuse. Falls back to the redundant
 * field when the primary field reads all-ones, and to bin 0 when the
 * value is still unknown or the PVS fuse is not blown.
 */
static int __init get_speed_bin(u32 pte_efuse)
{
	uint32_t speed_bin;

	speed_bin = pte_efuse & 0xF;
	/* 0xF in the primary field means "use the redundant field". */
	if (speed_bin == 0xF)
		speed_bin = (pte_efuse >> 4) & 0xF;

	/* Default to 0 if we read unknown speed_bin or if PVS is not blown */
	if (speed_bin == 0xF || ((pte_efuse & PVS_BLOW_STATUS) == 0)) {
		speed_bin = 0;
		/* %u, not %d: speed_bin is unsigned. */
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %u\n", speed_bin);
	} else {
		dev_info(drv.dev, "SPEED BIN: %u\n", speed_bin);
	}

	return speed_bin;
}
/*
 * Decode the PVS (process/voltage scaling) bin from the PTE efuse,
 * falling back to the redundant field and finally to bin 0.
 */
static int __init get_pvs_bin(u32 pte_efuse)
{
	uint32_t pvs_bin;

	/*
	 * For IPQ8065 and IPQ8069 : Default to SLOW if PVS is not blown
	 * For Others : Default to NOMINAL if PVS is not blown
	 */
	if ((pte_efuse & PVS_BLOW_STATUS) == 0) {
		if (cpu_is_ipq8065() || cpu_is_ipq8069()) {
			dev_warn(drv.dev, "ACPU PVS: Defaulting to slow\n");
			return PVS_SLOW;
		} else {
			dev_warn(drv.dev, "ACPU PVS: Defaulting to nominal\n");
			return PVS_NOMINAL;
		}
	}

	pvs_bin = (pte_efuse >> 10) & 0x7;
	/* 0x7 in the primary field means "use the redundant field". */
	if (pvs_bin == 0x7)
		pvs_bin = (pte_efuse >> 13) & 0x7;

	if (pvs_bin == 0x7) {
		pvs_bin = 0;
		/* %u, not %d: pvs_bin is unsigned. */
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %u\n", pvs_bin);
	} else {
		dev_info(drv.dev, "ACPU PVS: %u\n", pvs_bin);
	}

	return pvs_bin;
}
/*
 * Read the PTE efuse and pick the frequency/voltage plan matching this
 * part's speed and PVS bins. Returns NULL if the efuse can't be mapped.
 */
static struct pvs_table * __init select_freq_plan(u32 pte_efuse_phys,
			struct pvs_table (*pvs_tables)[NUM_PVS])
{
	u32 efuse_val, speed, pvs;
	void __iomem *efuse_base;

	efuse_base = ioremap(pte_efuse_phys, 4);
	if (!efuse_base) {
		dev_err(drv.dev, "Unable to map QFPROM base\n");
		return NULL;
	}
	efuse_val = readl_relaxed(efuse_base);
	iounmap(efuse_base);

	/* Select frequency tables. */
	speed = get_speed_bin(efuse_val);
	pvs = get_pvs_bin(efuse_val);

	return &pvs_tables[speed][pvs];
}
/*
 * Deep-copy the board-supplied parameter tables into driver state so they
 * can later be modified (e.g. by krait_apply_vmin()) without touching
 * __initdata. Any failure here is fatal: the CPU clock driver cannot
 * operate without its tables, so we BUG().
 */
static void __init drv_data_init(struct device *dev,
		const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
				GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	pvs = select_freq_plan(params->pte_efuse_phys, params->pvs_tables);
	/*
	 * select_freq_plan() returns NULL when the efuse cannot be mapped;
	 * check the pointer itself before dereferencing it for ->table.
	 */
	BUG_ON(!pvs);
	BUG_ON(!pvs->table);
	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}
/*
 * One-time hardware bring-up: apply the Krait vmin workaround, set up the
 * L2 HFPLL and its regulators, program initial L2 and per-CPU rates, and
 * place the initial bus bandwidth vote. Any failure here is fatal (BUG).
 */
static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	if (machine_is_ipq806x_rumi3()) {
		/* pr_info, not bare printk: always supply a log level. */
		pr_info("Skipping %s for rumi\n", __func__);
		return;
	}

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
			l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
			l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		/* Bootloader left the L2 at an unlisted rate; use the lowest. */
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}
/*
 * Driver entry point. Copies board data, brings up the hardware, then
 * registers with CPUFreq, DCVS, the acpuclk framework and CPU hotplug.
 * Always returns 0; unrecoverable setup failures BUG() earlier.
 */
int __init acpuclk_krait_init(struct device *dev,
		const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	return 0;
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment