diff --git a/Patches/LineageOS-14.1/android_vendor_cm/99mpdecision b/Patches/LineageOS-14.1/android_vendor_cm/99mpdecision new file mode 100755 index 00000000..55645a3d --- /dev/null +++ b/Patches/LineageOS-14.1/android_vendor_cm/99mpdecision @@ -0,0 +1,23 @@ +#Credit: Cl3Kener +echo "1" > /sys/kernel/msm_mpdecision/conf/enabled +echo "0" > /sys/kernel/msm_mpdecision/conf/boost_enabled +echo "300000" > /sys/kernel/msm_mpdecision/conf/idle_freq +echo "8" > /sys/kernel/msm_mpdecision/conf/max_cpus +echo "1" > /sys/kernel/msm_mpdecision/conf/min_cpus +echo "12" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_0 +echo "0" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_1 +echo "25" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_2 +echo "7" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_3 +echo "30" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_4 +echo "10" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_5 +echo "0" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_6 +echo "18" > /sys/kernel/msm_mpdecision/conf/nwns_threshold_7 +echo "1" > /sys/kernel/msm_mpdecision/conf/scroff_single_core +echo "140" > /sys/kernel/msm_mpdecision/conf/twts_threshold_0 +echo "0" > /sys/kernel/msm_mpdecision/conf/twts_threshold_1 +echo "140" > /sys/kernel/msm_mpdecision/conf/twts_threshold_2 +echo "190" > /sys/kernel/msm_mpdecision/conf/twts_threshold_3 +echo "140" > /sys/kernel/msm_mpdecision/conf/twts_threshold_4 +echo "190" > /sys/kernel/msm_mpdecision/conf/twts_threshold_5 +echo "0" > /sys/kernel/msm_mpdecision/conf/twts_threshold_6 +echo "190" > /sys/kernel/msm_mpdecision/conf/twts_threshold_7 diff --git a/Patches/LineageOS-14.1/android_vendor_cm/sce.mk b/Patches/LineageOS-14.1/android_vendor_cm/sce.mk index 8a1cce70..90404994 100644 --- a/Patches/LineageOS-14.1/android_vendor_cm/sce.mk +++ b/Patches/LineageOS-14.1/android_vendor_cm/sce.mk @@ -1,3 +1,7 @@ +# MSM_MPDECISION +PRODUCT_COPY_FILES += \ + vendor/cm/prebuilt/common/etc/init.d/99mpdecision:system/etc/init.d/99mpdecision + # MicroG PRODUCT_PACKAGES += \ GmsCore \ diff --git a/Patches/LineageOS-14.1/msm_kernel/mpdcvs_trace.h b/Patches/LineageOS-14.1/msm_kernel/mpdcvs_trace.h new file mode 100644 index 00000000..0db13788 --- /dev/null +++ b/Patches/LineageOS-14.1/msm_kernel/mpdcvs_trace.h @@ -0,0 +1,156 @@ +/* Copyright (c) 2012, Free Software Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpdcvs_trace
+
+#if !defined(_TRACE_MPDCVS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPDCVS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(msm_mp,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, mp_val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mp_val = mp_val;
+	),
+
+	TP_printk("ev_name=%s ev_level=%d",
+		__get_str(name),
+		__entry->mp_val)
+);
+
+/* Core function of run_q */
+
+DEFINE_EVENT(msm_mp, msm_mp_runq,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_cpusonline,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_slacktime,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__string(cpuid, cpuid)
+		__field(int, val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(cpuid, cpuid);
+		__entry->val = val;
+	),
+
+	TP_printk("ev_name=%s d_name=%s ev_level=%d",
+		__get_str(name),
+		__get_str(cpuid),
+		__entry->val)
+);
+
+/* Core function of dcvs */
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_idle,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_iowait,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_slack_time,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs_scm,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, cpuid)
+		__field(int, ev_type)
+		__field(unsigned long, param0)
+		__field(unsigned long, param1)
+		__field(unsigned long, ret0)
+		__field(unsigned long, ret1)
+	),
+
+	TP_fast_assign(
+		__entry->cpuid = cpuid;
+		__entry->ev_type = ev_type;
+		__entry->param0 = param0;
+		__entry->param1 = param1;
+		__entry->ret0 = ret0;
+		__entry->ret1 = ret1;
+	),
+
+	TP_printk("dev=%lu ev_type=%d ev_param0=%lu ev_param1=%lu ev_ret0=%lu ev_ret1=%lu",
+		__entry->cpuid,
+		__entry->ev_type,
+		__entry->param0,
+		__entry->param1,
+		__entry->ret0,
+		__entry->ret1)
+);
+
+DEFINE_EVENT(msm_dcvs_scm, msm_dcvs_scm_event,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1)
+);
+
+#endif /* _TRACE_MPDCVS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.c b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.c
new file mode 100644
index 00000000..1a919fcf
--- /dev/null
+++ b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.c
@@ -0,0 +1,1363 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CORE_HANDLE_OFFSET (0xA0) +#define __err(f, ...) pr_err("MSM_DCVS: %s: " f, __func__, __VA_ARGS__) +#define __info(f, ...) pr_info("MSM_DCVS: %s: " f, __func__, __VA_ARGS__) +#define MAX_PENDING (5) + +#define CORE_FLAG_TEMP_UPDATE 0x1 + +struct core_attribs { + struct kobj_attribute freq_change_us; + + struct kobj_attribute disable_pc_threshold; + struct kobj_attribute em_win_size_min_us; + struct kobj_attribute em_win_size_max_us; + struct kobj_attribute em_max_util_pct; + struct kobj_attribute group_id; + struct kobj_attribute max_freq_chg_time_us; + struct kobj_attribute slack_mode_dynamic; + struct kobj_attribute slack_time_min_us; + struct kobj_attribute slack_time_max_us; + struct kobj_attribute slack_weight_thresh_pct; + struct kobj_attribute ss_no_corr_below_freq; + struct kobj_attribute ss_win_size_min_us; + struct kobj_attribute ss_win_size_max_us; + struct kobj_attribute ss_util_pct; + + struct kobj_attribute active_coeff_a; + struct kobj_attribute active_coeff_b; + struct kobj_attribute active_coeff_c; + struct kobj_attribute leakage_coeff_a; + struct kobj_attribute leakage_coeff_b; + struct kobj_attribute leakage_coeff_c; + struct kobj_attribute leakage_coeff_d; + + struct kobj_attribute thermal_poll_ms; + + struct kobj_attribute freq_tbl; + struct kobj_attribute offset_tbl; + + struct attribute_group attrib_group; +}; + +enum pending_freq_state { + /* + * used by the thread to check if pending_freq was updated while it was + * setting previous frequency - this is written to and used by the + * freq updating thread + */ + NO_OUTSTANDING_FREQ_CHANGE = 0, + + /* + * This request is set to indicate that the governor is stopped and no + * more frequency change requests are accepted untill it starts again. + * This is checked/used by the threads that want to change the freq + */ + STOP_FREQ_CHANGE = -1, + + /* + * Any other +ve value means that a freq change was requested and the + * thread has not gotten around to update it + * + * Any other -ve value means that this is the last freq change i.e. a + * freq change was requested but the thread has not run yet and + * meanwhile the governor was stopped. 
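+	 *
+	 * Illustrative walk-through (frequency value hypothetical, not from
+	 * this code): a governor request for 1026000 KHz sets pending_freq =
+	 * 1026000 and wakes the worker thread; the worker clears it back to
+	 * NO_OUTSTANDING_FREQ_CHANGE as it picks the request up; a stop
+	 * request parks the state at STOP_FREQ_CHANGE, after which
+	 * request_freq_change() refuses to queue anything further until the
+	 * governor is started again.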
+ */ +}; + +struct dcvs_core { + spinlock_t idle_state_change_lock; + /* 0 when not idle (busy) 1 when idle and -1 when governor starts and + * we dont know whether the next call is going to be idle enter or exit + */ + int idle_entered; + + enum msm_dcvs_core_type type; + /* this is the number in each type for example cpu 0,1,2 and gpu 0,1 */ + int type_core_num; + char core_name[CORE_NAME_MAX]; + uint32_t actual_freq; + uint32_t freq_change_us; + + uint32_t max_time_us; /* core param */ + + struct msm_dcvs_algo_param algo_param; + struct msm_dcvs_energy_curve_coeffs coeffs; + + /* private */ + ktime_t time_start; + struct task_struct *task; + struct core_attribs attrib; + uint32_t dcvs_core_id; + struct msm_dcvs_core_info *info; + int sensor; + wait_queue_head_t wait_q; + + int (*set_frequency)(int type_core_num, unsigned int freq); + unsigned int (*get_frequency)(int type_core_num); + int (*idle_enable)(int type_core_num, + enum msm_core_control_event event); + int (*set_floor_frequency)(int type_core_num, unsigned int freq); + + spinlock_t pending_freq_lock; + int pending_freq; + + struct hrtimer slack_timer; + struct delayed_work temperature_work; + int flags; +}; + +static int msm_dcvs_enabled = 1; +module_param_named(enable, msm_dcvs_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static struct dentry *debugfs_base; + +static struct dcvs_core core_list[CORES_MAX]; + +static struct kobject *cores_kobj; + +#define DCVS_MAX_NUM_FREQS 15 +static struct msm_dcvs_freq_entry cpu_freq_tbl[DCVS_MAX_NUM_FREQS]; +static unsigned num_cpu_freqs; +static struct msm_dcvs_platform_data *dcvs_pdata; + +static DEFINE_MUTEX(param_update_mutex); +static DEFINE_MUTEX(gpu_floor_mutex); + +static void force_stop_slack_timer(struct dcvs_core *core) +{ + unsigned long flags; + + spin_lock_irqsave(&core->idle_state_change_lock, flags); + hrtimer_cancel(&core->slack_timer); + spin_unlock_irqrestore(&core->idle_state_change_lock, flags); +} + +static void force_start_slack_timer(struct dcvs_core *core, int slack_us) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&core->idle_state_change_lock, flags); + + /* + * only start the timer if governor is not stopped + */ + if (slack_us != 0) { + ret = hrtimer_start(&core->slack_timer, + ktime_set(0, slack_us * 1000), + HRTIMER_MODE_REL_PINNED); + if (ret) { + pr_err("%s Failed to start timer ret = %d\n", + core->core_name, ret); + } + } + + spin_unlock_irqrestore(&core->idle_state_change_lock, flags); +} + +static void stop_slack_timer(struct dcvs_core *core) +{ + unsigned long flags; + + spin_lock_irqsave(&core->idle_state_change_lock, flags); + /* err only for cpu type's GPU's can do idle exit consecutively */ + if (core->idle_entered == 1 && !(core->dcvs_core_id >= GPU_OFFSET)) + __err("%s trying to reenter idle", core->core_name); + core->idle_entered = 1; + hrtimer_cancel(&core->slack_timer); + core->idle_entered = 1; + spin_unlock_irqrestore(&core->idle_state_change_lock, flags); +} + +static void start_slack_timer(struct dcvs_core *core, int slack_us) +{ + unsigned long flags1, flags2; + int ret; + + spin_lock_irqsave(&core->idle_state_change_lock, flags2); + + spin_lock_irqsave(&core->pending_freq_lock, flags1); + + /* err only for cpu type's GPU's can do idle enter consecutively */ + if (core->idle_entered == 0 && !(core->dcvs_core_id >= GPU_OFFSET)) + __err("%s trying to reexit idle", core->core_name); + core->idle_entered = 0; + /* + * only start the timer if governor is not stopped + */ + if (slack_us != 0 + && !(core->pending_freq < 
NO_OUTSTANDING_FREQ_CHANGE)) { + ret = hrtimer_start(&core->slack_timer, + ktime_set(0, slack_us * 1000), + HRTIMER_MODE_REL_PINNED); + if (ret) { + pr_err("%s Failed to start timer ret = %d\n", + core->core_name, ret); + } + } + spin_unlock_irqrestore(&core->pending_freq_lock, flags1); + + spin_unlock_irqrestore(&core->idle_state_change_lock, flags2); +} + +static void restart_slack_timer(struct dcvs_core *core, int slack_us) +{ + unsigned long flags1, flags2; + int ret; + + spin_lock_irqsave(&core->idle_state_change_lock, flags2); + + hrtimer_cancel(&core->slack_timer); + + spin_lock_irqsave(&core->pending_freq_lock, flags1); + + /* + * only start the timer if idle is not entered + * and governor is not stopped + */ + if (slack_us != 0 && (core->idle_entered != 1) + && !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) { + ret = hrtimer_start(&core->slack_timer, + ktime_set(0, slack_us * 1000), + HRTIMER_MODE_REL_PINNED); + if (ret) { + pr_err("%s Failed to start timer ret = %d\n", + core->core_name, ret); + } + } + spin_unlock_irqrestore(&core->pending_freq_lock, flags1); + spin_unlock_irqrestore(&core->idle_state_change_lock, flags2); +} + +void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq) +{ + static unsigned long curr_cpu0_freq; + unsigned long gpu_floor_freq = 0; + struct dcvs_core *gpu; + int i; + + if (!dcvs_pdata) + return; + + mutex_lock(&gpu_floor_mutex); + + if (cpu_freq) + curr_cpu0_freq = cpu_freq; + + for (i = 0; i < dcvs_pdata->num_sync_rules; i++) + if (curr_cpu0_freq > dcvs_pdata->sync_rules[i].cpu_khz) { + gpu_floor_freq = + dcvs_pdata->sync_rules[i].gpu_floor_khz; + break; + } + + if (num_online_cpus() > 1) + gpu_floor_freq = max(gpu_floor_freq, + dcvs_pdata->gpu_max_nom_khz); + + if (!gpu_floor_freq) { + mutex_unlock(&gpu_floor_mutex); + return; + } + + for (i = GPU_OFFSET; i < CORES_MAX; i++) { + gpu = &core_list[i]; + if (gpu->dcvs_core_id == -1) + continue; + + if (gpu->pending_freq != STOP_FREQ_CHANGE && + gpu->set_floor_frequency) { + gpu->set_floor_frequency(gpu->type_core_num, + gpu_floor_freq); + /* TZ will know about a freq change (if any) + * at next idle exit. 
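+			 * Re-reading the clock just below keeps actual_freq
+			 * coherent with whatever set_floor_frequency()
+			 * actually achieved, since the floor may have raised
+			 * the clock above the last DCVS-requested frequency.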
*/ + gpu->actual_freq = + gpu->get_frequency(gpu->type_core_num); + } + } + + mutex_unlock(&gpu_floor_mutex); +} + +static void check_power_collapse_modes(struct dcvs_core *core) +{ + struct msm_dcvs_algo_param *params; + + params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param; + + if (core->actual_freq >= params->disable_pc_threshold) + core->idle_enable(core->type_core_num, + MSM_DCVS_DISABLE_HIGH_LATENCY_MODES); + else + core->idle_enable(core->type_core_num, + MSM_DCVS_ENABLE_HIGH_LATENCY_MODES); +} + +static int __msm_dcvs_change_freq(struct dcvs_core *core) +{ + int ret = 0; + unsigned long flags = 0; + int requested_freq = 0; + ktime_t time_start; + uint32_t slack_us = 0; + uint32_t ret1 = 0; + + spin_lock_irqsave(&core->pending_freq_lock, flags); + if (core->pending_freq == STOP_FREQ_CHANGE) + goto out; +repeat: + BUG_ON(!core->pending_freq); + + requested_freq = core->pending_freq; + time_start = core->time_start; + core->time_start = ns_to_ktime(0); + + core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE; + + if (requested_freq == core->actual_freq) + goto out; + + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + + if (core->type == MSM_DCVS_CORE_TYPE_CPU && + core->type_core_num == 0) + msm_dcvs_apply_gpu_floor(requested_freq); + + /** + * Call the frequency sink driver to change the frequency + * We will need to get back the actual frequency in KHz and + * the record the time taken to change it. + */ + ret = core->set_frequency(core->type_core_num, requested_freq); + if (ret <= 0) + __err("Core %s failed to set freq %u\n", + core->core_name, requested_freq); + /* continue to call TZ to get updated slack timer */ + else + core->actual_freq = ret; + + core->freq_change_us = (uint32_t)ktime_to_us( + ktime_sub(ktime_get(), time_start)); + + if (core->type == MSM_DCVS_CORE_TYPE_CPU && + core->type_core_num == 0) { + mutex_lock(¶m_update_mutex); + check_power_collapse_modes(core); + mutex_unlock(¶m_update_mutex); + } + + /** + * Update algorithm with new freq and time taken to change + * to this frequency and that will get us the new slack + * timer + */ + ret = msm_dcvs_scm_event(core->dcvs_core_id, + MSM_DCVS_SCM_CLOCK_FREQ_UPDATE, + core->actual_freq, core->freq_change_us, + &slack_us, &ret1); + if (ret) { + __err("Error sending core (%s) dcvs_core_id = %d freq change (%u) reqfreq = %d slack_us=%d ret = %d\n", + core->core_name, core->dcvs_core_id, + core->actual_freq, requested_freq, + slack_us, ret); + } + + /* TODO confirm that we get a valid freq from SM even when the above + * FREQ_UPDATE fails + */ + restart_slack_timer(core, slack_us); + spin_lock_irqsave(&core->pending_freq_lock, flags); + + /** + * By the time we are done with freq changes, we could be asked to + * change again. Check before exiting. 
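+	 * If so, loop back to the "repeat" label with pending_freq_lock
+	 * still held and service the new request on this same worker pass.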
+ */ + if (core->pending_freq != NO_OUTSTANDING_FREQ_CHANGE + && core->pending_freq != STOP_FREQ_CHANGE) { + goto repeat; + } + +out: /* should always be jumped to with the spin_lock held */ + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + + return ret; +} + +static void msm_dcvs_report_temp_work(struct work_struct *work) +{ + struct dcvs_core *core = container_of(work, + struct dcvs_core, + temperature_work.work); + struct msm_dcvs_core_info *info = core->info; + struct tsens_device tsens_dev; + int ret; + unsigned long temp = 0; + int interval_ms; + + if (!(core->flags & CORE_FLAG_TEMP_UPDATE)) + return; + + tsens_dev.sensor_num = core->sensor; + ret = tsens_get_temp(&tsens_dev, &temp); + if (!temp) { + tsens_dev.sensor_num = 0; + ret = tsens_get_temp(&tsens_dev, &temp); + if (!temp) + goto out; + } + + if (temp == info->power_param.current_temp) + goto out; + info->power_param.current_temp = temp; + + ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, + &info->power_param, + &info->freq_tbl[0], &core->coeffs); +out: + if (info->thermal_poll_ms == 0) + interval_ms = 60000; + else if (info->thermal_poll_ms < 1000) + interval_ms = 1000; + else + interval_ms = info->thermal_poll_ms; + + schedule_delayed_work(&core->temperature_work, + msecs_to_jiffies(interval_ms)); +} + +static int msm_dcvs_do_freq(void *data) +{ + struct dcvs_core *core = (struct dcvs_core *)data; + + while (!kthread_should_stop()) { + wait_event(core->wait_q, !(core->pending_freq == 0 || + core->pending_freq == -1) || + kthread_should_stop()); + + if (kthread_should_stop()) + break; + + __msm_dcvs_change_freq(core); + } + + return 0; +} + +/* freq_pending_lock should be held */ +static void request_freq_change(struct dcvs_core *core, int new_freq) +{ + if (new_freq == NO_OUTSTANDING_FREQ_CHANGE) { + if (core->pending_freq != STOP_FREQ_CHANGE) { + __err("%s gov started with earlier pending freq %d\n", + core->core_name, core->pending_freq); + } + core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE; + return; + } + + if (new_freq == STOP_FREQ_CHANGE) { + core->pending_freq = STOP_FREQ_CHANGE; + return; + } + + if (core->pending_freq < 0) { + /* a value less than 0 means that the governor has stopped + * and no more freq changes should be requested + */ + return; + } + + if (core->actual_freq != new_freq && core->pending_freq != new_freq) { + core->pending_freq = new_freq; + core->time_start = ktime_get(); + wake_up(&core->wait_q); + } +} + +static int msm_dcvs_update_freq(struct dcvs_core *core, + enum msm_dcvs_scm_event event, uint32_t param0, + uint32_t *ret1) +{ + int ret = 0; + unsigned long flags = 0; + uint32_t new_freq = -EINVAL; + + spin_lock_irqsave(&core->pending_freq_lock, flags); + + ret = msm_dcvs_scm_event(core->dcvs_core_id, event, param0, + core->actual_freq, &new_freq, ret1); + if (ret) { + if (ret == -13) + ret = 0; + else + __err("Error (%d) sending SCM event %d for core %s\n", + ret, event, core->core_name); + goto out; + } + + if (new_freq == 0) { + /* + * sometimes TZ gives us a 0 freq back, + * do not queue up a request + */ + goto out; + } + + request_freq_change(core, new_freq); + +out: + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + + return ret; +} + +static enum hrtimer_restart msm_dcvs_core_slack_timer(struct hrtimer *timer) +{ + int ret = 0; + struct dcvs_core *core = container_of(timer, + struct dcvs_core, slack_timer); + uint32_t ret1; + + trace_printk("dcvs: Slack timer fired for core=%s\n", core->core_name); + /** + * Timer expired, notify TZ + * Dont care about 
the third arg. + */ + ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_QOS_TIMER_EXPIRED, 0, + &ret1); + if (ret) + __err("Timer expired for core %s but failed to notify.\n", + core->core_name); + + return HRTIMER_NORESTART; +} + +int msm_dcvs_update_algo_params(void) +{ + static struct msm_dcvs_algo_param curr_params; + struct msm_dcvs_algo_param *new_params; + int cpu, ret = 0; + + mutex_lock(¶m_update_mutex); + new_params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param; + + if (memcmp(&curr_params, new_params, + sizeof(struct msm_dcvs_algo_param))) { + for_each_possible_cpu(cpu) { + struct dcvs_core *core = &core_list[CPU_OFFSET + cpu]; + ret = msm_dcvs_scm_set_algo_params(CPU_OFFSET + cpu, + new_params); + if (ret) { + pr_err("scm set algo params failed on cpu %d, ret %d\n", + cpu, ret); + mutex_unlock(¶m_update_mutex); + return ret; + } + if (cpu == 0) + check_power_collapse_modes(core); + } + memcpy(&curr_params, new_params, + sizeof(struct msm_dcvs_algo_param)); + } + + mutex_unlock(¶m_update_mutex); + return ret; +} + +/* Helper functions and macros for sysfs nodes for a core */ +#define CORE_FROM_ATTRIBS(attr, name) \ + container_of(container_of(attr, struct core_attribs, name), \ + struct dcvs_core, attrib); + +#define DCVS_PARAM_SHOW(_name, v) \ +static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + return snprintf(buf, PAGE_SIZE, "%d\n", v); \ +} + +#define DCVS_PARAM_STORE(_name) \ +static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\ + struct kobj_attribute *attr, char *buf) \ +{ \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + return snprintf(buf, PAGE_SIZE, "%d\n", core->info->_name); \ +} \ +static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, const char *buf, size_t count) \ +{ \ + int ret = 0; \ + uint32_t val = 0; \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + ret = kstrtouint(buf, 10, &val); \ + if (ret) { \ + __err("Invalid input %s for %s\n", buf, __stringify(_name));\ + } else { \ + core->info->_name = val; \ + } \ + return count; \ +} + +#define DCVS_ALGO_PARAM(_name) \ +static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\ + struct kobj_attribute *attr, char *buf) \ +{ \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + return snprintf(buf, PAGE_SIZE, "%d\n", core->algo_param._name); \ +} \ +static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, const char *buf, size_t count) \ +{ \ + int ret = 0; \ + uint32_t val = 0; \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + ret = kstrtouint(buf, 10, &val); \ + if (ret) { \ + __err("Invalid input %s for %s\n", buf, __stringify(_name));\ + } else { \ + uint32_t old_val = core->algo_param._name; \ + core->algo_param._name = val; \ + ret = msm_dcvs_update_algo_params(); \ + if (ret) { \ + core->algo_param._name = old_val; \ + } \ + } \ + return count; \ +} + +#define DCVS_ENERGY_PARAM(_name) \ +static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\ + struct kobj_attribute *attr, char *buf) \ +{ \ + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + return snprintf(buf, PAGE_SIZE, "%d\n", core->coeffs._name); \ +} \ +static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, const char *buf, size_t count) \ +{ \ + int ret = 0; \ + int32_t val = 0; \ + 
struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \ + ret = kstrtoint(buf, 10, &val); \ + if (ret) { \ + __err("Invalid input %s for %s\n", buf, __stringify(_name));\ + } else { \ + int32_t old_val = core->coeffs._name; \ + core->coeffs._name = val; \ + ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, \ + &core->info->power_param, &core->info->freq_tbl[0], \ + &core->coeffs); \ + if (ret) { \ + core->coeffs._name = old_val; \ + __err("Error(%d) in setting %d for coeffs param %s\n",\ + ret, val, __stringify(_name)); \ + } \ + } \ + return count; \ +} + +#define DCVS_RO_ATTRIB(i, _name) \ + core->attrib._name.attr.name = __stringify(_name); \ + core->attrib._name.attr.mode = S_IRUGO; \ + core->attrib._name.show = msm_dcvs_attr_##_name##_show; \ + core->attrib._name.store = NULL; \ + core->attrib.attrib_group.attrs[i] = &core->attrib._name.attr; + +#define DCVS_RW_ATTRIB(i, _name) \ + core->attrib._name.attr.name = __stringify(_name); \ + core->attrib._name.attr.mode = S_IRUGO | S_IWUSR; \ + core->attrib._name.show = msm_dcvs_attr_##_name##_show; \ + core->attrib._name.store = msm_dcvs_attr_##_name##_store; \ + core->attrib.attrib_group.attrs[i] = &core->attrib._name.attr; + +/** + * Function declarations for different attributes. + * Gets used when setting the attribute show and store parameters. + */ +DCVS_PARAM_SHOW(freq_change_us, (core->freq_change_us)) + +DCVS_ALGO_PARAM(disable_pc_threshold) +DCVS_ALGO_PARAM(em_win_size_min_us) +DCVS_ALGO_PARAM(em_win_size_max_us) +DCVS_ALGO_PARAM(em_max_util_pct) +DCVS_ALGO_PARAM(group_id) +DCVS_ALGO_PARAM(max_freq_chg_time_us) +DCVS_ALGO_PARAM(slack_mode_dynamic) +DCVS_ALGO_PARAM(slack_time_min_us) +DCVS_ALGO_PARAM(slack_time_max_us) +DCVS_ALGO_PARAM(slack_weight_thresh_pct) +DCVS_ALGO_PARAM(ss_no_corr_below_freq) +DCVS_ALGO_PARAM(ss_win_size_min_us) +DCVS_ALGO_PARAM(ss_win_size_max_us) +DCVS_ALGO_PARAM(ss_util_pct) + +DCVS_ENERGY_PARAM(active_coeff_a) +DCVS_ENERGY_PARAM(active_coeff_b) +DCVS_ENERGY_PARAM(active_coeff_c) +DCVS_ENERGY_PARAM(leakage_coeff_a) +DCVS_ENERGY_PARAM(leakage_coeff_b) +DCVS_ENERGY_PARAM(leakage_coeff_c) +DCVS_ENERGY_PARAM(leakage_coeff_d) + +DCVS_PARAM_STORE(thermal_poll_ms) + +static ssize_t msm_dcvs_attr_offset_tbl_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct msm_dcvs_freq_entry *freq_tbl; + char *buf_idx = buf; + int i, len; + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, offset_tbl); + + freq_tbl = core->info->freq_tbl; + *buf_idx = '\0'; + + /* limit the number of frequencies we will print into + * the PAGE_SIZE sysfs show buffer. 
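+	 * Each row is emitted via snprintf(buf_idx, 30, "%7d %7d %7d\n"),
+	 * i.e. at most 29 bytes, so 64 rows stay under 2 KB of the
+	 * (typically 4 KB) PAGE_SIZE buffer.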
*/ + if (core->info->power_param.num_freq > 64) + return 0; + + for (i = 0; i < core->info->power_param.num_freq; i++) { + len = snprintf(buf_idx, 30, "%7d %7d %7d\n", + freq_tbl[i].freq, + freq_tbl[i].active_energy_offset, + freq_tbl[i].leakage_energy_offset); + /* buf_idx always points at terminating null */ + buf_idx += len; + } + return buf_idx - buf; +} + +static ssize_t msm_dcvs_attr_offset_tbl_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct msm_dcvs_freq_entry *freq_tbl; + uint32_t freq, active_energy_offset, leakage_energy_offset; + int i, ret; + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, offset_tbl); + + freq_tbl = core->info->freq_tbl; + + ret = sscanf(buf, "%u %u %u", + &freq, &active_energy_offset, &leakage_energy_offset); + if (ret != 3) { + __err("Invalid input %s for offset_tbl\n", buf); + return count; + } + + for (i = 0; i < core->info->power_param.num_freq; i++) + if (freq_tbl[i].freq == freq) { + freq_tbl[i].active_energy_offset = + active_energy_offset; + freq_tbl[i].leakage_energy_offset = + leakage_energy_offset; + break; + } + + if (i >= core->info->power_param.num_freq) { + __err("Invalid frequency for offset_tbl: %d\n", freq); + return count; + } + + ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, + &core->info->power_param, + &core->info->freq_tbl[0], + &core->coeffs); + if (ret) + __err("Error %d in updating active/leakage energy\n", ret); + + return count; +} + +static ssize_t msm_dcvs_attr_freq_tbl_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct msm_dcvs_freq_entry *freq_tbl; + char *buf_idx = buf; + int i, len; + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl); + + freq_tbl = core->info->freq_tbl; + *buf_idx = '\0'; + + /* limit the number of frequencies we will print into + * the PAGE_SIZE sysfs show buffer. 
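+	 * Here each enabled row is only snprintf(buf_idx, 10, "%7d "), so
+	 * the same 64-entry cap is even more conservative than the one used
+	 * for offset_tbl above.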
*/ + if (core->info->power_param.num_freq > 64) + return 0; + + for (i = 0; i < core->info->power_param.num_freq; i++) { + if (freq_tbl[i].is_trans_level) { + len = snprintf(buf_idx, 10, "%7d ", freq_tbl[i].freq); + /* buf_idx always points at terminating null */ + buf_idx += len; + } + } + /* overwrite final trailing space with newline */ + if (buf_idx > buf) + *(buf_idx - 1) = '\n'; + + return buf_idx - buf; +} + +static ssize_t msm_dcvs_attr_freq_tbl_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct msm_dcvs_freq_entry *freq_tbl; + uint32_t freq; + int i, ret; + struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl); + + freq_tbl = core->info->freq_tbl; + + ret = kstrtouint(buf, 10, &freq); + if (ret) { + __err("Invalid input %s for freq_tbl\n", buf); + return count; + } + + for (i = 0; i < core->info->power_param.num_freq; i++) + if (freq_tbl[i].freq == freq) { + freq_tbl[i].is_trans_level ^= 1; + break; + } + + if (i >= core->info->power_param.num_freq) { + __err("Invalid frequency for freq_tbl: %d\n", freq); + return count; + } + + ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, + &core->info->power_param, + &core->info->freq_tbl[0], + &core->coeffs); + if (ret) { + freq_tbl[i].is_trans_level ^= 1; + __err("Error %d in toggling freq %d (orig enable val %d)\n", + ret, freq_tbl[i].freq, freq_tbl[i].is_trans_level); + } + return count; +} + +static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core) +{ + int ret = 0; + struct kobject *core_kobj = NULL; + const int attr_count = 26; + + BUG_ON(!cores_kobj); + + core->attrib.attrib_group.attrs = + kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL); + + if (!core->attrib.attrib_group.attrs) { + ret = -ENOMEM; + goto done; + } + + DCVS_RO_ATTRIB(0, freq_change_us); + + DCVS_RW_ATTRIB(1, disable_pc_threshold); + DCVS_RW_ATTRIB(2, em_win_size_min_us); + DCVS_RW_ATTRIB(3, em_win_size_max_us); + DCVS_RW_ATTRIB(4, em_max_util_pct); + DCVS_RW_ATTRIB(5, group_id); + DCVS_RW_ATTRIB(6, max_freq_chg_time_us); + DCVS_RW_ATTRIB(7, slack_mode_dynamic); + DCVS_RW_ATTRIB(8, slack_weight_thresh_pct); + DCVS_RW_ATTRIB(9, slack_time_min_us); + DCVS_RW_ATTRIB(10, slack_time_max_us); + DCVS_RW_ATTRIB(11, ss_no_corr_below_freq); + DCVS_RW_ATTRIB(12, ss_win_size_min_us); + DCVS_RW_ATTRIB(13, ss_win_size_max_us); + DCVS_RW_ATTRIB(14, ss_util_pct); + + DCVS_RW_ATTRIB(15, active_coeff_a); + DCVS_RW_ATTRIB(16, active_coeff_b); + DCVS_RW_ATTRIB(17, active_coeff_c); + DCVS_RW_ATTRIB(18, leakage_coeff_a); + DCVS_RW_ATTRIB(19, leakage_coeff_b); + DCVS_RW_ATTRIB(20, leakage_coeff_c); + DCVS_RW_ATTRIB(21, leakage_coeff_d); + DCVS_RW_ATTRIB(22, thermal_poll_ms); + + DCVS_RW_ATTRIB(23, freq_tbl); + DCVS_RW_ATTRIB(24, offset_tbl); + + core->attrib.attrib_group.attrs[25] = NULL; + + core_kobj = kobject_create_and_add(core->core_name, cores_kobj); + if (!core_kobj) { + ret = -ENOMEM; + goto done; + } + + ret = sysfs_create_group(core_kobj, &core->attrib.attrib_group); + if (ret) + __err("Cannot create core %s attr group\n", core->core_name); + +done: + if (ret) { + kfree(core->attrib.attrib_group.attrs); + kobject_del(core_kobj); + } + + return ret; +} + +static int get_core_offset(enum msm_dcvs_core_type type, int num) +{ + int offset = -EINVAL; + + switch (type) { + case MSM_DCVS_CORE_TYPE_CPU: + offset = CPU_OFFSET + num; + BUG_ON(offset >= GPU_OFFSET); + break; + case MSM_DCVS_CORE_TYPE_GPU: + offset = GPU_OFFSET + num; + BUG_ON(offset >= CORES_MAX); + break; + default: + BUG(); + } + + return 
offset; +} + +/* Return the core and initialize non platform data specific numbers in it */ +static struct dcvs_core *msm_dcvs_add_core(enum msm_dcvs_core_type type, + int num) +{ + struct dcvs_core *core = NULL; + int i; + char name[CORE_NAME_MAX]; + + i = get_core_offset(type, num); + if (i < 0) + return NULL; + + if (type == MSM_DCVS_CORE_TYPE_CPU) + snprintf(name, CORE_NAME_MAX, "cpu%d", num); + else + snprintf(name, CORE_NAME_MAX, "gpu%d", num); + + core = &core_list[i]; + core->dcvs_core_id = i; + strlcpy(core->core_name, name, CORE_NAME_MAX); + spin_lock_init(&core->pending_freq_lock); + spin_lock_init(&core->idle_state_change_lock); + hrtimer_init(&core->slack_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + core->slack_timer.function = msm_dcvs_core_slack_timer; + return core; +} + +/* Return the core if found or add to list if @add_to_list is true */ +static struct dcvs_core *msm_dcvs_get_core(int offset) +{ + /* if the handle is still not set bug */ + BUG_ON(core_list[offset].dcvs_core_id == -1); + return &core_list[offset]; +} + +void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage) +{ + BUG_ON(freq == 0 || voltage == 0 || + num_cpu_freqs == DCVS_MAX_NUM_FREQS); + + cpu_freq_tbl[num_cpu_freqs].freq = freq; + cpu_freq_tbl[num_cpu_freqs].voltage = voltage; + + num_cpu_freqs++; +} + +int msm_dcvs_register_core( + enum msm_dcvs_core_type type, + int type_core_num, + struct msm_dcvs_core_info *info, + int (*set_frequency)(int type_core_num, unsigned int freq), + unsigned int (*get_frequency)(int type_core_num), + int (*idle_enable)(int type_core_num, + enum msm_core_control_event event), + int (*set_floor_frequency)(int type_core_num, unsigned int freq), + int sensor) +{ + int ret = -EINVAL; + int offset; + struct dcvs_core *core = NULL; + uint32_t ret1; + uint32_t ret2; + + if (!msm_dcvs_enabled) + return ret; + + offset = get_core_offset(type, type_core_num); + if (offset < 0) + return ret; + if (core_list[offset].dcvs_core_id != -1) + return core_list[offset].dcvs_core_id; + + core = msm_dcvs_add_core(type, type_core_num); + if (!core) + return ret; + + core->type = type; + core->type_core_num = type_core_num; + core->set_frequency = set_frequency; + core->get_frequency = get_frequency; + core->idle_enable = idle_enable; + core->set_floor_frequency = set_floor_frequency; + + core->info = info; + if (type == MSM_DCVS_CORE_TYPE_CPU) { + BUG_ON(num_cpu_freqs == 0); + info->freq_tbl = cpu_freq_tbl; + info->power_param.num_freq = num_cpu_freqs; + } + + memcpy(&core->algo_param, &info->algo_param, + sizeof(struct msm_dcvs_algo_param)); + + memcpy(&core->coeffs, &info->energy_coeffs, + sizeof(struct msm_dcvs_energy_curve_coeffs)); + + /* + * The tz expects cpu0 to represent bit 0 in the mask, however the + * dcvs_core_id needs to start from 1, dcvs_core_id = 0 is used to + * indicate that this request is not associated with any core. 
+ * mpdecision + */ + info->core_param.core_bitmask_id + = 1 << (core->dcvs_core_id - CPU_OFFSET); + core->sensor = sensor; + + ret = msm_dcvs_scm_register_core(core->dcvs_core_id, &info->core_param); + if (ret) { + __err("%s: scm register core fail handle = %d ret = %d\n", + __func__, core->dcvs_core_id, ret); + goto bail; + } + + ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id, + &info->algo_param); + if (ret) { + __err("%s: scm algo params failed ret = %d\n", __func__, ret); + goto bail; + } + + ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, + &info->power_param, + &info->freq_tbl[0], &core->coeffs); + if (ret) { + __err("%s: scm power params failed ret = %d\n", __func__, ret); + goto bail; + } + + ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE, + core->actual_freq, 0, &ret1, &ret2); + if (ret) + goto bail; + + ret = msm_dcvs_setup_core_sysfs(core); + if (ret) { + __err("Unable to setup core %s sysfs\n", core->core_name); + goto bail; + } + core->idle_entered = -1; + init_waitqueue_head(&core->wait_q); + core->task = kthread_run(msm_dcvs_do_freq, (void *)core, + "msm_dcvs/%d", core->dcvs_core_id); + ret = core->dcvs_core_id; + return ret; +bail: + core->dcvs_core_id = -1; + return -EINVAL; +} +EXPORT_SYMBOL(msm_dcvs_register_core); + +void msm_dcvs_update_limits(int dcvs_core_id) +{ + struct dcvs_core *core; + + if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) { + __err("%s invalid dcvs_core_id = %d returning -EINVAL\n", + __func__, dcvs_core_id); + return; + } + + core = msm_dcvs_get_core(dcvs_core_id); + core->actual_freq = core->get_frequency(core->type_core_num); +} + +int msm_dcvs_freq_sink_start(int dcvs_core_id) +{ + int ret = -EINVAL; + struct dcvs_core *core = NULL; + uint32_t ret1; + unsigned long flags; + int new_freq; + int timer_interval_us; + + if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) { + __err("%s invalid dcvs_core_id = %d returning -EINVAL\n", + __func__, dcvs_core_id); + return -EINVAL; + } + + core = msm_dcvs_get_core(dcvs_core_id); + if (!core) + return ret; + + core->actual_freq = core->get_frequency(core->type_core_num); + + spin_lock_irqsave(&core->pending_freq_lock, flags); + /* mark that we are ready to accept new frequencies */ + request_freq_change(core, NO_OUTSTANDING_FREQ_CHANGE); + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + + spin_lock_irqsave(&core->idle_state_change_lock, flags); + core->idle_entered = -1; + spin_unlock_irqrestore(&core->idle_state_change_lock, flags); + + /* Notify TZ to start receiving idle info for the core */ + ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 1, &ret1); + + ret = msm_dcvs_scm_event( + core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE, core->actual_freq, + 0, &new_freq, &timer_interval_us); + if (ret) + __err("Error (%d) DCVS sending online for %s\n", + ret, core->core_name); + + if (new_freq != 0) { + spin_lock_irqsave(&core->pending_freq_lock, flags); + request_freq_change(core, new_freq); + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + } + force_start_slack_timer(core, timer_interval_us); + + core->flags |= CORE_FLAG_TEMP_UPDATE; + INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work); + schedule_delayed_work(&core->temperature_work, + msecs_to_jiffies(core->info->thermal_poll_ms)); + + core->idle_enable(core->type_core_num, MSM_DCVS_ENABLE_IDLE_PULSE); + return 0; +} +EXPORT_SYMBOL(msm_dcvs_freq_sink_start); + +int msm_dcvs_freq_sink_stop(int dcvs_core_id) +{ + int ret = -EINVAL; + struct dcvs_core 
*core = NULL; + uint32_t ret1; + uint32_t freq; + unsigned long flags; + + if (dcvs_core_id < 0 || dcvs_core_id > CORES_MAX) { + pr_err("%s invalid dcvs_core_id = %d returning -EINVAL\n", + __func__, dcvs_core_id); + return -EINVAL; + } + + core = msm_dcvs_get_core(dcvs_core_id); + if (!core) { + __err("couldn't find core for coreid = %d\n", dcvs_core_id); + return ret; + } + + core->flags &= ~CORE_FLAG_TEMP_UPDATE; + cancel_delayed_work(&core->temperature_work); + + core->idle_enable(core->type_core_num, MSM_DCVS_DISABLE_IDLE_PULSE); + /* Notify TZ to stop receiving idle info for the core */ + ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_DCVS_ENABLE, + 0, core->actual_freq, &freq, &ret1); + core->idle_enable(core->type_core_num, + MSM_DCVS_ENABLE_HIGH_LATENCY_MODES); + + if (core->type == MSM_DCVS_CORE_TYPE_GPU) + mutex_lock(&gpu_floor_mutex); + + spin_lock_irqsave(&core->pending_freq_lock, flags); + /* flush out all the pending freq changes */ + request_freq_change(core, STOP_FREQ_CHANGE); + spin_unlock_irqrestore(&core->pending_freq_lock, flags); + + if (core->type == MSM_DCVS_CORE_TYPE_GPU) + mutex_unlock(&gpu_floor_mutex); + + force_stop_slack_timer(core); + + return 0; +} +EXPORT_SYMBOL(msm_dcvs_freq_sink_stop); + +int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state, + uint32_t iowaited) +{ + int ret = 0; + struct dcvs_core *core = NULL; + uint32_t timer_interval_us = 0; + uint32_t r0, r1; + + if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) { + pr_err("invalid dcvs_core_id = %d ret -EINVAL\n", dcvs_core_id); + return -EINVAL; + } + + core = msm_dcvs_get_core(dcvs_core_id); + + switch (state) { + case MSM_DCVS_IDLE_ENTER: + stop_slack_timer(core); + ret = msm_dcvs_scm_event(core->dcvs_core_id, + MSM_DCVS_SCM_IDLE_ENTER, 0, 0, &r0, &r1); + if (ret < 0 && ret != -13) + __err("Error (%d) sending idle enter for %s\n", + ret, core->core_name); + trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 1); + break; + + case MSM_DCVS_IDLE_EXIT: + ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_IDLE_EXIT, + iowaited, &timer_interval_us); + if (ret) + __err("Error (%d) sending idle exit for %s\n", + ret, core->core_name); + start_slack_timer(core, timer_interval_us); + trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 0); + trace_msm_dcvs_iowait("iowait", core->core_name, iowaited); + trace_msm_dcvs_slack_time("slack_timer_dcvs", core->core_name, + timer_interval_us); + break; + } + + return ret; +} +EXPORT_SYMBOL(msm_dcvs_idle); + +static int __init msm_dcvs_late_init(void) +{ + struct kobject *module_kobj = NULL; + int ret = 0; + + if (!msm_dcvs_enabled) + return ret; + + module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); + if (!module_kobj) { + pr_err("%s: cannot find kobject for module %s\n", + __func__, KBUILD_MODNAME); + ret = -ENOENT; + goto err; + } + + cores_kobj = kobject_create_and_add("cores", module_kobj); + if (!cores_kobj) { + __err("Cannot create %s kobject\n", "cores"); + ret = -ENOMEM; + goto err; + } + + debugfs_base = debugfs_create_dir("msm_dcvs", NULL); + if (!debugfs_base) { + __err("Cannot create debugfs base %s\n", "msm_dcvs"); + ret = -ENOENT; + goto err; + } + +err: + if (ret) { + kobject_del(cores_kobj); + cores_kobj = NULL; + debugfs_remove(debugfs_base); + } + + return ret; +} +late_initcall(msm_dcvs_late_init); + +static int __devinit dcvs_probe(struct platform_device *pdev) +{ + if (pdev->dev.platform_data) + dcvs_pdata = pdev->dev.platform_data; + + return 0; +} + +static struct platform_driver dcvs_driver = { + 
.probe = dcvs_probe,
+	.driver = {
+		.name = "dcvs",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_dcvs_early_init(void)
+{
+	int ret = 0;
+	int i;
+
+	platform_driver_register(&dcvs_driver);
+
+	if (!msm_dcvs_enabled) {
+		__info("Not enabled (%d)\n", msm_dcvs_enabled);
+		return 0;
+	}
+
+	/* Only need about 32kBytes for normal operation */
+	ret = msm_dcvs_scm_init(SZ_32K);
+	if (ret) {
+		__err("Unable to initialize DCVS err=%d\n", ret);
+		msm_dcvs_enabled = 0;
+		goto done;
+	}
+
+	for (i = 0; i < CORES_MAX; i++) {
+		core_list[i].dcvs_core_id = -1;
+		core_list[i].pending_freq = STOP_FREQ_CHANGE;
+	}
+done:
+	return ret;
+}
+postcore_initcall(msm_dcvs_early_init);
diff --git a/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.h b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.h
new file mode 100644
index 00000000..2ad7d22f
--- /dev/null
+++ b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs.h
@@ -0,0 +1,177 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_DCVS_H
+#define _ARCH_ARM_MACH_MSM_MSM_DCVS_H
+
+#include <mach/msm_dcvs_scm.h>
+
+#define CORE_NAME_MAX (32)
+#define CORES_MAX (10)
+
+#define CPU_OFFSET	1	/* used to notify TZ the core number */
+#define GPU_OFFSET (CORES_MAX * 2/3)	/* there will be more cpus than gpus,
+					 * let the GPU be assigned fewer core
+					 * elements and start later
+					 */
+
+enum msm_core_idle_state {
+	MSM_DCVS_IDLE_ENTER,
+	MSM_DCVS_IDLE_EXIT,
+};
+
+enum msm_core_control_event {
+	MSM_DCVS_ENABLE_IDLE_PULSE,
+	MSM_DCVS_DISABLE_IDLE_PULSE,
+	MSM_DCVS_ENABLE_HIGH_LATENCY_MODES,
+	MSM_DCVS_DISABLE_HIGH_LATENCY_MODES,
+};
+
+struct msm_dcvs_sync_rule {
+	unsigned long cpu_khz;
+	unsigned long gpu_floor_khz;
+};
+
+struct msm_dcvs_platform_data {
+	struct msm_dcvs_sync_rule *sync_rules;
+	unsigned num_sync_rules;
+	unsigned long gpu_max_nom_khz;
+};
+
+struct msm_gov_platform_data {
+	struct msm_dcvs_core_info *info;
+	int latency;
+};
+
+/**
+ * msm_dcvs_register_cpu_freq
+ * @freq: the frequency value to register
+ * @voltage: the operating voltage (in mV) associated with the above frequency
+ *
+ * Register a cpu frequency and its operating voltage with dcvs.
+ */
+#ifdef CONFIG_MSM_DCVS
+void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage);
+#else
+static inline void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
+{}
+#endif
+
+/**
+ * msm_dcvs_idle
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ * @state: The enter/exit idle state the core is in
+ * @iowaited: iowait in us on MSM_DCVS_IDLE_EXIT.
+ * @return:
+ *	0 on success,
+ *	-ENOSYS,
+ *	-EINVAL,
+ *	SCM return values
+ *
+ * Send idle state notifications to the msm_dcvs driver
+ */
+int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
+		uint32_t iowaited);
+
+/**
+ * struct msm_dcvs_core_info
+ *
+ * Core specific information used by algorithm. Need to provide this
+ * before the sink driver can be registered.
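+ *
+ * A minimal sketch of how a platform might populate it (all values
+ * hypothetical, not taken from any real board file):
+ *
+ *	static struct msm_dcvs_core_info cpu_info = {
+ *		.thermal_poll_ms = 60000,
+ *		.core_param = {
+ *			.core_type = MSM_DCVS_CORE_TYPE_CPU,
+ *		},
+ *		.algo_param = {
+ *			.disable_pc_threshold = 1350000,
+ *			.em_win_size_min_us = 100000,
+ *			.em_win_size_max_us = 300000,
+ *		},
+ *	};
+ *
+ * For CPUs, freq_tbl and power_param.num_freq are filled in by the core
+ * itself from the table built up via msm_dcvs_register_cpu_freq().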
+ */ +struct msm_dcvs_core_info { + int num_cores; + int *sensors; + int thermal_poll_ms; + struct msm_dcvs_freq_entry *freq_tbl; + struct msm_dcvs_core_param core_param; + struct msm_dcvs_algo_param algo_param; + struct msm_dcvs_energy_curve_coeffs energy_coeffs; + struct msm_dcvs_power_params power_param; +}; + +/** + * msm_dcvs_register_core + * @type: whether this is a CPU or a GPU + * @type_core_num: The number of the core for a type + * @info: The core specific algorithm parameters. + * @sensor: The thermal sensor number of the core in question + * @return : + * 0 on success, + * -ENOSYS, + * -ENOMEM + * + * Register the core with msm_dcvs driver. Done once at init before calling + * msm_dcvs_freq_sink_register + * Cores that need to run synchronously must share the same group id. + */ +extern int msm_dcvs_register_core( + enum msm_dcvs_core_type type, + int type_core_num, + struct msm_dcvs_core_info *info, + int (*set_frequency)(int type_core_num, unsigned int freq), + unsigned int (*get_frequency)(int type_core_num), + int (*idle_enable)(int type_core_num, + enum msm_core_control_event event), + int (*set_floor_frequency)(int type_core_num, unsigned int freq), + int sensor); + +/** + * msm_dcvs_freq_sink_start + * @drv: The sink driver + * @return: Handle unique to the core. + * + * Register the clock driver code with the msm_dvs driver to get notified about + * frequency change requests. + */ +extern int msm_dcvs_freq_sink_start(int dcvs_core_id); + +/** + * msm_dcvs_freq_sink_stop + * @drv: The sink driver + * @return: + * 0 on success, + * -EINVAL + * + * Unregister the sink driver for the core. This will cause the source driver + * for the core to stop sending idle pulses. + */ +extern int msm_dcvs_freq_sink_stop(int dcvs_core_id); + +/** + * msm_dcvs_update_limits + * @drv: The sink driver + * + * Update the frequency known to dcvs when the limits are changed. + */ +extern void msm_dcvs_update_limits(int dcvs_core_id); + +/** + * msm_dcvs_apply_gpu_floor + * @cpu_freq: CPU frequency to compare to GPU sync rules + * + * Apply a GPU floor frequency if the corresponding CPU frequency, + * or the number of CPUs online, requires it. + */ +extern void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq); + +/** + * msm_dcvs_update_algo_params + * @return: + * 0 on success, < 0 on error + * + * Updates the DCVS algorithm with parameters depending on the + * number of CPUs online. + */ +extern int msm_dcvs_update_algo_params(void); +#endif diff --git a/Patches/LineageOS-14.1/msm_kernel/msm_dcvs_scm.h b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs_scm.h new file mode 100644 index 00000000..7eefd549 --- /dev/null +++ b/Patches/LineageOS-14.1/msm_kernel/msm_dcvs_scm.h @@ -0,0 +1,262 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H +#define _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H + +enum msm_dcvs_core_type { + MSM_DCVS_CORE_TYPE_CPU = 0, + MSM_DCVS_CORE_TYPE_GPU = 1, +}; + +enum msm_dcvs_algo_param_type { + MSM_DCVS_ALGO_DCVS_PARAM = 0, + MSM_DCVS_ALGO_MPD_PARAM = 1, +}; + +enum msm_dcvs_scm_event { + MSM_DCVS_SCM_IDLE_ENTER = 0, /* Core enters idle */ + MSM_DCVS_SCM_IDLE_EXIT = 1, /* Core exits idle */ + MSM_DCVS_SCM_QOS_TIMER_EXPIRED = 2, /* Core slack timer expired */ + MSM_DCVS_SCM_CLOCK_FREQ_UPDATE = 3, /* Core freq change complete */ + MSM_DCVS_SCM_CORE_ONLINE = 4, /* Core is online */ + MSM_DCVS_SCM_CORE_OFFLINE = 5, /* Core is offline */ + MSM_DCVS_SCM_CORE_UNAVAILABLE = 6, /* Core is offline + unavailable */ + MSM_DCVS_SCM_DCVS_ENABLE = 7, /* DCVS is enabled/disabled for core */ + MSM_DCVS_SCM_MPD_ENABLE = 8, /* Enable/disable MP Decision */ + MSM_DCVS_SCM_RUNQ_UPDATE = 9, /* Update running threads */ + MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED = 10, /* MPDecision slack timer */ +}; + +struct msm_dcvs_algo_param { + uint32_t disable_pc_threshold; + uint32_t em_win_size_min_us; + uint32_t em_win_size_max_us; + uint32_t em_max_util_pct; + uint32_t group_id; + uint32_t max_freq_chg_time_us; + uint32_t slack_mode_dynamic; + uint32_t slack_time_min_us; + uint32_t slack_time_max_us; + uint32_t slack_weight_thresh_pct; + uint32_t ss_no_corr_below_freq; + uint32_t ss_win_size_min_us; + uint32_t ss_win_size_max_us; + uint32_t ss_util_pct; +}; + +struct msm_dcvs_freq_entry { + uint32_t freq; + uint32_t voltage; + uint32_t is_trans_level; + uint32_t active_energy_offset; + uint32_t leakage_energy_offset; +}; + +struct msm_dcvs_energy_curve_coeffs { + int32_t active_coeff_a; + int32_t active_coeff_b; + int32_t active_coeff_c; + + int32_t leakage_coeff_a; + int32_t leakage_coeff_b; + int32_t leakage_coeff_c; + int32_t leakage_coeff_d; +}; + +struct msm_dcvs_power_params { + uint32_t current_temp; + uint32_t num_freq; /* number of msm_dcvs_freq_entry passed */ +}; + +struct msm_dcvs_core_param { + uint32_t core_type; + uint32_t core_bitmask_id; +}; + +struct msm_mpd_algo_param { + uint32_t em_win_size_min_us; + uint32_t em_win_size_max_us; + uint32_t em_max_util_pct; + uint32_t mp_em_rounding_point_min; + uint32_t mp_em_rounding_point_max; + uint32_t online_util_pct_min; + uint32_t online_util_pct_max; + uint32_t slack_time_min_us; + uint32_t slack_time_max_us; +}; + +#ifdef CONFIG_MSM_DCVS +/** + * Initialize DCVS algorithm in TrustZone. + * Must call before invoking any other DCVS call into TZ. + * + * @size: Size of buffer in bytes + * + * @return: + * 0 on success. + * -EEXIST: DCVS algorithm already initialized. + * -EINVAL: Invalid args. + */ +extern int msm_dcvs_scm_init(size_t size); + +/** + * Registers cores with the DCVS algo. + * + * @core_id: The core identifier that will be used for communication with DCVS + * @param: The core parameters + * @freq: Array of frequency and energy values + * + * @return: + * 0 on success. + * -ENOMEM: Insufficient memory. + * -EINVAL: Invalid args. + */ +extern int msm_dcvs_scm_register_core(uint32_t core_id, + struct msm_dcvs_core_param *param); + +/** + * Set DCVS algorithm parameters + * + * @core_id: The algorithm parameters specific for the core + * @param: The param data structure + * + * @return: + * 0 on success. + * -EINVAL: Invalid args. 
+ */ +extern int msm_dcvs_scm_set_algo_params(uint32_t core_id, + struct msm_dcvs_algo_param *param); + +/** + * Set MPDecision algorithm parameters + * + * @param: The param data structure + * 0 on success. + * -EINVAL: Invalid args. + */ +extern int msm_mpd_scm_set_algo_params(struct msm_mpd_algo_param *param); + +/** + * Set frequency and power characteristics for the core. + * + * @param core_id: The core identifier that will be used to interace with the + * DCVS algo. + * @param pwr_param: power params + * @param freq_entry: frequency characteristics desired + * @param coeffs: Coefficients that will describe the power curve + * + * @return int + * 0 on success. + * -EINVAL: Invalid args. + */ +extern int msm_dcvs_scm_set_power_params(uint32_t core_id, + struct msm_dcvs_power_params *pwr_param, + struct msm_dcvs_freq_entry *freq_entry, + struct msm_dcvs_energy_curve_coeffs *coeffs); + +/** + * Do an SCM call. + * + * @core_id: The core identifier. + * @event_id: The event that occured. + * Possible values: + * MSM_DCVS_SCM_IDLE_ENTER + * @param0: unused + * @param1: unused + * @ret0: unused + * @ret1: unused + * MSM_DCVS_SCM_IDLE_EXIT + * @param0: Did the core iowait + * @param1: unused + * @ret0: New clock frequency for the core in KHz + * @ret1: New QoS timer value for the core in usec + * MSM_DCVS_SCM_QOS_TIMER_EXPIRED + * @param0: unused + * @param1: unused + * @ret0: New clock frequency for the core in KHz + * @ret1: unused + * MSM_DCVS_SCM_CLOCK_FREQ_UPDATE + * @param0: active clock frequency of the core in KHz + * @param1: time taken in usec to switch to the frequency + * @ret0: New QoS timer value for the core in usec + * @ret1: unused + * MSM_DCVS_SCM_CORE_ONLINE + * @param0: active clock frequency of the core in KHz + * @param1: time taken to online the core + * @ret0: unused + * @ret1: unused + * MSM_DCVS_SCM_CORE_OFFLINE + * @param0: time taken to offline the core + * @param1: unused + * @ret0: unused + * @ret1: unused + * MSM_DCVS_SCM_CORE_UNAVAILABLE + * @param0: TODO:bitmask + * @param1: unused + * @ret0: Bitmask of cores to bring online/offline. + * @ret1: Mp Decision slack time. Common to all cores. 
+ * MSM_DCVS_SCM_DCVS_ENABLE + * @param0: 1 to enable; 0 to disable DCVS + * @param1: unused + * @ret0: New clock frequency for the core in KHz + * @ret1: unused + * MSM_DCVS_SCM_MPD_ENABLE + * @param0: 1 to enable; 0 to disable MP Decision + * @param1: unused + * @ret0: unused + * @ret1: unused + * MSM_DCVS_SCM_RUNQ_UPDATE + * @param0: run q value + * @param1: unused + * @ret0: Bitmask of cores online + * @ret1: New QoS timer for MP Decision (usec) + * MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED + * @param0: unused + * @param1: unused + * @ret0: Bitmask of cores online + * @ret1: New QoS timer for MP Decision (usec) + * @return: + * 0 on success, + * SCM return values + */ +extern int msm_dcvs_scm_event(uint32_t core_id, + enum msm_dcvs_scm_event event_id, + uint32_t param0, uint32_t param1, + uint32_t *ret0, uint32_t *ret1); + +#else +static inline int msm_dcvs_scm_init(uint32_t phy, size_t bytes) +{ return -ENOSYS; } +static inline int msm_dcvs_scm_register_core(uint32_t core_id, + struct msm_dcvs_core_param *param, + struct msm_dcvs_freq_entry *freq) +{ return -ENOSYS; } +static inline int msm_dcvs_scm_set_algo_params(uint32_t core_id, + struct msm_dcvs_algo_param *param) +{ return -ENOSYS; } +static inline int msm_mpd_scm_set_algo_params( + struct msm_mpd_algo_param *param) +{ return -ENOSYS; } +static inline int msm_dcvs_set_power_params(uint32_t core_id, + struct msm_dcvs_power_params *pwr_param, + struct msm_dcvs_freq_entry *freq_entry, + struct msm_dcvs_energy_curve_coeffs *coeffs) +{ return -ENOSYS; } +static inline int msm_dcvs_scm_event(uint32_t core_id, + enum msm_dcvs_scm_event event_id, + uint32_t param0, uint32_t param1, + uint32_t *ret0, uint32_t *ret1) +{ return -ENOSYS; } +#endif + +#endif diff --git a/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c b/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c new file mode 100644 index 00000000..746bbe80 --- /dev/null +++ b/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c @@ -0,0 +1,726 @@ + /* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
diff --git a/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c b/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c
new file mode 100644
index 00000000..746bbe80
--- /dev/null
+++ b/Patches/LineageOS-14.1/msm_kernel/msm_mpdecision.c
@@ -0,0 +1,726 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"mpd %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/kobject.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/notifier.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/stringify.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/page.h>
+#include <mach/msm_dcvs.h>
+#include <mach/msm_dcvs_scm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mpdcvs_trace.h>
+
+#define DEFAULT_RQ_AVG_POLL_MS (1)
+#define DEFAULT_RQ_AVG_DIVIDE (25)
+
+struct mpd_attrib {
+	struct kobj_attribute enabled;
+	struct kobj_attribute rq_avg_poll_ms;
+	struct kobj_attribute iowait_threshold_pct;
+
+	struct kobj_attribute rq_avg_divide;
+	struct kobj_attribute em_win_size_min_us;
+	struct kobj_attribute em_win_size_max_us;
+	struct kobj_attribute em_max_util_pct;
+	struct kobj_attribute mp_em_rounding_point_min;
+	struct kobj_attribute mp_em_rounding_point_max;
+	struct kobj_attribute online_util_pct_min;
+	struct kobj_attribute online_util_pct_max;
+	struct kobj_attribute slack_time_min_us;
+	struct kobj_attribute slack_time_max_us;
+	struct kobj_attribute hp_up_max_ms;
+	struct kobj_attribute hp_up_ms;
+	struct kobj_attribute hp_up_count;
+	struct kobj_attribute hp_dw_max_ms;
+	struct kobj_attribute hp_dw_ms;
+	struct kobj_attribute hp_dw_count;
+	struct attribute_group attrib_group;
+};
+
+struct msm_mpd_scm_data {
+	enum msm_dcvs_scm_event event;
+	int nr;
+};
+
+struct mpdecision {
+	uint32_t enabled;
+	atomic_t algo_cpu_mask;
+	uint32_t rq_avg_poll_ms;
+	uint32_t iowait_threshold_pct;
+	uint32_t rq_avg_divide;
+	ktime_t next_update;
+	uint32_t slack_us;
+	struct msm_mpd_algo_param mp_param;
+	struct mpd_attrib attrib;
+	struct mutex lock;
+	struct task_struct *task;
+	struct task_struct *hptask;
+	struct hrtimer slack_timer;
+	struct msm_mpd_scm_data data;
+	int hpupdate;
+	wait_queue_head_t wait_q;
+	wait_queue_head_t wait_hpq;
+};
+
+struct hp_latency {
+	int hp_up_max_ms;
+	int hp_up_ms;
+	int hp_up_count;
+	int hp_dw_max_ms;
+	int hp_dw_ms;
+	int hp_dw_count;
+};
+
+static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
+static DEFINE_SPINLOCK(rq_avg_lock);
+
+enum {
+	MSM_MPD_DEBUG_NOTIFIER = BIT(0),
+	MSM_MPD_CORE_STATUS = BIT(1),
+	MSM_MPD_SLACK_TIMER = BIT(2),
+};
+
+enum {
+	HPUPDATE_WAITING = 0,     /* we are waiting for a cpumask update */
+	HPUPDATE_SCHEDULED = 1,   /* a hotplug update has been scheduled */
+	HPUPDATE_IN_PROGRESS = 2, /* we are in the process of hotplugging */
+};
+
+static int msm_mpd_enabled = 1;
+module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static struct dentry *debugfs_base;
+static struct mpdecision msm_mpd;
+
+static struct hp_latency hp_latencies;
+
+static unsigned long last_nr;
+static int num_present_hundreds;
+static ktime_t last_down_time;
+
+static bool ok_to_update_tz(int nr, int last_nr)
+{
+	/*
+	 * Exclude unnecessary TZ reports if the run queue average hasn't
+	 * changed much from the last reported value. The division by
+	 * rq_avg_divide filters out small changes in the run queue average
+	 * which won't cause an online cpu mask change. Also report if the
+	 * cpu online count does not match the count requested by TZ and we
+	 * are not already in the process of bringing cpus online, as
+	 * indicated by HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
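+	 *
+	 * For example, with the default rq_avg_divide of 25, run queue
+	 * averages of 240 and 249 both land in bucket 9 (240 / 25 ==
+	 * 249 / 25), so no report is sent; a move to 250 changes the
+	 * bucket and triggers an update.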
+	 */
+	return
+	(((nr / msm_mpd.rq_avg_divide)
+		!= (last_nr / msm_mpd.rq_avg_divide))
+	|| ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
+		!= num_online_cpus())
+	&& (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
+}
+
+static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
+{
+	int nr, nr_iowait;
+	ktime_t curr_time = ktime_get();
+	unsigned long flags;
+	int cpu = smp_processor_id();
+	enum hrtimer_restart restart = HRTIMER_RESTART;
+
+	spin_lock_irqsave(&rq_avg_lock, flags);
+	/* If running on the wrong cpu, don't restart */
+	if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
+		restart = HRTIMER_NORESTART;
+
+	if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
+		goto out;
+
+	msm_mpd.next_update = ktime_add_ns(curr_time,
+			(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
+
+	sched_get_nr_running_avg(&nr, &nr_iowait);
+
+	if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
+		nr = last_nr;
+
+	if (nr > num_present_hundreds)
+		nr = num_present_hundreds;
+
+	trace_msm_mp_runq("nr_running", nr);
+
+	if (ok_to_update_tz(nr, last_nr)) {
+		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
+		msm_mpd.data.nr = nr;
+		msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
+		wake_up(&msm_mpd.wait_q);
+		last_nr = nr;
+	}
+
+out:
+	/* set next expiration */
+	hrtimer_set_expires(timer, msm_mpd.next_update);
+	spin_unlock_irqrestore(&rq_avg_lock, flags);
+	return restart;
+}
+
+static void bring_up_cpu(int cpu)
+{
+	int cpu_action_time_ms;
+	int time_taken_ms;
+	int ret;
+	uint32_t ret1, ret2;
+
+	cpu_action_time_ms = ktime_to_ms(ktime_get());
+	ret = cpu_up(cpu);
+	if (ret) {
+		pr_debug("Error %d onlining core %d\n", ret, cpu);
+	} else {
+		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
+		if (time_taken_ms > hp_latencies.hp_up_max_ms)
+			hp_latencies.hp_up_max_ms = time_taken_ms;
+		hp_latencies.hp_up_ms += time_taken_ms;
+		hp_latencies.hp_up_count++;
+		ret = msm_dcvs_scm_event(
+				CPU_OFFSET + cpu,
+				MSM_DCVS_SCM_CORE_ONLINE,
+				cpufreq_get(cpu),
+				(uint32_t) time_taken_ms * USEC_PER_MSEC,
+				&ret1, &ret2);
+		if (ret)
+			pr_err("Error sending hotplug scm event err=%d\n", ret);
+	}
+}
+
+static void bring_down_cpu(int cpu)
+{
+	int cpu_action_time_ms;
+	int time_taken_ms;
+	int ret;
+	uint32_t ret1, ret2;
+
+	BUG_ON(cpu == 0);
+	cpu_action_time_ms = ktime_to_ms(ktime_get());
+	ret = cpu_down(cpu);
+	if (ret) {
+		pr_debug("Error %d offlining core %d\n", ret, cpu);
+	} else {
+		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
+		if (time_taken_ms > hp_latencies.hp_dw_max_ms)
+			hp_latencies.hp_dw_max_ms = time_taken_ms;
+		hp_latencies.hp_dw_ms += time_taken_ms;
+		hp_latencies.hp_dw_count++;
+		ret = msm_dcvs_scm_event(
+				CPU_OFFSET + cpu,
+				MSM_DCVS_SCM_CORE_OFFLINE,
+				(uint32_t) time_taken_ms * USEC_PER_MSEC,
+				0,
+				&ret1, &ret2);
+		if (ret)
+			pr_err("Error sending hotplug scm event err=%d\n", ret);
+	}
+}
+
+static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
+{
+	int ret = 0;
+	uint32_t req_cpu_mask = 0;
+	uint32_t slack_us = 0;
+	uint32_t param0 = 0;
+
+	if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
+		param0 = nr;
+
+	ret = msm_dcvs_scm_event(0, event, param0, 0,
+			&req_cpu_mask, &slack_us);
+
+	if (ret) {
+		pr_err("Error (%d) sending event %d, param %d\n", ret, event,
+				param0);
+		return ret;
+	}
+
+	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
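+	/*
+	 * A single SCM call returns both the mask of cores TZ wants online
+	 * (req_cpu_mask) and the next slack interval in usec (slack_us);
+	 * the hotplug thread consumes the former, and the slack timer below
+	 * is armed with the latter.
+	 */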
+	trace_msm_mp_slacktime("slack_time_mp", slack_us);
+	msm_mpd.slack_us = slack_us;
+	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
+	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
+	wake_up(&msm_mpd.wait_hpq);
+
+	/* Start MP Decision slack timer */
+	if (slack_us) {
+		hrtimer_cancel(&msm_mpd.slack_timer);
+		ret = hrtimer_start(&msm_mpd.slack_timer,
+				ktime_set(0, slack_us * NSEC_PER_USEC),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret)
+			pr_err("Failed to register slack timer (%d) %d\n",
+					slack_us, ret);
+	}
+
+	return ret;
+}
+
+static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+
+	trace_printk("mpd:slack_timer_fired!\n");
+
+	spin_lock_irqsave(&rq_avg_lock, flags);
+	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
+		goto out;
+
+	msm_mpd.data.nr = 0;
+	msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
+	wake_up(&msm_mpd.wait_q);
+out:
+	spin_unlock_irqrestore(&rq_avg_lock, flags);
+	return HRTIMER_NORESTART;
+}
+
+static int msm_mpd_idle_notifier(struct notifier_block *self,
+		unsigned long cmd, void *v)
+{
+	int cpu = smp_processor_id();
+	unsigned long flags;
+
+	switch (cmd) {
+	case CPU_PM_EXIT:
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+				msm_mpd.next_update,
+				HRTIMER_MODE_ABS_PINNED);
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+		break;
+	case CPU_PM_ENTER:
+		hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int msm_mpd_hotplug_notifier(struct notifier_block *self,
+		unsigned long action, void *hcpu)
+{
+	int cpu = (int)hcpu;
+	unsigned long flags;
+
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_STARTING:
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+				msm_mpd.next_update,
+				HRTIMER_MODE_ABS_PINNED);
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_mpd_idle_nb = {
+	.notifier_call = msm_mpd_idle_notifier,
+};
+
+static struct notifier_block msm_mpd_hotplug_nb = {
+	.notifier_call = msm_mpd_hotplug_notifier,
+};
+
+static int __cpuinit msm_mpd_do_hotplug(void *data)
+{
+	int *event = (int *)data;
+	int cpu;
+
+	while (1) {
+		msm_dcvs_update_algo_params();
+		wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
+		/*
+		 * Bring online any offline cores, then offline any online
+		 * cores. Whenever a core is off/onlined restart the
+		 * procedure in case a new core is desired to be brought
+		 * online in the meantime.
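+		 *
+		 * For example, if TZ requests mask 0xf while only cpu0 is
+		 * online, each successful cpu_up() jumps back to restart:
+		 * and rescans the whole mask before any core is considered
+		 * for offlining.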
+		 */
+restart:
+		for_each_possible_cpu(cpu) {
+			if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
+				&& !cpu_online(cpu)) {
+				bring_up_cpu(cpu);
+				if (cpu_online(cpu))
+					goto restart;
+			}
+		}
+
+		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
+				100 * NSEC_PER_MSEC)
+			for_each_possible_cpu(cpu)
+				if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
+						(1 << cpu)) && cpu_online(cpu)) {
+					bring_down_cpu(cpu);
+					last_down_time = ktime_get();
+					break;
+				}
+		msm_mpd.hpupdate = HPUPDATE_WAITING;
+		msm_dcvs_apply_gpu_floor(0);
+	}
+
+	return 0;
+}
+
+static int msm_mpd_do_update_scm(void *data)
+{
+	struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
+	unsigned long flags;
+	enum msm_dcvs_scm_event event;
+	int nr;
+
+	while (1) {
+		wait_event(msm_mpd.wait_q,
+			msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
+			|| msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
+			|| kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		event = scm_data->event;
+		nr = scm_data->nr;
+		scm_data->event = 0;
+		scm_data->nr = 0;
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+
+		msm_mpd_update_scm(event, nr);
+	}
+	return 0;
+}
+
+static int __ref msm_mpd_set_enabled(uint32_t enable)
+{
+	int ret = 0;
+	uint32_t ret0 = 0;
+	uint32_t ret1 = 0;
+	int cpu;
+	static uint32_t last_enable;
+
+	enable = (enable > 0) ? 1 : 0;
+	if (last_enable == enable)
+		return ret;
+
+	if (enable) {
+		ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
+		if (ret) {
+			pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
+					ret);
+			return ret;
+		}
+	}
+
+	ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
+			&ret0, &ret1);
+	if (ret) {
+		pr_err("Error(%d) %s MP Decision\n",
+				ret, (enable ? "enabling" : "disabling"));
+	} else {
+		last_enable = enable;
+		last_nr = 0;
+	}
+	if (enable) {
+		msm_mpd.next_update = ktime_add_ns(ktime_get(),
+				(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
+		msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
+				&msm_mpd.data, "msm_mpdecision");
+		if (IS_ERR(msm_mpd.task))
+			return -EFAULT;
+
+		msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
+				&msm_mpd.hpupdate, "msm_hp");
+		if (IS_ERR(msm_mpd.hptask))
+			return -EFAULT;
+
+		for_each_online_cpu(cpu)
+			hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+					msm_mpd.next_update,
+					HRTIMER_MODE_ABS_PINNED);
+		cpu_pm_register_notifier(&msm_mpd_idle_nb);
+		register_cpu_notifier(&msm_mpd_hotplug_nb);
+		msm_mpd.enabled = 1;
+	} else {
+		for_each_online_cpu(cpu)
+			hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
+		kthread_stop(msm_mpd.hptask);
+		kthread_stop(msm_mpd.task);
+		cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
+		unregister_cpu_notifier(&msm_mpd_hotplug_nb);
+		msm_mpd.enabled = 0;
+	}
+
+	return ret;
+}
+
+static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
+{
+	/*
+	 * No need to do anything. Just let the timer set its own next poll
+	 * interval when it next fires.
+	 */
+	msm_mpd.rq_avg_poll_ms = val;
+	return 0;
+}
+
+static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
+{
+	/*
+	 * No need to do anything. Just let the timer set its own next poll
+	 * interval when it next fires.
+	 */
+	msm_mpd.iowait_threshold_pct = val;
+	return 0;
+}
+
+static int msm_mpd_set_rq_avg_divide(uint32_t val)
+{
+	/*
+	 * No need to do anything. The new value will be used the next time
+	 * the decision is made as to whether to update TZ.
+	 */
+
+	if (val == 0)
+		return -EINVAL;
+
+	msm_mpd.rq_avg_divide = val;
+	return 0;
+}
+
+#define MPD_ALGO_PARAM(_name, _param) \
+static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
+			struct kobj_attribute *attr, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
+} \
+static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val; \
+	uint32_t old_val; \
+	mutex_lock(&msm_mpd.lock); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		pr_err("Invalid input %s for %s %d\n", \
+				buf, __stringify(_name), ret); \
+		mutex_unlock(&msm_mpd.lock); \
+		return -EINVAL; \
+	} \
+	old_val = _param; \
+	_param = val; \
+	ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
+	if (ret) { \
+		pr_err("Error %d returned when setting algo param %s to %d\n", \
+				ret, __stringify(_name), val); \
+		_param = old_val; \
+	} \
+	mutex_unlock(&msm_mpd.lock); \
+	return count; \
+}
+
+#define MPD_PARAM(_name, _param) \
+static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
+			struct kobj_attribute *attr, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
+} \
+static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val; \
+	uint32_t old_val; \
+	mutex_lock(&msm_mpd.lock); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		pr_err("Invalid input %s for %s %d\n", \
+				buf, __stringify(_name), ret); \
+		mutex_unlock(&msm_mpd.lock); \
+		return -EINVAL; \
+	} \
+	old_val = _param; \
+	ret = msm_mpd_set_##_name(val); \
+	if (ret) { \
+		pr_err("Error %d returned when setting algo param %s to %d\n", \
+				ret, __stringify(_name), val); \
+		_param = old_val; \
+	} \
+	mutex_unlock(&msm_mpd.lock); \
+	return count; \
+}
+
+#define MPD_RW_ATTRIB(i, _name) \
+	msm_mpd.attrib._name.attr.name = __stringify(_name); \
+	msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
+	msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
+	msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
+	msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;
+
+MPD_PARAM(enabled, msm_mpd.enabled);
+MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
+MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
+MPD_PARAM(rq_avg_divide, msm_mpd.rq_avg_divide);
+MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
+MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
+MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
+MPD_ALGO_PARAM(mp_em_rounding_point_min,
+		msm_mpd.mp_param.mp_em_rounding_point_min);
+MPD_ALGO_PARAM(mp_em_rounding_point_max,
+		msm_mpd.mp_param.mp_em_rounding_point_max);
+MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
+MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
+MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
+MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
+MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
+MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
+MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
+MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
+MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
+MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);
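+
+/*
+ * Each MPD_PARAM()/MPD_ALGO_PARAM() invocation above expands to a
+ * show/store pair, and MPD_RW_ATTRIB() wires the pair into the attribute
+ * group that msm_mpd_probe() registers on the module kobject. So, for
+ * instance, rq_avg_divide should become writable at
+ * /sys/module/msm_mpdecision/rq_avg_divide (path assuming the unnamed
+ * group is created directly on the module kobject, as done below).
+ */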
+static int __devinit msm_mpd_probe(struct platform_device *pdev)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+	const int attr_count = 20;
+	struct msm_mpd_algo_param *param = NULL;
+
+	param = pdev->dev.platform_data;
+	if (!param) {
+		pr_err("Missing platform data\n");
+		ret = -ENODEV;
+		goto done;
+	}
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto done;
+	}
+
+	msm_mpd.attrib.attrib_group.attrs =
+		kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
+	if (!msm_mpd.attrib.attrib_group.attrs) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	MPD_RW_ATTRIB(0, enabled);
+	MPD_RW_ATTRIB(1, rq_avg_poll_ms);
+	MPD_RW_ATTRIB(2, iowait_threshold_pct);
+	MPD_RW_ATTRIB(3, rq_avg_divide);
+	MPD_RW_ATTRIB(4, em_win_size_min_us);
+	MPD_RW_ATTRIB(5, em_win_size_max_us);
+	MPD_RW_ATTRIB(6, em_max_util_pct);
+	MPD_RW_ATTRIB(7, mp_em_rounding_point_min);
+	MPD_RW_ATTRIB(8, mp_em_rounding_point_max);
+	MPD_RW_ATTRIB(9, online_util_pct_min);
+	MPD_RW_ATTRIB(10, online_util_pct_max);
+	MPD_RW_ATTRIB(11, slack_time_min_us);
+	MPD_RW_ATTRIB(12, slack_time_max_us);
+	MPD_RW_ATTRIB(13, hp_up_max_ms);
+	MPD_RW_ATTRIB(14, hp_up_ms);
+	MPD_RW_ATTRIB(15, hp_up_count);
+	MPD_RW_ATTRIB(16, hp_dw_max_ms);
+	MPD_RW_ATTRIB(17, hp_dw_ms);
+	MPD_RW_ATTRIB(18, hp_dw_count);
+
+	msm_mpd.attrib.attrib_group.attrs[19] = NULL;
+	ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
+	if (ret)
+		pr_err("Unable to create sysfs objects :%d\n", ret);
+
+	msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;
+	msm_mpd.rq_avg_divide = DEFAULT_RQ_AVG_DIVIDE;
+
+	memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));
+
+	debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
+	if (!debugfs_base) {
+		pr_err("Cannot create debugfs base msm_mpdecision\n");
+		ret = -ENOENT;
+		goto done;
+	}
+
+done:
+	if (ret && debugfs_base)
+		debugfs_remove(debugfs_base);
+
+	return ret;
+}
+
+static int __devexit msm_mpd_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver msm_mpd_driver = {
+	.probe = msm_mpd_probe,
+	.remove = __devexit_p(msm_mpd_remove),
+	.driver = {
+		.name = "msm_mpdecision",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_mpdecision_init(void)
+{
+	int cpu;
+
+	if (!msm_mpd_enabled) {
+		pr_info("Not enabled\n");
+		return 0;
+	}
+
+	num_present_hundreds = 100 * num_present_cpus();
+
+	hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL_PINNED);
+	msm_mpd.slack_timer.function = msm_mpd_slack_timer;
+
+	for_each_possible_cpu(cpu) {
+		hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
+				CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+		per_cpu(rq_avg_poll_timer, cpu).function
+				= msm_mpd_rq_avg_poll_timer;
+	}
+	mutex_init(&msm_mpd.lock);
+	init_waitqueue_head(&msm_mpd.wait_q);
+	init_waitqueue_head(&msm_mpd.wait_hpq);
+	return platform_driver_register(&msm_mpd_driver);
+}
+late_initcall(msm_mpdecision_init);
diff --git a/Scripts/Generic_Deblob.sh b/Scripts/Generic_Deblob.sh
index 575422ff..a0f042e5 100755
--- a/Scripts/Generic_Deblob.sh
+++ b/Scripts/Generic_Deblob.sh
@@ -105,10 +105,8 @@ export base;
 	makes=$makes"|com.motorola.cameraone.xml";
 
 	#Performance [Qualcomm]
-	#blobs=$blobs"|mpdecision|msm_irqbalance";
-
-	#Performance Profiles [Qualcomm]
-	blobs=$blobs"|libqti-perfd-client.so|perfd|perf-profile0.conf|perf-profile1.conf|perf-profile2.conf|perf-profile3.conf|perf-profile4.conf|perf-profile5.conf";
+	#blobs=$blobs"|msm_irqbalance";
+	blobs=$blobs"|mpdecision|libqti-perfd-client.so|perfd|perf-profile0.conf|perf-profile1.conf|perf-profile2.conf|perf-profile3.conf|perf-profile4.conf|perf-profile5.conf";
 
 	#Playready (DRM) [Microsoft]
 	blobs=$blobs"|playread.b00|playread.b01|playread.b02|playread.b03|playread.mdt";
@@ -137,7 +135,7 @@ export base;
 	#Time Service [Qualcomm]
 	#XXX: Requires that https://github.com/LineageOS/android_hardware_sony_timekeep be included in repo manifest
 	#XXX: This is another aggressive change and might be disabled in the future
-	#XXX: Time seems to be set properly with these blobs removed without Sony TimeKeep, so there may be more shenanigans here
+	#XXX: Time seems to be set properly with these blobs removed even without Sony TimeKeep, so there may be more shenanigans here, unless that's just NTP
 	#blobs=$blobs"|libtime_genoff.so"; #XXX: Breaks radio
 	blobs=$blobs"|libTimeService.so|time_daemon|TimeService.apk";
diff --git a/Scripts/LAOS-14.1_Patches.sh b/Scripts/LAOS-14.1_Patches.sh
index 768cbb6c..7cea39d3 100755
--- a/Scripts/LAOS-14.1_Patches.sh
+++ b/Scripts/LAOS-14.1_Patches.sh
@@ -48,6 +48,16 @@ disableDexPreOpt() {
 	sed -i 's/WITH_DEXPREOPT := true/WITH_DEXPREOPT := false/' BoardConfig.mk;
 	echo "Disable dexpreopt";
 }
+
+addMPD() {
+	cp $patches"msm_kernel/msm_dcvs.c" arch/arm/mach-msm/msm_dcvs.c;
+	cp $patches"msm_kernel/msm_dcvs.h" arch/arm/mach-msm/include/mach/msm_dcvs.h;
+	cp $patches"msm_kernel/msm_dcvs_scm.h" arch/arm/mach-msm/include/mach/msm_dcvs_scm.h;
+	cp $patches"msm_kernel/mpdcvs_trace.h" include/trace/events/mpdcvs_trace.h;
+	cp $patches"msm_kernel/msm_mpdecision.c" arch/arm/mach-msm/msm_mpdecision.c;
+	echo 'obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_mpdecision.o' >> arch/arm/mach-msm/Makefile;
+	echo "Added msm_mpdecision";
+}
 #
 #END OF PREPARATION
 #
@@ -138,6 +148,7 @@ enter "vendor/cm"
 awk -i inplace '!/50-cm.sh/' config/common.mk; #Make sure our hosts file is always used
 patch -p1 < $patches"android_vendor_cm/0001-SCE.patch" #Include our extras such as MicroG and F-Droid
 cp $patches"android_vendor_cm/sce.mk" config/sce.mk
+cp $patches"android_vendor_cm/99mpdecision" prebuilt/common/etc/init.d/99mpdecision #Credit: Cl3Kener
 sed -i 's/CM_BUILDTYPE := UNOFFICIAL/CM_BUILDTYPE := dsc/' config/common.mk; #Change buildtype
 
 enter "vendor/cmsdk"
@@ -154,6 +165,7 @@ enter "device/motorola/clark"
 enableDexPreOpt
 
 enter "kernel/motorola/msm8992"
+addMPD
 patch -p1 < $patches"android_kernel_motorola_msm8992/0001-OverUnderClock.patch" #a57: 1.82Ghz -> 2.01Ghz, a53 1.44Ghz -> 1.63Ghz, 384Mhz -> 300Mhz =+1.14Ghz TODO: Enable by default
 patch -p1 < $patches"android_kernel_motorola_msm8992/0002-MMC_Tweak.patch" #Improves MMC performance
@@ -168,25 +180,38 @@ enter "device/lge/mako"
 disableDexPreOpt #bootloops
 patch -p1 < $patches"android_device_lge_mako/0001-Enable_LTE.patch" #Enable LTE support (Requires LTE hybrid modem to be flashed)
 
-enter "kernel/lge/mako"
-patch -p1 < $patches"android_kernel_lge_mako/0001-OverUnderClock.patch" #384Mhz -> 81Mhz, 1.51Ghz -> 1.94Ghz =+1.72Ghz
+#enter "kernel/lge/mako"
+#patch -p1 < $patches"android_kernel_lge_mako/0001-OverUnderClock.patch" #384Mhz -> 81Mhz, 1.51Ghz -> 1.94Ghz =+1.72Ghz #XXX: Causes excessively long boot times
+
+enter "kernel/asus/msm8916"
+addMPD
 
 enter "kernel/lge/hammerhead"
 patch -p1 < $patches"android_kernel_lge_hammerhead/0001-OverUnderClock.patch" #2.26Ghz -> 2.95Ghz =+2.76Ghz
 
 enter "kernel/moto/shamu"
+addMPD
 patch -p1 < $patches"android_kernel_moto_shamu/0001-OverUnderClock.patch" #300Mhz -> 35Mhz, 2.64Ghz -> 2.88Ghz =+0.96Ghz
 
 enter "kernel/lge/bullhead"
+addMPD
 patch -p1 < $patches"android_kernel_lge_bullhead/0001-OverUnderClock.patch" #a57: 1.82Ghz -> 2.01Ghz, a53 1.44Ghz -> 1.63Ghz, 384Mhz -> 300Mhz =+1.14Ghz TODO: Enable by default
 patch -p1 < $patches"android_kernel_lge_bullhead/0002-MMC_Tweak.patch" #Improves MMC performance
 
 enter "kernel/motorola/msm8916"
+addMPD
 patch -p1 < $patches"android_kernel_motorola_msm8916/0001-Overclock.patch" #1.36Ghz -> 1.88Ghz =+2.07Ghz
 
 enter "kernel/nextbit/msm8992"
+addMPD
 patch -p1 < $patches"android_kernel_nextbit_msm8992/0001-OverUnderClock.patch" #a57: 1.82Ghz -> 2.01Ghz, a53 1.44Ghz -> 1.63Ghz, 384Mhz -> 300Mhz =+1.14Ghz TODO: Enable by default
 patch -p1 < $patches"android_kernel_nextbit_msm8992/0002-MMC_Tweak.patch" #Improves MMC performance
+
+enter "kernel/huawei/angler"
+addMPD
+
+enter "kernel/google/marlin"
+addMPD
 #
 #END OF DEVICE CHANGES
 #
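As a quick post-run sanity check, the effect of addMPD on any one kernel tree can be verified from the build root; a minimal sketch (the shamu tree is chosen here only as an example):

    cd kernel/moto/shamu
    ls arch/arm/mach-msm/msm_mpdecision.c arch/arm/mach-msm/msm_dcvs.c #sources copied in by addMPD
    grep "msm_mpdecision.o" arch/arm/mach-msm/Makefile #Makefile hook appended by addMPD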