Initial commit, long overdue

Tad 2016-12-21 19:30:02 -05:00
commit c0083c1519
292 changed files with 951990 additions and 0 deletions


@@ -0,0 +1,577 @@
From c0209d6b15d2fc752741fe0e04b227990e74377f Mon Sep 17 00:00:00 2001
From: Tad <tad@spotco.us>
Date: Sat, 17 Oct 2015 20:49:21 -0400
Subject: [PATCH] Implement KEXEC
---
arch/arm/Kconfig | 26 ++++++++++
arch/arm/boot/compressed/head.S | 64 ++++++++++++++++++++++++
arch/arm/configs/cyanogenmod_bacon_defconfig | 6 ++-
arch/arm/include/asm/kexec.h | 8 +++
arch/arm/kernel/machine_kexec.c | 58 +++++++++++++++++++--
arch/arm/kernel/relocate_kernel.S | 75 ++++++++++++++++++++++++++++
arch/arm/mach-msm/include/mach/memory.h | 16 ++++++
arch/arm/mach-msm/oppo/board-8974-oppo.c | 27 ++++++++++
arch/arm/mach-msm/restart.c | 28 +++++++++++
include/linux/kexec.h | 19 +++++--
kernel/kexec.c | 4 ++
11 files changed, 322 insertions(+), 9 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 37d3c6d..f801a19 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2299,6 +2299,32 @@ config ATAGS_PROC
Should the atags used to boot the kernel be exported in an "atags"
file in procfs. Useful with kexec.
+config KEXEC_HARDBOOT
+ bool "Support hard booting to a kexec kernel"
+ depends on KEXEC
+ help
+ Allows hard booting (i.e., with a full hardware reboot) to a kernel
+ previously loaded in memory by kexec. This works around the problem of
+ soft-booted kernel hangs due to improper device shutdown and/or
+ reinitialization. Support consists of two components:
+
+ First, a "hardboot" flag is added to the kexec syscall to force a hard
+ reboot in relocate_new_kernel() (which requires machine-specific assembly
+ code). This also requires the kexec userspace tool to load the kexec'd
+ kernel into a memory region left untouched by the bootloader (i.e., not
+ explicitly cleared and not overwritten by the boot kernel). Just prior
+ to reboot, the kexec kernel arguments are stashed in a machine-specific
+ memory page that must also be preserved. Note that this hardboot page
+ need not be reserved during regular kernel execution.
+
+ Second, the zImage decompressor of the boot (bootloader-loaded) kernel is
+ modified to check the hardboot page for fresh kexec arguments and, if they
+ are present, to jump to the kexec'd kernel preserved in memory.
+
+ Note that hardboot support is only required in the boot kernel and any
+ kernel capable of performing a hardboot kexec. It is _not_ required by a
+ kexec'd kernel.
+
config CRASH_DUMP
bool "Build kdump crash kernel (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index d3892ef..be05aa1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,12 @@
#include <linux/linkage.h>
.arch armv7-a
+
+#ifdef CONFIG_KEXEC_HARDBOOT
+ #include <asm/kexec.h>
+ #include <asm/memory.h>
+#endif
+
/*
* Debugging stuff
*
@@ -136,6 +142,64 @@ start:
1: mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
+#ifdef CONFIG_KEXEC_HARDBOOT
+ /* Check hardboot page for a kexec kernel. */
+ ldr r3, =KEXEC_HB_PAGE_ADDR
+ ldr r0, [r3]
+ ldr r1, =KEXEC_HB_PAGE_MAGIC
+ teq r0, r1
+ bne not_booting_other
+
+ /* Clear hardboot page magic to avoid boot loop. */
+ mov r0, #0
+ str r0, [r3]
+
+ /*
+ * Copy the dtb from its location high in memory to the default location.
+ * The kernel freezes if this is not done.
+ */
+ ldr r1, [r3, #12] @ kexec_boot_atags
+ ldr r2, [r3, #16] @ kexec_boot_atags_len
+ mov r5, #0 @ iterator
+catags_cpy:
+ ldr r0, [r1, r5] @ from kexec_boot_atags
+ str r0, [r8, r5] @ to atags_pointer
+ add r5, r5, #4
+ cmp r5, r2
+ blo catags_cpy
+
+#ifdef KEXEC_HB_KERNEL_LOC
+ /*
+ * Copy the kernel from its location high in memory to a location in the
+ * first 128MB. The bootloader on hammerhead erases the first 128MB of RAM
+ * on reboot, so the kernel can't be there before the reboot, but
+ * decompressing at a location above 128MB takes a long time. This memcpy
+ * is much quicker, for some reason.
+ */
+ ldr r2, [r3, #4] @ kexec_start_address
+ ldr r4, [r3, #20] @ kexec_kernel_len
+ ldr r6, =KEXEC_HB_KERNEL_LOC @ target
+ mov r5, #0 @ iterator
+kernel_cpy:
+ ldr r0, [r2, r5] @ from kexec_start_address
+ str r0, [r6, r5] @ to KEXEC_HB_KERNEL_LOC
+ add r5, r5, #4
+ cmp r5, r4
+ blo kernel_cpy
+#else
+ ldr r6, [r3, #4] @ kexec_start_address
+#endif
+
+ /* Set registers and boot the kexec'd kernel */
+ mov r0, #0
+ ldr r1, [r3, #8] @ kexec_mach_type
+ mov r2, r8 @ atags pointer
+ mov pc, r6
+
+ .ltorg
+
+not_booting_other:
+#endif
+
#ifndef __ARM_ARCH_2__
/*
* Booting from Angel - need to enter SVC mode and disable
diff --git a/arch/arm/configs/cyanogenmod_bacon_defconfig b/arch/arm/configs/cyanogenmod_bacon_defconfig
index 1fd8a93..5f67061 100644
--- a/arch/arm/configs/cyanogenmod_bacon_defconfig
+++ b/arch/arm/configs/cyanogenmod_bacon_defconfig
@@ -693,7 +693,9 @@ CONFIG_ZBOOT_ROM_BSS=0
# CONFIG_ARM_APPENDED_DTB is not set
CONFIG_CMDLINE=""
# CONFIG_XIP_KERNEL is not set
-# CONFIG_KEXEC is not set
+CONFIG_KEXEC=y
+CONFIG_KEXEC_HARDBOOT=y
+CONFIG_ATAGS_PROC=n
# CONFIG_CRASH_DUMP is not set
# CONFIG_AUTO_ZRELADDR is not set
@@ -1283,7 +1285,7 @@ CONFIG_OF=y
#
# Device Tree and Open Firmware support
#
-# CONFIG_PROC_DEVICETREE is not set
+CONFIG_PROC_DEVICETREE=y
# CONFIG_OF_SELFTEST is not set
CONFIG_OF_FLATTREE=y
CONFIG_OF_EARLY_FLATTREE=y
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c2b9b4b..564c55b 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -17,6 +17,10 @@
#define KEXEC_ARM_ATAGS_OFFSET 0x1000
#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000
+#ifdef CONFIG_KEXEC_HARDBOOT
+ #define KEXEC_HB_PAGE_MAGIC 0x4a5db007
+#endif
+
#ifndef __ASSEMBLY__
/**
@@ -53,6 +57,10 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Function pointer to optional machine-specific reinitialization */
extern void (*kexec_reinit)(void);
+#ifdef CONFIG_KEXEC_HARDBOOT
+extern void (*kexec_hardboot_hook)(void);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 357b651..29cdd2f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,6 +14,9 @@
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/system_misc.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <asm/mmu_writeable.h>
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
@@ -22,6 +25,12 @@ extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;
+#ifdef CONFIG_KEXEC_HARDBOOT
+extern unsigned long kexec_hardboot;
+extern unsigned long kexec_boot_atags_len;
+extern unsigned long kexec_kernel_len;
+void (*kexec_hardboot_hook)(void);
+#endif
static atomic_t waiting_for_crash_ipi;
@@ -32,6 +41,37 @@ static atomic_t waiting_for_crash_ipi;
int machine_kexec_prepare(struct kimage *image)
{
+ struct kexec_segment *current_segment;
+ __be32 header;
+ int i, err;
+
+ /* No segment at the default ATAGs address; try to locate
+ * a dtb using its magic number */
+ for (i = 0; i < image->nr_segments; i++) {
+ current_segment = &image->segment[i];
+
+ err = memblock_is_region_memory(current_segment->mem,
+ current_segment->memsz);
+ if (!err)
+ return -EINVAL;
+
+#ifdef CONFIG_KEXEC_HARDBOOT
+ if(current_segment->mem == image->start)
+ mem_text_write_kernel_word(&kexec_kernel_len, current_segment->memsz);
+#endif
+
+ err = get_user(header, (__be32*)current_segment->buf);
+ if (err)
+ return err;
+
+ if (be32_to_cpu(header) == OF_DT_HEADER)
+ {
+ mem_text_write_kernel_word(&kexec_boot_atags, current_segment->mem);
+#ifdef CONFIG_KEXEC_HARDBOOT
+ mem_text_write_kernel_word(&kexec_boot_atags_len, current_segment->memsz);
+#endif
+ }
+ }
return 0;
}
@@ -123,10 +163,14 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
/* Prepare parameters for reboot_code_buffer*/
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+ mem_text_write_kernel_word(&kexec_start_address, image->start);
+ mem_text_write_kernel_word(&kexec_indirection_page, page_list);
+ mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type);
+ if (!kexec_boot_atags)
+ mem_text_write_kernel_word(&kexec_boot_atags, image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET);
+#ifdef CONFIG_KEXEC_HARDBOOT
+ mem_text_write_kernel_word(&kexec_hardboot, image->hardboot);
+#endif
/* copy our kernel relocation code to the control code page */
memcpy(reboot_code_buffer,
@@ -140,6 +184,12 @@ void machine_kexec(struct kimage *image)
if (kexec_reinit)
kexec_reinit();
+#ifdef CONFIG_KEXEC_HARDBOOT
+ /* Run any final machine-specific shutdown code. */
+ if (image->hardboot && kexec_hardboot_hook)
+ kexec_hardboot_hook();
+#endif
+
soft_restart(reboot_code_buffer_phys);
}
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf..0e45ffc 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -4,6 +4,15 @@
#include <asm/kexec.h>
+#ifdef CONFIG_KEXEC_HARDBOOT
+#include <asm/memory.h>
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+ #include <mach/iomap.h>
+#elif defined(CONFIG_ARCH_APQ8064) || defined(CONFIG_ARCH_MSM8974)
+ #include <mach/msm_iomap.h>
+#endif
+#endif
+
.globl relocate_new_kernel
relocate_new_kernel:
@@ -52,6 +61,12 @@ relocate_new_kernel:
b 0b
2:
+#ifdef CONFIG_KEXEC_HARDBOOT
+ ldr r0, kexec_hardboot
+ teq r0, #0
+ bne hardboot
+#endif
+
/* Jump to relocated kernel */
mov lr,r1
mov r0,#0
@@ -60,6 +75,52 @@ relocate_new_kernel:
ARM( mov pc, lr )
THUMB( bx lr )
+#ifdef CONFIG_KEXEC_HARDBOOT
+hardboot:
+ /* Stash boot arguments in hardboot page:
+ * 0: KEXEC_HB_PAGE_MAGIC
+ * 4: kexec_start_address
+ * 8: kexec_mach_type
+ * 12: kexec_boot_atags
+ * 16: kexec_boot_atags_len
+ * 20: kexec_kernel_len */
+ ldr r0, =KEXEC_HB_PAGE_ADDR
+ str r1, [r0, #4]
+ ldr r1, kexec_mach_type
+ str r1, [r0, #8]
+ ldr r1, kexec_boot_atags
+ str r1, [r0, #12]
+ ldr r1, kexec_boot_atags_len
+ str r1, [r0, #16]
+ ldr r1, kexec_kernel_len
+ str r1, [r0, #20]
+ ldr r1, =KEXEC_HB_PAGE_MAGIC
+ str r1, [r0]
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+ ldr r0, =TEGRA_PMC_BASE
+ ldr r1, [r0]
+ orr r1, r1, #0x10
+ str r1, [r0]
+loop: b loop
+#elif defined(CONFIG_ARCH_APQ8064)
+ /* Restart using the PMIC chip, see mach-msm/restart.c */
+ ldr r0, =APQ8064_TLMM_PHYS
+ mov r1, #0
+ str r1, [r0, #0x820] @ PSHOLD_CTL_SU
+loop: b loop
+#elif defined(CONFIG_ARCH_MSM8974)
+ /* Restart using the PMIC chip, see mach-msm/restart.c */
+ ldr r0, =MSM8974_MPM2_PSHOLD_PHYS
+ mov r1, #0
+ str r1, [r0, #0]
+loop: b loop
+#else
+#error "No reboot method defined for hardboot."
+#endif
+
+ .ltorg
+#endif
.align
.globl kexec_start_address
@@ -79,6 +140,20 @@ kexec_mach_type:
kexec_boot_atags:
.long 0x0
+#ifdef CONFIG_KEXEC_HARDBOOT
+ .globl kexec_boot_atags_len
+kexec_boot_atags_len:
+ .long 0x0
+
+ .globl kexec_kernel_len
+kexec_kernel_len:
+ .long 0x0
+
+ .globl kexec_hardboot
+kexec_hardboot:
+ .long 0x0
+#endif
+
relocate_new_kernel_end:
.globl relocate_new_kernel_size
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 9225230..1c87b96 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -20,6 +20,22 @@
/* physical offset of RAM */
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#if defined(CONFIG_KEXEC_HARDBOOT)
+#if defined(CONFIG_MACH_APQ8064_FLO)
+#define KEXEC_HB_PAGE_ADDR UL(0x88C00000)
+#elif defined(CONFIG_MACH_APQ8064_MAKO)
+#define KEXEC_HB_PAGE_ADDR UL(0x88600000)
+#elif defined(CONFIG_MACH_MSM8974_HAMMERHEAD)
+#define KEXEC_HB_PAGE_ADDR UL(0x10100000)
+#define KEXEC_HB_KERNEL_LOC UL(0x3208000)
+#elif defined(CONFIG_MACH_OPPO_MSM8974)
+#define KEXEC_HB_PAGE_ADDR UL(0x2F600000)
+#define KEXEC_HB_KERNEL_LOC UL(0x3208000)
+#else
+#error "Adress for kexec hardboot page not defined"
+#endif
+#endif
+
#ifndef __ASSEMBLY__
void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
void clean_caches(unsigned long, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/oppo/board-8974-oppo.c b/arch/arm/mach-msm/oppo/board-8974-oppo.c
index eb24545..10bbbda 100644
--- a/arch/arm/mach-msm/oppo/board-8974-oppo.c
+++ b/arch/arm/mach-msm/oppo/board-8974-oppo.c
@@ -54,6 +54,13 @@
#include <linux/pcb_version.h>
+#ifdef CONFIG_KEXEC_HARDBOOT
+#include <asm/setup.h>
+#include <asm/memory.h>
+#include <linux/memblock.h>
+#define OPPO_PERSISTENT_RAM_SIZE (SZ_1M)
+#endif
+
static struct platform_device *ram_console_dev;
static struct persistent_ram_descriptor msm_prd[] __initdata = {
@@ -72,6 +79,26 @@ static struct persistent_ram msm_pr __initdata = {
void __init msm_8974_reserve(void)
{
+#ifdef CONFIG_KEXEC_HARDBOOT
+ // Reserve space for the hardboot page - just after ram_console,
+ // at the start of the second memory bank
+ int ret;
+ phys_addr_t start;
+ struct membank* bank;
+
+ if (meminfo.nr_banks < 2) {
+ pr_err("%s: not enough membank\n", __func__);
+ return;
+ }
+
+ bank = &meminfo.bank[1];
+ start = bank->start + SZ_1M + OPPO_PERSISTENT_RAM_SIZE;
+ ret = memblock_remove(start, SZ_1M);
+ if(!ret)
+ pr_info("Hardboot page reserved at 0x%X\n", start);
+ else
+ pr_err("Failed to reserve space for hardboot page at 0x%X!\n", start);
+#endif
persistent_ram_early_init(&msm_pr);
of_scan_flat_dt(dt_scan_for_memory_reserve, NULL);
}
diff --git a/arch/arm/mach-msm/restart.c b/arch/arm/mach-msm/restart.c
index a04ab8d..fe89976 100644
--- a/arch/arm/mach-msm/restart.c
+++ b/arch/arm/mach-msm/restart.c
@@ -38,6 +38,10 @@
#include "timer.h"
#include "wdog_debug.h"
+#ifdef CONFIG_KEXEC_HARDBOOT
+#include <asm/kexec.h>
+#endif
+
#define WDT0_RST 0x38
#define WDT0_EN 0x40
#define WDT0_BARK_TIME 0x4C
@@ -373,6 +377,26 @@ static int __init msm_pmic_restart_init(void)
late_initcall(msm_pmic_restart_init);
+#ifdef CONFIG_KEXEC_HARDBOOT
+static void msm_kexec_hardboot_hook(void)
+{
+ set_dload_mode(0);
+
+ // Set PMIC to restart-on-poweroff
+ pm8xxx_reset_pwr_off(1);
+
+ // These are executed on normal reboot, but with kexec-hardboot,
+ // they reboot/panic the system immediately.
+#if 0
+ qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
+
+ /* Needed to bypass debug image on some chips */
+ msm_disable_wdog_debug();
+ halt_spmi_pmic_arbiter();
+#endif
+}
+#endif
+
static int __init msm_restart_init(void)
{
#ifdef CONFIG_MSM_DLOAD_MODE
@@ -392,6 +416,10 @@ static int __init msm_restart_init(void)
if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER) > 0)
scm_pmic_arbiter_disable_supported = true;
+#ifdef CONFIG_KEXEC_HARDBOOT
+ kexec_hardboot_hook = msm_kexec_hardboot_hook;
+#endif
+
return 0;
}
early_initcall(msm_restart_init);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index af84a25..a4509ad 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -111,6 +111,10 @@ struct kimage {
#define KEXEC_TYPE_CRASH 1
unsigned int preserve_context : 1;
+#ifdef CONFIG_KEXEC_HARDBOOT
+ unsigned int hardboot : 1;
+#endif
+
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
#endif
@@ -178,6 +182,11 @@ extern struct kimage *kexec_crash_image;
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
+
+#ifdef CONFIG_KEXEC_HARDBOOT
+#define KEXEC_HARDBOOT 0x00000004
+#endif
+
#define KEXEC_ARCH_MASK 0xffff0000
/* These values match the ELF architecture values.
@@ -196,10 +205,14 @@ extern struct kimage *kexec_crash_image;
#define KEXEC_ARCH_MIPS ( 8 << 16)
/* List of defined/legal kexec flags */
-#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
-#else
+#if defined(CONFIG_KEXEC_JUMP) && defined(CONFIG_KEXEC_HARDBOOT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_HARDBOOT)
+#elif defined(CONFIG_KEXEC_JUMP)
#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#elif defined(CONFIG_KEXEC_HARDBOOT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_HARDBOOT)
+#else
+#define KEXEC_FLAGS (KEXEC_ON_CRASH)
#endif
#define VMCOREINFO_BYTES (4096)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 4e2e472..aef7893 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1004,6 +1004,10 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
if (flags & KEXEC_PRESERVE_CONTEXT)
image->preserve_context = 1;
+#ifdef CONFIG_KEXEC_HARDBOOT
+ if (flags & KEXEC_HARDBOOT)
+ image->hardboot = 1;
+#endif
result = machine_kexec_prepare(image);
if (result)
goto out;
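
Taken together, the pieces above define a small userspace-visible contract: pass KEXEC_HARDBOOT to the kexec_load() syscall, and relocate_new_kernel() stashes the boot parameters in the hardboot page before forcing a PMIC reset. Below is a minimal sketch of how a patched loader might request a hardboot; it only shows the syscall shape. The buffer, load address, and local struct definition are illustrative placeholders (real segment setup is the job of a patched kexec-tools), not part of this patch.

/* hardboot_load.c - illustrative sketch only.
 * Assumes a kernel built with CONFIG_KEXEC_HARDBOOT. The real work of
 * placing zImage/dtb into the bootloader-preserved region belongs to a
 * patched kexec-tools; the buffer and address below are placeholders.
 *
 * Hardboot page layout written by relocate_new_kernel (from above):
 *   +0  KEXEC_HB_PAGE_MAGIC (0x4a5db007)
 *   +4  kexec_start_address
 *   +8  kexec_mach_type
 *   +12 kexec_boot_atags
 *   +16 kexec_boot_atags_len
 *   +20 kexec_kernel_len
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KEXEC_HARDBOOT 0x00000004 /* matches include/linux/kexec.h above */
#define KEXEC_ARCH_ARM (40 << 16) /* EM_ARM, from the kernel's kexec.h */

struct kexec_segment { /* mirrors the kernel's definition */
	const void *buf;
	size_t bufsz;
	unsigned long mem;
	size_t memsz;
};

int main(void)
{
	static char image[4096]; /* stand-in for a real zImage */
	struct kexec_segment seg = {
		.buf = image, .bufsz = sizeof(image),
		.mem = 0x10000000UL, /* placeholder load address */
		.memsz = sizeof(image),
	};
	long ret = syscall(__NR_kexec_load, seg.mem, 1UL, &seg,
			   KEXEC_ARCH_ARM | KEXEC_HARDBOOT);

	if (ret)
		perror("kexec_load"); /* expect EPERM/EINVAL without a real image */
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}

Without KEXEC_HARDBOOT in flags, the same call behaves as a normal kexec load; the flag only sets image->hardboot, which relocate_new_kernel later reads to decide whether to jump to the new kernel directly or stash the arguments and hard-reset.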


@@ -0,0 +1,706 @@
From e914463298097e12f8504fa8215246ac70e59831 Mon Sep 17 00:00:00 2001
From: Tad <tad@spotco.us>
Date: Sat, 17 Oct 2015 20:49:57 -0400
Subject: [PATCH] Overclocked to 2.8GHz, underclocked to 268MHz
---
arch/arm/boot/dts/msm8974-v2.dtsi | 2 +-
arch/arm/boot/dts/msm8974pro-pm8941.dtsi | 8 +-
.../arm/boot/dts/msm8974pro-pma8084-regulator.dtsi | 16 +-
arch/arm/boot/dts/msm8974pro.dtsi | 190 ++++++++++++++++++---
4 files changed, 179 insertions(+), 37 deletions(-)
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 04769bd..8739175 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -135,7 +135,7 @@
<1880000 2068000>,
<3008000 3309000>,
<3760000 4136000>,
- <4468000 2457000>;
+ <4468000 2457600>;
qcom,dec-ocmem-ab-ib = <0 0>,
<176000 519000>,
<456000 519000>,
diff --git a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
index 89939e6..d76e4bd 100644
--- a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
@@ -39,22 +39,22 @@
};
&krait0_vreg {
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,ldo-delta-voltage = <12500>;
};
&krait1_vreg {
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,ldo-delta-voltage = <12500>;
};
&krait2_vreg {
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,ldo-delta-voltage = <12500>;
};
&krait3_vreg {
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,ldo-delta-voltage = <12500>;
};
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
index 433d466..428a520 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
@@ -492,9 +492,9 @@
<0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
reg-names = "acs", "mdd";
regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
+ qcom,retention-voltage = <600000>;
qcom,ldo-default-voltage = <750000>;
qcom,ldo-threshold-voltage = <850000>;
qcom,ldo-delta-voltage = <12500>;
@@ -508,9 +508,9 @@
<0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
reg-names = "acs", "mdd";
regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
+ qcom,retention-voltage = <600000>;
qcom,ldo-default-voltage = <750000>;
qcom,ldo-threshold-voltage = <850000>;
qcom,ldo-delta-voltage = <12500>;
@@ -524,9 +524,9 @@
<0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
reg-names = "acs", "mdd";
regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
+ qcom,retention-voltage = <600000>;
qcom,ldo-default-voltage = <750000>;
qcom,ldo-threshold-voltage = <850000>;
qcom,ldo-delta-voltage = <12500>;
@@ -540,9 +540,9 @@
<0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
reg-names = "acs", "mdd";
regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1120000>;
+ regulator-max-microvolt = <1250000>;
qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
+ qcom,retention-voltage = <600000>;
qcom,ldo-default-voltage = <750000>;
qcom,ldo-threshold-voltage = <850000>;
qcom,ldo-delta-voltage = <12500>;
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index c50b379..ebb5112 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -90,6 +90,7 @@
qcom,clock-krait@f9016000 {
qcom,speed1-pvs0-bin-v0 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 74 >,
< 345600000 775000 85 >,
< 422400000 775000 104 >,
@@ -121,6 +122,7 @@
qcom,speed1-pvs1-bin-v0 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 74 >,
< 345600000 775000 85 >,
< 422400000 775000 104 >,
@@ -152,6 +154,7 @@
qcom,speed1-pvs2-bin-v0 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 74 >,
< 345600000 750000 85 >,
< 422400000 750000 104 >,
@@ -183,6 +186,7 @@
qcom,speed1-pvs3-bin-v0 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 74 >,
< 345600000 750000 85 >,
< 422400000 750000 104 >,
@@ -214,6 +218,7 @@
qcom,speed1-pvs4-bin-v0 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 74 >,
< 345600000 750000 85 >,
< 422400000 750000 104 >,
@@ -245,6 +250,7 @@
qcom,speed1-pvs5-bin-v0 =
< 0 0 0 >,
+ < 268800000 720000 68 >,
< 300000000 725000 74 >,
< 345600000 725000 85 >,
< 422400000 725000 104 >,
@@ -276,6 +282,7 @@
qcom,speed1-pvs6-bin-v0 =
< 0 0 0 >,
+ < 268800000 720000 68 >,
< 300000000 725000 74 >,
< 345600000 725000 85 >,
< 422400000 725000 104 >,
@@ -307,6 +314,7 @@
qcom,speed3-pvs0-bin-v0 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -337,10 +345,14 @@
< 2265600000 1065000 700 >,
< 2342400000 1080000 734 >,
< 2419200000 1095000 769 >,
- < 2457600000 1100000 785 >;
+ < 2457600000 1100000 785 >,
+ < 2572800000 1145000 827 >,
+ < 2726400000 1205000 900 >,
+ < 2880000000 1235000 937 >;
qcom,speed3-pvs1-bin-v0 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -371,10 +383,14 @@
< 2265600000 1040000 700 >,
< 2342400000 1055000 734 >,
< 2419200000 1070000 769 >,
- < 2457600000 1075000 785 >;
+ < 2457600000 1075000 785 >,
+ < 2572800000 1145000 827 >,
+ < 2726400000 1205000 900 >,
+ < 2880000000 1235000 937 >;
qcom,speed3-pvs2-bin-v0 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -405,10 +421,14 @@
< 2265600000 1015000 700 >,
< 2342400000 1030000 734 >,
< 2419200000 1045000 769 >,
- < 2457600000 1050000 785 >;
+ < 2457600000 1050000 785 >,
+ < 2572800000 1125000 827 >,
+ < 2726400000 1195000 900 >,
+ < 2880000000 1225000 937 >;
qcom,speed3-pvs3-bin-v0 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -439,10 +459,14 @@
< 2265600000 990000 700 >,
< 2342400000 1005000 734 >,
< 2419200000 1020000 769 >,
- < 2457600000 1025000 785 >;
+ < 2457600000 1025000 785 >,
+ < 2572800000 1115000 827 >,
+ < 2726400000 1185000 900 >,
+ < 2880000000 1215000 937 >;
qcom,speed3-pvs4-bin-v0 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -473,10 +497,14 @@
< 2265600000 965000 700 >,
< 2342400000 980000 734 >,
< 2419200000 995000 769 >,
- < 2457600000 1000000 785 >;
+ < 2457600000 1000000 785 >,
+ < 2572800000 1075000 827 >,
+ < 2726400000 1175000 900 >,
+ < 2880000000 1205000 937 >;
qcom,speed3-pvs5-bin-v0 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 106 >,
@@ -507,10 +535,14 @@
< 2265600000 940000 700 >,
< 2342400000 955000 734 >,
< 2419200000 970000 769 >,
- < 2457600000 975000 785 >;
+ < 2457600000 975000 785 >,
+ < 2572800000 1025000 827 >,
+ < 2726400000 1175000 900 >,
+ < 2880000000 1195000 937 >;
qcom,speed3-pvs6-bin-v0 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 106 >,
@@ -541,10 +573,14 @@
< 2265600000 915000 700 >,
< 2342400000 930000 734 >,
< 2419200000 945000 769 >,
- < 2457600000 950000 785 >;
+ < 2457600000 950000 785 >,
+ < 2572800000 1010000 827 >,
+ < 2726400000 1155000 900 >,
+ < 2880000000 1175000 937 >;
qcom,speed1-pvs0-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 810000 87 >,
< 422400000 820000 108 >,
@@ -576,6 +612,7 @@
qcom,speed1-pvs1-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 810000 108 >,
@@ -607,6 +644,7 @@
qcom,speed1-pvs2-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 108 >,
@@ -638,6 +676,7 @@
qcom,speed1-pvs3-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 108 >,
@@ -669,6 +708,7 @@
qcom,speed1-pvs4-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 108 >,
@@ -700,6 +740,7 @@
qcom,speed1-pvs5-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 108 >,
@@ -731,6 +772,7 @@
qcom,speed1-pvs6-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -762,6 +804,7 @@
qcom,speed1-pvs7-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -793,6 +836,7 @@
qcom,speed1-pvs8-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -824,6 +868,7 @@
qcom,speed1-pvs9-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -855,6 +900,7 @@
qcom,speed1-pvs10-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -886,6 +932,7 @@
qcom,speed1-pvs11-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -917,6 +964,7 @@
qcom,speed1-pvs12-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -948,6 +996,7 @@
qcom,speed1-pvs13-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 108 >,
@@ -979,6 +1028,7 @@
qcom,speed1-pvs14-bin-v1 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 108 >,
@@ -1010,6 +1060,7 @@
qcom,speed1-pvs15-bin-v1 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 108 >,
@@ -1041,6 +1092,7 @@
qcom,speed3-pvs0-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1071,10 +1123,14 @@
< 2265600000 1085000 716 >,
< 2342400000 1100000 751 >,
< 2419200000 1115000 786 >,
- < 2457600000 1120000 802 >;
+ < 2457600000 1120000 802 >,
+ < 2572800000 1175000 827 >,
+ < 2726400000 1225000 900 >,
+ < 2880000000 1265000 937 >;
qcom,speed3-pvs1-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1105,10 +1161,14 @@
< 2265600000 1075000 716 >,
< 2342400000 1090000 751 >,
< 2419200000 1105000 786 >,
- < 2457600000 1110000 802 >;
+ < 2457600000 1110000 802 >,
+ < 2572800000 1165000 827 >,
+ < 2726400000 1215000 900 >,
+ < 2880000000 1245000 937 >;
qcom,speed3-pvs2-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1139,10 +1199,14 @@
< 2265600000 1065000 716 >,
< 2342400000 1080000 751 >,
< 2419200000 1095000 786 >,
- < 2457600000 1100000 802 >;
+ < 2457600000 1100000 802 >,
+ < 2572800000 1145000 827 >,
+ < 2726400000 1185000 900 >,
+ < 2880000000 1215000 937 >;
qcom,speed3-pvs3-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1173,10 +1237,14 @@
< 2265600000 1055000 716 >,
< 2342400000 1070000 751 >,
< 2419200000 1085000 786 >,
- < 2457600000 1090000 802 >;
+ < 2457600000 1090000 802 >,
+ < 2572800000 1145000 827 >,
+ < 2726400000 1175000 900 >,
+ < 2880000000 1205000 937 >;
qcom,speed3-pvs4-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1207,10 +1275,14 @@
< 2265600000 1045000 716 >,
< 2342400000 1060000 751 >,
< 2419200000 1075000 786 >,
- < 2457600000 1080000 802 >;
+ < 2457600000 1080000 802 >,
+ < 2572800000 1135000 827 >,
+ < 2726400000 1165000 900 >,
+ < 2880000000 1195000 937 >;
qcom,speed3-pvs5-bin-v1 =
< 0 0 0 >,
+ < 268800000 795000 68 >,
< 300000000 800000 76 >,
< 345600000 800000 87 >,
< 422400000 800000 106 >,
@@ -1241,10 +1313,14 @@
< 2265600000 1035000 716 >,
< 2342400000 1050000 751 >,
< 2419200000 1065000 786 >,
- < 2457600000 1070000 802 >;
+ < 2457600000 1070000 802 >,
+ < 2572800000 1125000 827 >,
+ < 2726400000 1155000 900 >,
+ < 2880000000 1185000 937 >;
qcom,speed3-pvs6-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1275,10 +1351,14 @@
< 2265600000 1025000 716 >,
< 2342400000 1040000 751 >,
< 2419200000 1055000 786 >,
- < 2457600000 1060000 802 >;
+ < 2457600000 1060000 802 >,
+ < 2572800000 1115000 827 >,
+ < 2726400000 1145000 900 >,
+ < 2880000000 1175000 937 >;
qcom,speed3-pvs7-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1309,10 +1389,14 @@
< 2265600000 1015000 716 >,
< 2342400000 1030000 751 >,
< 2419200000 1045000 786 >,
- < 2457600000 1050000 802 >;
+ < 2457600000 1050000 802 >,
+ < 2572800000 1105000 827 >,
+ < 2726400000 1135000 900 >,
+ < 2880000000 1165000 937 >;
qcom,speed3-pvs8-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1343,10 +1427,14 @@
< 2265600000 1005000 716 >,
< 2342400000 1020000 751 >,
< 2419200000 1035000 786 >,
- < 2457600000 1040000 802 >;
+ < 2457600000 1040000 802 >,
+ < 2572800000 1095000 827 >,
+ < 2726400000 1125000 900 >,
+ < 2880000000 1155000 937 >;
qcom,speed3-pvs9-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1377,10 +1465,14 @@
< 2265600000 995000 716 >,
< 2342400000 1010000 751 >,
< 2419200000 1025000 786 >,
- < 2457600000 1030000 802 >;
+ < 2457600000 1030000 802 >,
+ < 2572800000 1085000 827 >,
+ < 2726400000 1115000 900 >,
+ < 2880000000 1145000 937 >;
qcom,speed3-pvs10-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1411,10 +1503,14 @@
< 2265600000 985000 716 >,
< 2342400000 1000000 751 >,
< 2419200000 1015000 786 >,
- < 2457600000 1020000 802 >;
+ < 2457600000 1020000 802 >,
+ < 2572800000 1075000 827 >,
+ < 2726400000 1105000 900 >,
+ < 2880000000 1135000 937 >;
qcom,speed3-pvs11-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1445,10 +1541,14 @@
< 2265600000 975000 716 >,
< 2342400000 990000 751 >,
< 2419200000 1005000 786 >,
- < 2457600000 1010000 802 >;
+ < 2457600000 1010000 802 >,
+ < 2572800000 1065000 827 >,
+ < 2726400000 1095000 900 >,
+ < 2880000000 1125000 937 >;
qcom,speed3-pvs12-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1479,10 +1579,14 @@
< 2265600000 965000 716 >,
< 2342400000 980000 751 >,
< 2419200000 995000 786 >,
- < 2457600000 1000000 802 >;
+ < 2457600000 1000000 802 >,
+ < 2572800000 1065000 827 >,
+ < 2726400000 1085000 900 >,
+ < 2880000000 1115000 937 >;
qcom,speed3-pvs13-bin-v1 =
< 0 0 0 >,
+ < 268800000 770000 68 >,
< 300000000 775000 76 >,
< 345600000 775000 87 >,
< 422400000 775000 106 >,
@@ -1513,10 +1617,14 @@
< 2265600000 955000 716 >,
< 2342400000 970000 751 >,
< 2419200000 985000 786 >,
- < 2457600000 990000 802 >;
+ < 2457600000 990000 802 >,
+ < 2572800000 1045000 827 >,
+ < 2726400000 1065000 900 >,
+ < 2880000000 1095000 937 >;
qcom,speed3-pvs14-bin-v1 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 106 >,
@@ -1547,10 +1655,14 @@
< 2265600000 945000 716 >,
< 2342400000 960000 751 >,
< 2419200000 975000 786 >,
- < 2457600000 980000 802 >;
+ < 2457600000 980000 802 >,
+ < 2572800000 1035000 827 >,
+ < 2726400000 1055000 900 >,
+ < 2880000000 1085000 937 >;
qcom,speed3-pvs15-bin-v1 =
< 0 0 0 >,
+ < 268800000 745000 68 >,
< 300000000 750000 76 >,
< 345600000 750000 87 >,
< 422400000 750000 106 >,
@@ -1581,9 +1693,39 @@
< 2265600000 935000 716 >,
< 2342400000 950000 751 >,
< 2419200000 965000 786 >,
- < 2457600000 970000 802 >;
+ < 2457600000 970000 802 >,
+ < 2572800000 1025000 827 >,
+ < 2726400000 1045000 900 >,
+ < 2880000000 1075000 937 >;
};
+ qcom,msm-cpufreq@0 {
+ reg = <0 4>;
+ compatible = "qcom,msm-cpufreq";
+ qcom,cpufreq-table =
+ < 268800 /* 75 MHz */ >,
+ < 300000 /* 75 MHz */ >,
+ < 422400 /* 150 MHz */ >,
+ < 652800 /* 200 MHz */ >,
+ < 729600 /* 307 MHz */ >,
+ < 883200 /* 307 MHz */ >,
+ < 960000 /* 460 MHz */ >,
+ < 1036800 /* 460 MHz */ >,
+ < 1190400 /* 460 MHz */ >,
+ < 1267200 /* 614 MHz */ >,
+ < 1497600 /* 614 MHz */ >,
+ < 1574400 /* 800 MHz */ >,
+ < 1728000 /* 800 MHz */ >,
+ < 1958400 /* 931 MHz */ >,
+ < 2265600 /* 931 MHz */ >,
+ < 2342400 /* 931 MHz */ >,
+ < 2419200 /* 931 MHz */ >,
+ < 2457600 /* 931 MHz */ >,
+ < 2572800 /* 931 MHz */ >,
+ < 2726400 /* 931 MHz */ >,
+ < 2880000 /* 931 MHz */ >;
+ };
+
i2c@f9928000 { /* BLSP-1 QUP-6 */
cell-index = <3>;
compatible = "qcom,i2c-qup";
@@ -1751,7 +1893,7 @@
<1880000 2068000>,
<3008000 3309000>,
<3760000 4136000>,
- <4468000 2457000>;
+ <4468000 2457600>;
qcom,dec-ocmem-ab-ib = <0 0>,
<176000 519000>,
<456000 519000>,
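
For orientation, each added row in these tables reads < frequency-in-Hz voltage-in-µV x >, where the frequency and voltage line up with the regulator-max-microvolt increases above and the third column is left uninterpreted here. A throwaway unit check (not part of the patch) confirms the subject line's numbers:

/* Throwaway unit check for the new frequency rows (not part of the patch).
 * Row format assumed: <frequency Hz, voltage uV, ...>. */
#include <stdio.h>

int main(void)
{
	unsigned long long top_hz = 2880000000ULL; /* new ceiling row */
	unsigned long long min_hz = 268800000ULL;  /* new floor row */
	unsigned int top_uv = 1235000;             /* speed3-pvs0-bin-v0 ceiling voltage */

	printf("top: %.2f GHz at %.3f V\n", top_hz / 1e9, top_uv / 1e6);
	printf("min: %.1f MHz\n", min_hz / 1e6);
	return 0; /* prints 2.88 GHz at 1.235 V, and 268.8 MHz */
}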


@@ -0,0 +1,48 @@
From ce1d0035c6d67f633444075f4a1cf9aef165d3f0 Mon Sep 17 00:00:00 2001
From: Tad <tad@spotco.us>
Date: Sat, 17 Oct 2015 20:50:31 -0400
Subject: [PATCH] Update defconfig
---
arch/arm/configs/cyanogenmod_bacon_defconfig | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/arm/configs/cyanogenmod_bacon_defconfig b/arch/arm/configs/cyanogenmod_bacon_defconfig
index 5f67061..41a303a 100644
--- a/arch/arm/configs/cyanogenmod_bacon_defconfig
+++ b/arch/arm/configs/cyanogenmod_bacon_defconfig
@@ -125,8 +125,9 @@ CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
-# CONFIG_RD_LZ4 is not set
+CONFIG_RD_LZ4=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_CC_OPTIMIZE_MORE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
CONFIG_PANIC_TIMEOUT=5
@@ -190,7 +191,7 @@ CONFIG_SECCOMP_FILTER=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
-# CONFIG_MODULES is not set
+CONFIG_MODULES=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_LBDAF=y
@@ -1775,12 +1776,12 @@ CONFIG_SERIAL_MSM_HS=y
#
# Diag Support
#
-# CONFIG_DIAG_CHAR is not set
+CONFIG_DIAG_CHAR=y
#
# DIAG traffic over USB
#
-# CONFIG_DIAG_OVER_USB is not set
+CONFIG_DIAG_OVER_USB=y
#
# SDIO support for DIAG


@@ -0,0 +1,244 @@
From be4516645f228e02ae04eab6c1c3f564cff1e0ec Mon Sep 17 00:00:00 2001
From: Tad <tad@spotco.us>
Date: Sat, 17 Oct 2015 20:52:02 -0400
Subject: [PATCH] Implement Quick Wakeup
---
arch/arm/configs/cyanogenmod_bacon_defconfig | 2 +
include/linux/quickwakeup.h | 46 ++++++++++++
kernel/power/Kconfig | 20 ++++--
kernel/power/Makefile | 1 +
kernel/power/quickwakeup.c | 104 +++++++++++++++++++++++++++
5 files changed, 167 insertions(+), 6 deletions(-)
create mode 100644 include/linux/quickwakeup.h
create mode 100644 kernel/power/quickwakeup.c
diff --git a/arch/arm/configs/cyanogenmod_bacon_defconfig b/arch/arm/configs/cyanogenmod_bacon_defconfig
index 41a303a..0d2faaf 100644
--- a/arch/arm/configs/cyanogenmod_bacon_defconfig
+++ b/arch/arm/configs/cyanogenmod_bacon_defconfig
@@ -3622,3 +3622,5 @@ CONFIG_NLATTR=y
# CONFIG_CORDIC is not set
CONFIG_QMI_ENCDEC=y
# CONFIG_QMI_ENCDEC_DEBUG is not set
+
+CONFIG_QUICK_WAKEUP=y
diff --git a/include/linux/quickwakeup.h b/include/linux/quickwakeup.h
new file mode 100644
index 0000000..000effa
--- /dev/null
+++ b/include/linux/quickwakeup.h
@@ -0,0 +1,46 @@
+/* include/linux/quickwakeup.h
+ *
+ * Copyright (C) 2014 Motorola Mobility LLC.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QUICKWAKEUP_H_
+#define _QUICKWAKEUP_H_
+
+#ifdef __KERNEL__
+
+struct quickwakeup_ops {
+ struct list_head list;
+ char *name;
+ int (*qw_execute)(void *data);
+ int (*qw_check)(void *data);
+ int execute;
+ void *data; /* arbitrary data passed back to user */
+};
+
+#ifdef CONFIG_QUICK_WAKEUP
+
+int quickwakeup_register(struct quickwakeup_ops *ops);
+void quickwakeup_unregister(struct quickwakeup_ops *ops);
+bool quickwakeup_suspend_again(void);
+
+#else
+
+static inline int quickwakeup_register(struct quickwakeup_ops *ops) { return 0; };
+static inline void quickwakeup_unregister(struct quickwakeup_ops *ops) {};
+static inline bool quickwakeup_suspend_again(void) { return 0; };
+
+#endif /* CONFIG_QUICK_WAKEUP */
+
+#endif /* __KERNEL__ */
+
+#endif /* _QUICKWAKEUP_H_ */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e536c8d..8006962 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -83,20 +83,20 @@ config PM_STD_PARTITION
default ""
---help---
The default resume partition is the partition that the suspend-
- to-disk implementation will look for a suspended disk image.
+ to-disk implementation will look for a suspended disk image.
- The partition specified here will be different for almost every user.
+ The partition specified here will be different for almost every user.
It should be a valid swap partition (at least for now) that is turned
- on before suspending.
+ on before suspending.
The partition specified can be overridden by specifying:
- resume=/dev/<other device>
+ resume=/dev/<other device>
- which will set the resume partition to the device specified.
+ which will set the resume partition to the device specified.
Note there is currently not a way to specify which device to save the
- suspended image to. It will simply pick the first available swap
+ suspended image to. It will simply pick the first available swap
device.
config PM_SLEEP
@@ -285,6 +285,14 @@ config SUSPEND_TIME
Prints the time spent in suspend in the kernel log, and
keeps statistics on the time spent in suspend in
/sys/kernel/debug/suspend_time
+
+config QUICK_WAKEUP
+ bool "Quick wakeup"
+ depends on SUSPEND
+ default n
+ ---help---
+ Allow kernel drivers to do periodic jobs without resuming the full system.
+ This option can increase battery life on Android-powered smartphones.
config DEDUCE_WAKEUP_REASONS
bool
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 74c713b..e5bebbc 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
+obj-$(CONFIG_QUICK_WAKEUP) += quickwakeup.o
obj-$(CONFIG_SUSPEND) += wakeup_reason.o
diff --git a/kernel/power/quickwakeup.c b/kernel/power/quickwakeup.c
new file mode 100644
index 0000000..46f9dda
--- /dev/null
+++ b/kernel/power/quickwakeup.c
@@ -0,0 +1,104 @@
+/* kernel/power/quickwakeup.c
+ *
+ * Copyright (C) 2014 Motorola Mobility LLC.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/quickwakeup.h>
+
+static LIST_HEAD(qw_head);
+static DEFINE_MUTEX(list_lock);
+
+int quickwakeup_register(struct quickwakeup_ops *ops)
+{
+ mutex_lock(&list_lock);
+ list_add(&ops->list, &qw_head);
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+
+void quickwakeup_unregister(struct quickwakeup_ops *ops)
+{
+ mutex_lock(&list_lock);
+ list_del(&ops->list);
+ mutex_unlock(&list_lock);
+}
+
+static int quickwakeup_check(void)
+{
+ int check = 0;
+ struct quickwakeup_ops *index;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(index, &qw_head, list) {
+ int ret = index->qw_check(index->data);
+ index->execute = ret;
+ check |= ret;
+ pr_debug("%s: %s votes for %s\n", __func__, index->name,
+ ret ? "execute" : "dont care");
+ }
+
+ mutex_unlock(&list_lock);
+
+ return check;
+}
+
+/* return 1 => suspend again
+ return 0 => continue wakeup
+ */
+static int quickwakeup_execute(void)
+{
+ int suspend_again = 0;
+ int final_vote = 1;
+ struct quickwakeup_ops *index;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(index, &qw_head, list) {
+ if (index->execute) {
+ int ret = index->qw_execute(index->data);
+ index->execute = 0;
+ final_vote &= ret;
+ suspend_again = final_vote;
+ pr_debug("%s: %s votes for %s\n", __func__, index->name,
+ ret ? "suspend again" : "wakeup");
+ }
+ }
+
+ mutex_unlock(&list_lock);
+
+ pr_debug("%s: %s\n", __func__,
+ suspend_again ? "suspend again" : "wakeup");
+
+ return suspend_again;
+}
+
+/* return 1 => suspend again
+ return 0 => continue wakeup
+ */
+bool quickwakeup_suspend_again(void)
+{
+ int ret = 0;
+
+ if (quickwakeup_check())
+ ret = quickwakeup_execute();
+
+ pr_debug("%s- returning %d %s\n", __func__, ret,
+ ret ? "suspend again" : "wakeup");
+
+ return ret;
+}
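
The API above is the whole contract for clients: implement qw_check() to vote on a wakeup and qw_execute() to do the periodic job. A hypothetical driver module might hook in as follows; the modem-poll names are invented for illustration, and this also assumes the suspend core has been wired to call quickwakeup_suspend_again() from its suspend-again path (that wiring is not shown in this patch).

/* Hypothetical client of the quickwakeup API above. */
#include <linux/module.h>
#include <linux/quickwakeup.h>

static int modem_poll_check(void *data)
{
	/* Nonzero vote => run our qw_execute() on this wakeup. */
	return 1;
}

static int modem_poll_execute(void *data)
{
	/* Do the periodic job; return 1 to vote "suspend again",
	 * 0 to let the system fully resume. */
	return 1;
}

static struct quickwakeup_ops modem_poll_ops = {
	.name       = "modem-poll",
	.qw_check   = modem_poll_check,
	.qw_execute = modem_poll_execute,
};

static int __init modem_poll_init(void)
{
	return quickwakeup_register(&modem_poll_ops);
}

static void __exit modem_poll_exit(void)
{
	quickwakeup_unregister(&modem_poll_ops);
}

module_init(modem_poll_init);
module_exit(modem_poll_exit);
MODULE_LICENSE("GPL");

Because quickwakeup_execute() ANDs the votes of the clients it runs, a single client returning 0 from qw_execute() is enough to force a full wakeup.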

[19 file diffs suppressed because they are too large]


@@ -0,0 +1,943 @@
From 242c13bda04cabd39eefd5cdb8aeba6e6038aa49 Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Wed, 7 Oct 2015 10:54:49 +0200
Subject: [PATCH] cpufreq: nightmare: added new governor
---
drivers/cpufreq/Kconfig | 9 +
drivers/cpufreq/Makefile | 1 +
drivers/cpufreq/cpufreq_nightmare.c | 871 ++++++++++++++++++++++++++++++++++++
include/linux/cpufreq.h | 3 +
4 files changed, 884 insertions(+)
create mode 100644 drivers/cpufreq/cpufreq_nightmare.c
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 03c2115..01c4f7b 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -153,6 +153,11 @@ config CPU_FREQ_DEFAULT_GOV_YANKACTIVE
loading your cpufreq low-level hardware driver, using the
'yankactive' governor for latency-sensitive workloads.
+config CPU_FREQ_DEFAULT_GOV_NIGHTMARE
+ bool "nightmare"
+ select CPU_FREQ_GOV_NIGHTMARE
+ help
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -305,6 +310,10 @@ config CPU_FREQ_GOV_YANKACTIVE
and can be achieved using interactive with a script. This is added for
simplification purposes for myself.
+config CPU_FREQ_GOV_NIGHTMARE
+ tristate "'nightmare' cpufreq governor"
+ depends on CPU_FREQ
+
menu "x86 CPU frequency scaling drivers"
depends on X86
source "drivers/cpufreq/Kconfig.x86"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index eef5c42..515ca87 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_IMPULSE) += cpufreq_impulse.o
obj-$(CONFIG_CPU_FREQ_GOV_ZZMOOVE) += cpufreq_zzmoove.o
obj-$(CONFIG_CPU_FREQ_GOV_INTELLIMM) += cpufreq_intellimm.o
obj-$(CONFIG_CPU_FREQ_GOV_YANKACTIVE) += cpufreq_yankactive.o
+obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq_nightmare.c b/drivers/cpufreq/cpufreq_nightmare.c
new file mode 100644
index 0000000..974761f
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_nightmare.c
@@ -0,0 +1,871 @@
+/*
+ * drivers/cpufreq/cpufreq_nightmare.c
+ *
+ * Copyright (C) 2011 Samsung Electronics co. ltd
+ * ByungChang Cha <bc.cha@samsung.com>
+ *
+ * Based on ondemand governor
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Created by Alucard_24@xda
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+/*
+ * dbs is used in this file as a short form for demand-based switching.
+ * It helps to keep variable names smaller and simpler.
+ */
+
+static void do_nightmare_timer(struct work_struct *work);
+static int cpufreq_governor_nightmare(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_nightmare = {
+ .name = "nightmare",
+ .governor = cpufreq_governor_nightmare,
+ .owner = THIS_MODULE,
+};
+
+struct cpufreq_nightmare_cpuinfo {
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_idle;
+ struct cpufreq_frequency_table *freq_table;
+ struct delayed_work work;
+ struct cpufreq_policy *cur_policy;
+ int cpu;
+ unsigned int enable:1;
+ /*
+ * mutex that serializes governor limit change with
+ * do_nightmare_timer invocation. We do not want do_nightmare_timer to run
+ * when user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_nightmare_cpuinfo, od_nightmare_cpuinfo);
+
+static unsigned int nightmare_enable; /* number of CPUs using this policy */
+/*
+ * nightmare_mutex protects nightmare_enable in governor start/stop.
+ */
+static DEFINE_MUTEX(nightmare_mutex);
+
+/*static atomic_t min_freq_limit[NR_CPUS];
+static atomic_t max_freq_limit[NR_CPUS];*/
+
+/* nightmare tuners */
+static struct nightmare_tuners {
+ atomic_t sampling_rate;
+ atomic_t inc_cpu_load_at_min_freq;
+ atomic_t inc_cpu_load;
+ atomic_t dec_cpu_load;
+ atomic_t freq_for_responsiveness;
+ atomic_t freq_for_responsiveness_max;
+ atomic_t freq_up_brake_at_min_freq;
+ atomic_t freq_up_brake;
+ atomic_t freq_step_at_min_freq;
+ atomic_t freq_step;
+ atomic_t freq_step_dec;
+ atomic_t freq_step_dec_at_max_freq;
+#ifdef CONFIG_CPU_EXYNOS4210
+ atomic_t up_sf_step;
+ atomic_t down_sf_step;
+#endif
+} nightmare_tuners_ins = {
+ .sampling_rate = ATOMIC_INIT(60000),
+ .inc_cpu_load_at_min_freq = ATOMIC_INIT(60),
+ .inc_cpu_load = ATOMIC_INIT(70),
+ .dec_cpu_load = ATOMIC_INIT(50),
+#ifdef CONFIG_CPU_EXYNOS4210
+ .freq_for_responsiveness = ATOMIC_INIT(200000),
+ .freq_for_responsiveness_max = ATOMIC_INIT(1200000),
+#else
+ .freq_for_responsiveness = ATOMIC_INIT(540000),
+ .freq_for_responsiveness_max = ATOMIC_INIT(1890000),
+#endif
+ .freq_step_at_min_freq = ATOMIC_INIT(20),
+ .freq_step = ATOMIC_INIT(20),
+ .freq_up_brake_at_min_freq = ATOMIC_INIT(30),
+ .freq_up_brake = ATOMIC_INIT(30),
+ .freq_step_dec = ATOMIC_INIT(10),
+ .freq_step_dec_at_max_freq = ATOMIC_INIT(10),
+#ifdef CONFIG_CPU_EXYNOS4210
+ .up_sf_step = ATOMIC_INIT(0),
+ .down_sf_step = ATOMIC_INIT(0),
+#endif
+};
+
+/************************** sysfs interface ************************/
+
+/* cpufreq_nightmare Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", atomic_read(&nightmare_tuners_ins.object)); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(inc_cpu_load_at_min_freq, inc_cpu_load_at_min_freq);
+show_one(inc_cpu_load, inc_cpu_load);
+show_one(dec_cpu_load, dec_cpu_load);
+show_one(freq_for_responsiveness, freq_for_responsiveness);
+show_one(freq_for_responsiveness_max, freq_for_responsiveness_max);
+show_one(freq_step_at_min_freq, freq_step_at_min_freq);
+show_one(freq_step, freq_step);
+show_one(freq_up_brake_at_min_freq, freq_up_brake_at_min_freq);
+show_one(freq_up_brake, freq_up_brake);
+show_one(freq_step_dec, freq_step_dec);
+show_one(freq_step_dec_at_max_freq, freq_step_dec_at_max_freq);
+#ifdef CONFIG_CPU_EXYNOS4210
+show_one(up_sf_step, up_sf_step);
+show_one(down_sf_step, down_sf_step);
+#endif
+
+/*#define show_freqlimit_param(file_name, cpu) \
+static ssize_t show_##file_name##_##cpu \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", atomic_read(&file_name[cpu])); \
+}
+
+#define store_freqlimit_param(file_name, cpu) \
+static ssize_t store_##file_name##_##cpu \
+(struct kobject *kobj, struct attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ unsigned int input; \
+ int ret; \
+ ret = sscanf(buf, "%d", &input); \
+ if (ret != 1) \
+ return -EINVAL; \
+ if (input == atomic_read(&file_name[cpu])) { \
+ return count; \
+ } \
+ atomic_set(&file_name[cpu], input); \
+ return count; \
+}*/
+
+/* min freq limit for awaking */
+/*show_freqlimit_param(min_freq_limit, 0);
+show_freqlimit_param(min_freq_limit, 1);
+#if NR_CPUS >= 4
+show_freqlimit_param(min_freq_limit, 2);
+show_freqlimit_param(min_freq_limit, 3);
+#endif*/
+/* max freq limit for awaking */
+/*show_freqlimit_param(max_freq_limit, 0);
+show_freqlimit_param(max_freq_limit, 1);
+#if NR_CPUS >= 4
+show_freqlimit_param(max_freq_limit, 2);
+show_freqlimit_param(max_freq_limit, 3);
+#endif*/
+/* min freq limit for awaking */
+/*store_freqlimit_param(min_freq_limit, 0);
+store_freqlimit_param(min_freq_limit, 1);
+#if NR_CPUS >= 4
+store_freqlimit_param(min_freq_limit, 2);
+store_freqlimit_param(min_freq_limit, 3);
+#endif*/
+/* max freq limit for awaking */
+/*store_freqlimit_param(max_freq_limit, 0);
+store_freqlimit_param(max_freq_limit, 1);
+#if NR_CPUS >= 4
+store_freqlimit_param(max_freq_limit, 2);
+store_freqlimit_param(max_freq_limit, 3);
+#endif
+define_one_global_rw(min_freq_limit_0);
+define_one_global_rw(min_freq_limit_1);
+#if NR_CPUS >= 4
+define_one_global_rw(min_freq_limit_2);
+define_one_global_rw(min_freq_limit_3);
+#endif
+define_one_global_rw(max_freq_limit_0);
+define_one_global_rw(max_freq_limit_1);
+#if NR_CPUS >= 4
+define_one_global_rw(max_freq_limit_2);
+define_one_global_rw(max_freq_limit_3);
+#endif*/
+
+/**
+ * update_sampling_rate - update sampling rate effective immediately if needed.
+ * @new_rate: new sampling rate
+ *
+ * If the new rate is smaller than the old one, simply updating
+ * nightmare_tuners_ins.sampling_rate might not be appropriate. For example,
+ * if the original sampling_rate was 1 second and the requested new sampling
+ * rate is 10 ms because the user needs an immediate reaction from the
+ * governor, but is not sure whether a higher frequency will be required,
+ * the governor may change the sampling rate too late, up to 1 second
+ * later. Thus, if we are reducing the sampling rate, we need to make the
+ * new value effective immediately.
+ */
+static void update_sampling_rate(unsigned int new_rate)
+{
+ int cpu;
+
+ atomic_set(&nightmare_tuners_ins.sampling_rate,new_rate);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ struct cpufreq_nightmare_cpuinfo *nightmare_cpuinfo;
+ unsigned long next_sampling, appointed_at;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ nightmare_cpuinfo = &per_cpu(od_nightmare_cpuinfo, policy->cpu);
+ cpufreq_cpu_put(policy);
+
+ mutex_lock(&nightmare_cpuinfo->timer_mutex);
+
+ if (!delayed_work_pending(&nightmare_cpuinfo->work)) {
+ mutex_unlock(&nightmare_cpuinfo->timer_mutex);
+ continue;
+ }
+
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = nightmare_cpuinfo->work.timer.expires;
+
+
+ if (time_before(next_sampling, appointed_at)) {
+
+ mutex_unlock(&nightmare_cpuinfo->timer_mutex);
+ cancel_delayed_work_sync(&nightmare_cpuinfo->work);
+ mutex_lock(&nightmare_cpuinfo->timer_mutex);
+
+ #ifdef CONFIG_CPU_EXYNOS4210
+ mod_delayed_work_on(nightmare_cpuinfo->cpu, system_wq, &nightmare_cpuinfo->work, usecs_to_jiffies(new_rate));
+ #else
+ queue_delayed_work_on(nightmare_cpuinfo->cpu, system_wq, &nightmare_cpuinfo->work, usecs_to_jiffies(new_rate));
+ #endif
+ }
+ mutex_unlock(&nightmare_cpuinfo->timer_mutex);
+ }
+ put_online_cpus();
+}
+
+/* sampling_rate */
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(input,10000);
+
+ if (input == atomic_read(&nightmare_tuners_ins.sampling_rate))
+ return count;
+
+ update_sampling_rate(input);
+
+ return count;
+}
+
+/* inc_cpu_load_at_min_freq */
+static ssize_t store_inc_cpu_load_at_min_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1) {
+ return -EINVAL;
+ }
+
+ input = min(input,atomic_read(&nightmare_tuners_ins.inc_cpu_load));
+
+ if (input == atomic_read(&nightmare_tuners_ins.inc_cpu_load_at_min_freq))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.inc_cpu_load_at_min_freq,input);
+
+ return count;
+}
+
+/* inc_cpu_load */
+static ssize_t store_inc_cpu_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.inc_cpu_load))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.inc_cpu_load,input);
+
+ return count;
+}
+
+/* dec_cpu_load */
+static ssize_t store_dec_cpu_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,95),5);
+
+ if (input == atomic_read(&nightmare_tuners_ins.dec_cpu_load))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.dec_cpu_load,input);
+
+ return count;
+}
+
+/* freq_for_responsiveness */
+static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_for_responsiveness))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.freq_for_responsiveness,input);
+
+ return count;
+}
+
+/* freq_for_responsiveness_max */
+static ssize_t store_freq_for_responsiveness_max(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_for_responsiveness_max))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.freq_for_responsiveness_max,input);
+
+ return count;
+}
+
+/* freq_step_at_min_freq */
+static ssize_t store_freq_step_at_min_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_step_at_min_freq))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.freq_step_at_min_freq,input);
+
+ return count;
+}
+
+/* freq_step */
+static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_step))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.freq_step,input);
+
+ return count;
+}
+
+/* freq_up_brake_at_min_freq */
+static ssize_t store_freq_up_brake_at_min_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_up_brake_at_min_freq)) {/* nothing to do */
+ return count;
+ }
+
+ atomic_set(&nightmare_tuners_ins.freq_up_brake_at_min_freq,input);
+
+ return count;
+}
+
+/* freq_up_brake */
+static ssize_t store_freq_up_brake(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_up_brake)) {/* nothing to do */
+ return count;
+ }
+
+ atomic_set(&nightmare_tuners_ins.freq_up_brake,input);
+
+ return count;
+}
+
+/* freq_step_dec */
+static ssize_t store_freq_step_dec(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_step_dec)) {/* nothing to do */
+ return count;
+ }
+
+ atomic_set(&nightmare_tuners_ins.freq_step_dec,input);
+
+ return count;
+}
+
+/* freq_step_dec_at_max_freq */
+static ssize_t store_freq_step_dec_at_max_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,100),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.freq_step_dec_at_max_freq)) {/* nothing to do */
+ return count;
+ }
+
+ atomic_set(&nightmare_tuners_ins.freq_step_dec_at_max_freq,input);
+
+ return count;
+}
+#ifdef CONFIG_CPU_EXYNOS4210
+/* up_sf_step */
+static ssize_t store_up_sf_step(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,99),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.up_sf_step))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.up_sf_step,input);
+
+ return count;
+}
+
+/* down_sf_step */
+static ssize_t store_down_sf_step(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ int input;
+ int ret;
+
+ ret = sscanf(buf, "%d", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ input = max(min(input,99),0);
+
+ if (input == atomic_read(&nightmare_tuners_ins.down_sf_step))
+ return count;
+
+ atomic_set(&nightmare_tuners_ins.down_sf_step,input);
+
+ return count;
+}
+#endif
+
+define_one_global_rw(sampling_rate);
+define_one_global_rw(inc_cpu_load_at_min_freq);
+define_one_global_rw(inc_cpu_load);
+define_one_global_rw(dec_cpu_load);
+define_one_global_rw(freq_for_responsiveness);
+define_one_global_rw(freq_for_responsiveness_max);
+define_one_global_rw(freq_step_at_min_freq);
+define_one_global_rw(freq_step);
+define_one_global_rw(freq_up_brake_at_min_freq);
+define_one_global_rw(freq_up_brake);
+define_one_global_rw(freq_step_dec);
+define_one_global_rw(freq_step_dec_at_max_freq);
+#ifdef CONFIG_CPU_EXYNOS4210
+define_one_global_rw(up_sf_step);
+define_one_global_rw(down_sf_step);
+#endif
+
+static struct attribute *nightmare_attributes[] = {
+ &sampling_rate.attr,
+ /*&min_freq_limit_0.attr,
+ &min_freq_limit_1.attr,
+#if NR_CPUS >= 4
+ &min_freq_limit_2.attr,
+ &min_freq_limit_3.attr,
+#endif
+ &max_freq_limit_0.attr,
+ &max_freq_limit_1.attr,
+#if NR_CPUS >= 4
+ &max_freq_limit_2.attr,
+ &max_freq_limit_3.attr,
+#endif*/
+ &inc_cpu_load_at_min_freq.attr,
+ &inc_cpu_load.attr,
+ &dec_cpu_load.attr,
+ &freq_for_responsiveness.attr,
+ &freq_for_responsiveness_max.attr,
+ &freq_step_at_min_freq.attr,
+ &freq_step.attr,
+ &freq_up_brake_at_min_freq.attr,
+ &freq_up_brake.attr,
+ &freq_step_dec.attr,
+ &freq_step_dec_at_max_freq.attr,
+#ifdef CONFIG_CPU_EXYNOS4210
+ &up_sf_step.attr,
+ &down_sf_step.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group nightmare_attr_group = {
+ .attrs = nightmare_attributes,
+ .name = "nightmare",
+};
+
+/************************** sysfs end ************************/
+
+static void nightmare_check_cpu(struct cpufreq_nightmare_cpuinfo *this_nightmare_cpuinfo)
+{
+ struct cpufreq_policy *cpu_policy;
+ unsigned int min_freq;
+ unsigned int max_freq;
+#ifdef CONFIG_CPU_EXYNOS4210
+ int up_sf_step = atomic_read(&nightmare_tuners_ins.up_sf_step);
+ int down_sf_step = atomic_read(&nightmare_tuners_ins.down_sf_step);
+#endif
+ unsigned int freq_for_responsiveness;
+ unsigned int freq_for_responsiveness_max;
+ int dec_cpu_load;
+ int inc_cpu_load;
+ int freq_step;
+ int freq_up_brake;
+ int freq_step_dec;
+ cputime64_t cur_wall_time, cur_idle_time;
+ unsigned int wall_time, idle_time;
+ unsigned int index = 0;
+ unsigned int tmp_freq = 0;
+ unsigned int next_freq = 0;
+ int cur_load = -1;
+ unsigned int cpu;
+
+ cpu = this_nightmare_cpuinfo->cpu;
+ cpu_policy = this_nightmare_cpuinfo->cur_policy;
+
+ cur_idle_time = get_cpu_idle_time_us(cpu, NULL);
+ cur_idle_time += get_cpu_iowait_time_us(cpu, &cur_wall_time);
+
+ wall_time = (unsigned int)
+ (cur_wall_time - this_nightmare_cpuinfo->prev_cpu_wall);
+ this_nightmare_cpuinfo->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int)
+ (cur_idle_time - this_nightmare_cpuinfo->prev_cpu_idle);
+ this_nightmare_cpuinfo->prev_cpu_idle = cur_idle_time;
+
+ /*min_freq = atomic_read(&min_freq_limit[cpu]);
+ max_freq = atomic_read(&max_freq_limit[cpu]);*/
+
+ freq_for_responsiveness = atomic_read(&nightmare_tuners_ins.freq_for_responsiveness);
+ freq_for_responsiveness_max = atomic_read(&nightmare_tuners_ins.freq_for_responsiveness_max);
+ dec_cpu_load = atomic_read(&nightmare_tuners_ins.dec_cpu_load);
+ inc_cpu_load = atomic_read(&nightmare_tuners_ins.inc_cpu_load);
+ freq_step = atomic_read(&nightmare_tuners_ins.freq_step);
+ freq_up_brake = atomic_read(&nightmare_tuners_ins.freq_up_brake);
+ freq_step_dec = atomic_read(&nightmare_tuners_ins.freq_step_dec);
+
+ if (!cpu_policy)
+ return;
+
+ /*printk(KERN_ERR "TIMER CPU[%u], wall[%u], idle[%u]\n",cpu, wall_time, idle_time);*/
+ if (wall_time >= idle_time) { /*if wall_time < idle_time, evaluate cpu load next time*/
+ cur_load = wall_time > idle_time ? (100 * (wall_time - idle_time)) / wall_time : 1;/*if wall_time is equal to idle_time cpu_load is equal to 1*/
+ tmp_freq = cpu_policy->cur;
+ /* Checking Frequency Limit */
+ /*if (max_freq > cpu_policy->max)
+ max_freq = cpu_policy->max;
+ if (min_freq < cpu_policy->min)
+ min_freq = cpu_policy->min;*/
+ min_freq = cpu_policy->min;
+ max_freq = cpu_policy->max;
+ /* CPUs Online Scale Frequency*/
+ if (cpu_policy->cur < freq_for_responsiveness) {
+ inc_cpu_load = atomic_read(&nightmare_tuners_ins.inc_cpu_load_at_min_freq);
+ freq_step = atomic_read(&nightmare_tuners_ins.freq_step_at_min_freq);
+ freq_up_brake = atomic_read(&nightmare_tuners_ins.freq_up_brake_at_min_freq);
+ } else if (cpu_policy->cur > freq_for_responsiveness_max) {
+ freq_step_dec = atomic_read(&nightmare_tuners_ins.freq_step_dec_at_max_freq);
+ }
+ /* Check for frequency increase or for frequency decrease */
+#ifdef CONFIG_CPU_EXYNOS4210
+ if (cur_load >= inc_cpu_load && cpu_policy->cur < max_freq) {
+ tmp_freq = max(min((cpu_policy->cur + ((cur_load + freq_step - freq_up_brake == 0 ? 1 : cur_load + freq_step - freq_up_brake) * 2000)), max_freq), min_freq);
+ } else if (cur_load < dec_cpu_load && cpu_policy->cur > min_freq) {
+ tmp_freq = max(min((cpu_policy->cur - ((100 - cur_load + freq_step_dec == 0 ? 1 : 100 - cur_load + freq_step_dec) * 2000)), max_freq), min_freq);
+ }
+ next_freq = (tmp_freq / 100000) * 100000;
+ if ((next_freq > cpu_policy->cur
+ && (tmp_freq % 100000 > up_sf_step * 1000))
+ || (next_freq < cpu_policy->cur
+ && (tmp_freq % 100000 > down_sf_step * 1000))) {
+ next_freq += 100000;
+ }
+#else
+ if (cur_load >= inc_cpu_load && cpu_policy->cur < max_freq) {
+ tmp_freq = max(min((cpu_policy->cur + ((cur_load + freq_step - freq_up_brake == 0 ? 1 : cur_load + freq_step - freq_up_brake) * 3840)), max_freq), min_freq);
+ } else if (cur_load < dec_cpu_load && cpu_policy->cur > min_freq) {
+ tmp_freq = max(min((cpu_policy->cur - ((100 - cur_load + freq_step_dec == 0 ? 1 : 100 - cur_load + freq_step_dec) * 3840)), max_freq), min_freq);
+ }
+ cpufreq_frequency_table_target(cpu_policy, this_nightmare_cpuinfo->freq_table, tmp_freq,
+ CPUFREQ_RELATION_H, &index);
+ if (this_nightmare_cpuinfo->freq_table[index].frequency != cpu_policy->cur) {
+ cpufreq_frequency_table_target(cpu_policy, this_nightmare_cpuinfo->freq_table, tmp_freq,
+ CPUFREQ_RELATION_L, &index);
+ }
+ next_freq = this_nightmare_cpuinfo->freq_table[index].frequency;
+#endif
+ /*printk(KERN_ERR "FREQ CALC.: CPU[%u], load[%d], target freq[%u], cur freq[%u], min freq[%u], max_freq[%u]\n",cpu, cur_load, next_freq, cpu_policy->cur, cpu_policy->min, max_freq);*/
+ if (next_freq != cpu_policy->cur && cpu_online(cpu)) {
+ __cpufreq_driver_target(cpu_policy, next_freq, CPUFREQ_RELATION_L);
+ }
+ }
+
+}
+
+static void do_nightmare_timer(struct work_struct *work)
+{
+ struct cpufreq_nightmare_cpuinfo *nightmare_cpuinfo;
+ int delay;
+ unsigned int cpu;
+
+ nightmare_cpuinfo = container_of(work, struct cpufreq_nightmare_cpuinfo, work.work);
+ cpu = nightmare_cpuinfo->cpu;
+
+ mutex_lock(&nightmare_cpuinfo->timer_mutex);
+ nightmare_check_cpu(nightmare_cpuinfo);
+ /* We want all CPUs to do sampling nearly on
+ * same jiffy
+ */
+ delay = usecs_to_jiffies(atomic_read(&nightmare_tuners_ins.sampling_rate));
+ if (num_online_cpus() > 1) {
+ delay -= jiffies % delay;
+ }
+
+#ifdef CONFIG_CPU_EXYNOS4210
+ mod_delayed_work_on(cpu, system_wq, &nightmare_cpuinfo->work, delay);
+#else
+ queue_delayed_work_on(cpu, system_wq, &nightmare_cpuinfo->work, delay);
+#endif
+ mutex_unlock(&nightmare_cpuinfo->timer_mutex);
+}
+
+static int cpufreq_governor_nightmare(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu;
+ struct cpufreq_nightmare_cpuinfo *this_nightmare_cpuinfo;
+ int rc, delay;
+
+ cpu = policy->cpu;
+ this_nightmare_cpuinfo = &per_cpu(od_nightmare_cpuinfo, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ mutex_lock(&nightmare_mutex);
+
+ this_nightmare_cpuinfo->cur_policy = policy;
+
+ this_nightmare_cpuinfo->prev_cpu_idle = get_cpu_idle_time_us(cpu, NULL);
+ this_nightmare_cpuinfo->prev_cpu_idle += get_cpu_iowait_time_us(cpu, &this_nightmare_cpuinfo->prev_cpu_wall);
+
+ this_nightmare_cpuinfo->freq_table = cpufreq_frequency_get_table(cpu);
+ this_nightmare_cpuinfo->cpu = cpu;
+
+ mutex_init(&this_nightmare_cpuinfo->timer_mutex);
+
+ nightmare_enable++;
+ /*
+ * Start the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (nightmare_enable == 1) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &nightmare_attr_group);
+ if (rc) {
+ mutex_unlock(&nightmare_mutex);
+ return rc;
+ }
+ }
+
+ /*if (atomic_read(&min_freq_limit[cpu]) == 0)
+ atomic_set(&min_freq_limit[cpu], policy->min);
+
+ if (atomic_read(&max_freq_limit[cpu]) == 0)
+ atomic_set(&max_freq_limit[cpu], policy->max);*/
+
+ mutex_unlock(&nightmare_mutex);
+
+ delay = usecs_to_jiffies(atomic_read(&nightmare_tuners_ins.sampling_rate));
+ if (num_online_cpus() > 1) {
+ delay -= jiffies % delay;
+ }
+
+ this_nightmare_cpuinfo->enable = 1;
+#ifdef CONFIG_CPU_EXYNOS4210
+ INIT_DEFERRABLE_WORK(&this_nightmare_cpuinfo->work, do_nightmare_timer);
+ mod_delayed_work_on(this_nightmare_cpuinfo->cpu, system_wq, &this_nightmare_cpuinfo->work, delay);
+#else
+ INIT_DELAYED_WORK_DEFERRABLE(&this_nightmare_cpuinfo->work, do_nightmare_timer);
+ queue_delayed_work_on(this_nightmare_cpuinfo->cpu, system_wq, &this_nightmare_cpuinfo->work, delay);
+#endif
+
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ this_nightmare_cpuinfo->enable = 0;
+ cancel_delayed_work_sync(&this_nightmare_cpuinfo->work);
+
+ mutex_lock(&nightmare_mutex);
+ nightmare_enable--;
+ mutex_destroy(&this_nightmare_cpuinfo->timer_mutex);
+
+ if (!nightmare_enable) {
+ sysfs_remove_group(cpufreq_global_kobject,
+ &nightmare_attr_group);
+ }
+ mutex_unlock(&nightmare_mutex);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&this_nightmare_cpuinfo->timer_mutex);
+ if (policy->max < this_nightmare_cpuinfo->cur_policy->cur)
+ __cpufreq_driver_target(this_nightmare_cpuinfo->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_nightmare_cpuinfo->cur_policy->cur)
+ __cpufreq_driver_target(this_nightmare_cpuinfo->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&this_nightmare_cpuinfo->timer_mutex);
+
+ break;
+ }
+ return 0;
+}
+
+static int __init cpufreq_gov_nightmare_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_nightmare);
+}
+
+static void __exit cpufreq_gov_nightmare_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_nightmare);
+}
+
+MODULE_AUTHOR("Alucard24@XDA");
+MODULE_DESCRIPTION("'cpufreq_nightmare' - A dynamic cpufreq/cpuhotplug governor v4.1 (SnapDragon)");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE
+fs_initcall(cpufreq_gov_nightmare_init);
+#else
+module_init(cpufreq_gov_nightmare_init);
+#endif
+module_exit(cpufreq_gov_nightmare_exit);
+
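The delay arithmetic in do_nightmare_timer() above is worth unpacking: subtracting `jiffies % delay` phase-locks every CPU's rearm to the same multiple of the sampling period, which is what the "same jiffy" comment is after. A minimal user-space sketch of the same arithmetic, with a made-up tick count and a simplified usecs_to_jiffies() (both are stand-ins, not kernel code):

/* Sketch: phase-aligning a periodic sample to a shared jiffy boundary.
 * HZ, jiffies and the conversion helper are simplified stand-ins. */
#include <stdio.h>

#define HZ 100 /* assumed tick rate: one jiffy = 10 ms */

static unsigned long usecs_to_jiffies_sketch(unsigned long us)
{
	return (us * HZ + 999999) / 1000000; /* round up, as the kernel helper does */
}

int main(void)
{
	unsigned long jiffies = 123457;     /* pretend current tick count */
	unsigned long rate_us = 60000;      /* sampling_rate tunable: 60 ms */
	unsigned long delay = usecs_to_jiffies_sketch(rate_us); /* 6 jiffies */

	/* Every CPU computing this lands on the same multiple of delay. */
	delay -= jiffies % delay;

	printf("rearm in %lu jiffies, firing at tick %lu\n", delay, jiffies + delay);
	return 0;
}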
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 133ce32..82bf4fa 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -415,6 +415,9 @@ extern struct cpufreq_governor cpufreq_gov_intellimm;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_YANKACTIVE)
extern struct cpufreq_governor cpufreq_gov_yankactive;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_yankactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE)
+extern struct cpufreq_governor cpufreq_gov_nightmare;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_nightmare)
#endif
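To make the scaling policy in nightmare_check_cpu() concrete, here is a worked pass through the non-Exynos up-scaling branch. The 3840 kHz-per-load-point multiplier, the zero-step guard and the min/max clamping mirror the patch; every input value is illustrative:

/* Worked example of the nightmare up-scaling step (non-Exynos branch).
 * All inputs are made up; only the formula follows the patch above. */
#include <stdio.h>

int main(void)
{
	unsigned int cur = 1190400;            /* current frequency, kHz */
	unsigned int min_freq = 300000, max_freq = 2457600;
	int cur_load = 85;                     /* measured busy percentage */
	int inc_cpu_load = 70;                 /* scale up at >= 70% load */
	int freq_step = 20, freq_up_brake = 5;

	if (cur_load >= inc_cpu_load && cur < max_freq) {
		/* 85 + 20 - 5 = 100 points; the patch substitutes 1 for 0 */
		int points = cur_load + freq_step - freq_up_brake;
		unsigned int tmp = cur + (points == 0 ? 1 : points) * 3840;

		if (tmp > max_freq)
			tmp = max_freq;
		if (tmp < min_freq)
			tmp = min_freq;
		printf("raw target: %u kHz\n", tmp); /* 1574400 */
	}
	return 0;
}

The governor then snaps this raw target onto the platform's frequency table (CPUFREQ_RELATION_H first, then CPUFREQ_RELATION_L when that pick differs from the current frequency) before handing the result to __cpufreq_driver_target().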

@@ -0,0 +1,472 @@
From fa9ccd0ade182f79bafff583bb8c4d8b889b99aa Mon Sep 17 00:00:00 2001
From: Gokhan Moral <gm@alumni.bilkent.edu.tr>
Date: Mon, 21 Jan 2013 18:54:43 -0600
Subject: [PATCH] block: scheduler: add SIO
---
block/Kconfig.iosched | 14 ++
block/Makefile | 1 +
block/sio-iosched.c | 403 ++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 418 insertions(+)
create mode 100644 block/sio-iosched.c
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 6be069a..5891165 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -73,6 +73,16 @@ config IOSCHED_FIOPS
IOPS equally among all processes in the system. It's mainly for
Flash based storage.
+config IOSCHED_SIO
+ tristate "Simple I/O scheduler"
+ default y
+ ---help---
+ The Simple I/O scheduler is an extremely simple scheduler,
+ based on noop and deadline, that relies on deadlines to
+ ensure fairness. The algorithm does not do any sorting but
+ basic merging, trying to keep a minimum overhead. It is aimed
+ mainly for aleatory access devices (eg: flash devices).
+
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
@@ -99,6 +109,9 @@ choice
config DEFAULT_FIOPS
bool "FIOPS" if IOSCHED_FIOPS=y
+ config DEFAULT_SIO
+ bool "SIO" if IOSCHED_SIO=y
+
config DEFAULT_NOOP
bool "No-op"
@@ -110,6 +123,7 @@ config DEFAULT_IOSCHED
default "row" if DEFAULT_ROW
default "cfq" if DEFAULT_CFQ
default "fiops" if DEFAULT_FIOPS
+ default "sio" if DEFAULT_SIO
default "noop" if DEFAULT_NOOP
endmenu
diff --git a/block/Makefile b/block/Makefile
index afc813a..b962c76 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_IOSCHED_ROW) += row-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o
+obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/sio-iosched.c b/block/sio-iosched.c
new file mode 100644
index 0000000..3661a9a
--- /dev/null
+++ b/block/sio-iosched.c
@@ -0,0 +1,403 @@
+/*
+ * Simple IO scheduler
+ * Based on Noop, Deadline and V(R) IO schedulers.
+ *
+ * Copyright (C) 2012 Miguel Boton <mboton@gmail.com>
+ *
+ *
+ * This algorithm does not do any kind of sorting, as it is aimed for
+ * aleatory access devices, but it does some basic merging. We try to
+ * keep minimum overhead to achieve low latency.
+ *
+ * Asynchronous and synchronous requests are not treated separately, but
+ * we rely on deadlines to ensure fairness.
+ *
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/version.h>
+
+enum { ASYNC, SYNC };
+
+/* Tunables */
+static const int sync_read_expire = HZ / 2; /* max time before a sync read is submitted. */
+static const int sync_write_expire = 2 * HZ; /* max time before a sync write is submitted. */
+
+static const int async_read_expire = 4 * HZ; /* ditto for async, these limits are SOFT! */
+static const int async_write_expire = 16 * HZ; /* ditto for async, these limits are SOFT! */
+
+static const int writes_starved = 2; /* max times reads can starve a write */
+static const int fifo_batch = 8; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
+
+/* Elevator data */
+struct sio_data {
+ /* Request queues */
+ struct list_head fifo_list[2][2];
+
+ /* Attributes */
+ unsigned int batched;
+ unsigned int starved;
+
+ /* Settings */
+ int fifo_expire[2][2];
+ int fifo_batch;
+ int writes_starved;
+};
+
+static void
+sio_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * If next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo.
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ /* Delete next request */
+ rq_fifo_clear(next);
+}
+
+static void
+sio_add_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ /*
+ * Add request to the proper fifo list and set its
+ * expire time.
+ */
+ rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync][data_dir]);
+ list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]);
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
+static int
+sio_queue_empty(struct request_queue *q)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+
+ /* Check if fifo lists are empty */
+ return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) &&
+ list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]);
+}
+#endif
+
+static struct request *
+sio_expired_request(struct sio_data *sd, int sync, int data_dir)
+{
+ struct list_head *list = &sd->fifo_list[sync][data_dir];
+ struct request *rq;
+
+ if (list_empty(list))
+ return NULL;
+
+ /* Retrieve request */
+ rq = rq_entry_fifo(list->next);
+
+ /* Request has expired */
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+static struct request *
+sio_choose_expired_request(struct sio_data *sd)
+{
+ struct request *rq;
+
+ /*
+ * Check expired requests.
+ * Asynchronous requests have priority over synchronous.
+ * Write requests have priority over read.
+ */
+ rq = sio_expired_request(sd, ASYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = sio_expired_request(sd, ASYNC, READ);
+ if (rq)
+ return rq;
+
+ rq = sio_expired_request(sd, SYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = sio_expired_request(sd, SYNC, READ);
+ if (rq)
+ return rq;
+
+ return NULL;
+}
+
+static struct request *
+sio_choose_request(struct sio_data *sd, int data_dir)
+{
+ struct list_head *sync = sd->fifo_list[SYNC];
+ struct list_head *async = sd->fifo_list[ASYNC];
+
+ /*
+ * Retrieve request from available fifo list.
+ * Synchronous requests have priority over asynchronous.
+ * Read requests have priority over write.
+ */
+ if (!list_empty(&sync[data_dir]))
+ return rq_entry_fifo(sync[data_dir].next);
+ if (!list_empty(&async[data_dir]))
+ return rq_entry_fifo(async[data_dir].next);
+
+ if (!list_empty(&sync[!data_dir]))
+ return rq_entry_fifo(sync[!data_dir].next);
+ if (!list_empty(&async[!data_dir]))
+ return rq_entry_fifo(async[!data_dir].next);
+
+ return NULL;
+}
+
+static inline void
+sio_dispatch_request(struct sio_data *sd, struct request *rq)
+{
+ /*
+ * Remove the request from the fifo list
+ * and dispatch it.
+ */
+ rq_fifo_clear(rq);
+ elv_dispatch_add_tail(rq->q, rq);
+
+ sd->batched++;
+
+ if (rq_data_dir(rq))
+ sd->starved = 0;
+ else
+ sd->starved++;
+}
+
+static int
+sio_dispatch_requests(struct request_queue *q, int force)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ struct request *rq = NULL;
+ int data_dir = READ;
+
+ /*
+ * Retrieve any expired request after a batch of
+ * sequential requests.
+ */
+ if (sd->batched > sd->fifo_batch) {
+ sd->batched = 0;
+ rq = sio_choose_expired_request(sd);
+ }
+
+ /* Retrieve request */
+ if (!rq) {
+ if (sd->starved > sd->writes_starved)
+ data_dir = WRITE;
+
+ rq = sio_choose_request(sd, data_dir);
+ if (!rq)
+ return 0;
+ }
+
+ /* Dispatch request */
+ sio_dispatch_request(sd, rq);
+
+ return 1;
+}
+
+static struct request *
+sio_former_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
+ return NULL;
+
+ /* Return former request */
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+sio_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
+ return NULL;
+
+ /* Return latter request */
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *
+sio_init_queue(struct request_queue *q)
+{
+ struct sio_data *sd;
+
+ /* Allocate structure */
+ sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
+ if (!sd)
+ return NULL;
+
+ /* Initialize fifo lists */
+ INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
+ INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
+ INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
+ INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);
+
+ /* Initialize data */
+ sd->batched = 0;
+ sd->fifo_expire[SYNC][READ] = sync_read_expire;
+ sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
+ sd->fifo_expire[ASYNC][READ] = async_read_expire;
+ sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
+ sd->fifo_batch = fifo_batch;
+
+ return sd;
+}
+
+static void
+sio_exit_queue(struct elevator_queue *e)
+{
+ struct sio_data *sd = e->elevator_data;
+
+ BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ]));
+ BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE]));
+ BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ]));
+ BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE]));
+
+ /* Free structure */
+ kfree(sd);
+}
+
+/*
+ * sysfs code
+ */
+
+static ssize_t
+sio_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+sio_var_store(int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtol(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return sio_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1);
+SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1);
+SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1);
+SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1);
+SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
+SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data; \
+ int ret = sio_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0);
+STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
+ sio_##name##_store)
+
+static struct elv_fs_entry sio_attrs[] = {
+ DD_ATTR(sync_read_expire),
+ DD_ATTR(sync_write_expire),
+ DD_ATTR(async_read_expire),
+ DD_ATTR(async_write_expire),
+ DD_ATTR(fifo_batch),
+ DD_ATTR(writes_starved),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_sio = {
+ .ops = {
+ .elevator_merge_req_fn = sio_merged_requests,
+ .elevator_dispatch_fn = sio_dispatch_requests,
+ .elevator_add_req_fn = sio_add_request,
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
+ .elevator_queue_empty_fn = sio_queue_empty,
+#endif
+ .elevator_former_req_fn = sio_former_request,
+ .elevator_latter_req_fn = sio_latter_request,
+ .elevator_init_fn = sio_init_queue,
+ .elevator_exit_fn = sio_exit_queue,
+ },
+
+ .elevator_attrs = sio_attrs,
+ .elevator_name = "sio",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init sio_init(void)
+{
+ /* Register elevator */
+ elv_register(&iosched_sio);
+
+ return 0;
+}
+
+static void __exit sio_exit(void)
+{
+ /* Unregister elevator */
+ elv_unregister(&iosched_sio);
+}
+
+#ifdef CONFIG_FAST_RESUME
+beforeresume_initcall(sio_init);
+#else
+module_init(sio_init);
+#endif
+module_exit(sio_exit);
+
+MODULE_AUTHOR("Miguel Boton");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple IO scheduler");
+MODULE_VERSION("0.2");

@@ -0,0 +1,356 @@
From 56582d323a6b57c908d29f9d8f5687a05edc20b6 Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Tue, 10 Feb 2015 06:20:16 +0100
Subject: [PATCH] block: scheduler: add ZEN
---
arch/arm/configs/ak_bacon_defconfig | 1 +
block/Kconfig.iosched | 11 ++
block/Makefile | 1 +
block/zen-iosched.c | 277 ++++++++++++++++++++++++++++++++++++
4 files changed, 290 insertions(+)
create mode 100644 block/zen-iosched.c
diff --git a/arch/arm/configs/ak_bacon_defconfig b/arch/arm/configs/ak_bacon_defconfig
index 6579e5d..550fde7 100644
--- a/arch/arm/configs/ak_bacon_defconfig
+++ b/arch/arm/configs/ak_bacon_defconfig
@@ -243,6 +243,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_IOSCHED_FIOPS=y
CONFIG_IOSCHED_SIO=y
CONFIG_IOSCHED_BFQ=y
+CONFIG_IOSCHED_ZEN=y
CONFIG_CGROUP_BFQIO=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_DEFAULT_IOSCHED="deadline"
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 5b0a02b..371cb50 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -105,6 +105,13 @@ config IOSCHED_SIO
basic merging, trying to keep a minimum overhead. It is aimed
mainly for aleatory access devices (eg: flash devices).
+config IOSCHED_ZEN
+ tristate "Zen I/O scheduler"
+ default y
+ ---help---
+ FCFS, dispatches are back-inserted, deadlines ensure fairness.
+ Should work best with devices where there is no travel delay.
+
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
@@ -140,6 +147,9 @@ choice
config DEFAULT_NOOP
bool "No-op"
+ config DEFAULT_ZEN
+ bool "ZEN" if IOSCHED_ZEN=y
+
endchoice
config DEFAULT_IOSCHED
@@ -151,6 +161,7 @@ config DEFAULT_IOSCHED
default "sio" if DEFAULT_SIO
default "noop" if DEFAULT_NOOP
default "bfq" if DEFAULT_BFQ
+ default "zen" if DEFAULT_ZEN
endmenu
diff --git a/block/Makefile b/block/Makefile
index 1ab9572..a9832ce 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o
obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
+obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/zen-iosched.c b/block/zen-iosched.c
new file mode 100644
index 0000000..77145de
--- /dev/null
+++ b/block/zen-iosched.c
@@ -0,0 +1,277 @@
+/*
+ * Zen IO scheduler
+ * Primarily based on Noop, deadline, and SIO IO schedulers.
+ *
+ * Copyright (C) 2012 Brandon Berhent <bbedward@gmail.com>
+ *
+ * FCFS, dispatches are back-inserted, deadlines ensure fairness.
+ * Should work best with devices where there is no travel delay.
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+enum zen_data_dir { ASYNC, SYNC };
+
+static const int sync_expire = HZ / 4; /* max time before a sync is submitted. */
+static const int async_expire = 2 * HZ; /* ditto for async, these limits are SOFT! */
+static const int fifo_batch = 1;
+
+struct zen_data {
+ /* Runtime Data */
+ /* Requests are only present on fifo_list */
+ struct list_head fifo_list[2];
+
+ unsigned int batching; /* number of sequential requests made */
+
+ /* tunables */
+ int fifo_expire[2];
+ int fifo_batch;
+};
+
+static inline struct zen_data *
+zen_get_data(struct request_queue *q) {
+ return q->elevator->elevator_data;
+}
+
+static void zen_dispatch(struct zen_data *, struct request *);
+
+static void
+zen_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * if next expires before rq, assign its expire time to arq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ /* next request is gone */
+ rq_fifo_clear(next);
+}
+
+static void zen_add_request(struct request_queue *q, struct request *rq)
+{
+ struct zen_data *zdata = zen_get_data(q);
+ const int dir = rq_data_dir(rq);
+
+ if (zdata->fifo_expire[dir]) {
+ rq_set_fifo_time(rq, jiffies + zdata->fifo_expire[dir]);
+ list_add_tail(&rq->queuelist, &zdata->fifo_list[dir]);
+ }
+}
+
+static void zen_dispatch(struct zen_data *zdata, struct request *rq)
+{
+ /* Remove request from list and dispatch it */
+ rq_fifo_clear(rq);
+ elv_dispatch_add_tail(rq->q, rq);
+
+ /* Increment # of sequential requests */
+ zdata->batching++;
+}
+
+/*
+ * get the first expired request in direction ddir
+ */
+static struct request *
+zen_expired_request(struct zen_data *zdata, int ddir)
+{
+ struct request *rq;
+
+ if (list_empty(&zdata->fifo_list[ddir]))
+ return NULL;
+
+ rq = rq_entry_fifo(zdata->fifo_list[ddir].next);
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+/*
+ * zen_check_fifo returns NULL if there are no expired requests on the fifo,
+ * otherwise it returns the next expired request
+ */
+static struct request *
+zen_check_fifo(struct zen_data *zdata)
+{
+ struct request *rq_sync = zen_expired_request(zdata, SYNC);
+ struct request *rq_async = zen_expired_request(zdata, ASYNC);
+
+ if (rq_async && rq_sync) {
+ if (time_after(rq_fifo_time(rq_async), rq_fifo_time(rq_sync)))
+ return rq_sync;
+ } else if (rq_sync) {
+ return rq_sync;
+ } else if (rq_async) {
+ return rq_async;
+ }
+
+ return NULL;
+}
+
+static struct request *
+zen_choose_request(struct zen_data *zdata)
+{
+ /*
+ * Retrieve request from available fifo list.
+ * Synchronous requests have priority over asynchronous.
+ */
+ if (!list_empty(&zdata->fifo_list[SYNC]))
+ return rq_entry_fifo(zdata->fifo_list[SYNC].next);
+ if (!list_empty(&zdata->fifo_list[ASYNC]))
+ return rq_entry_fifo(zdata->fifo_list[ASYNC].next);
+
+ return NULL;
+}
+
+static int zen_dispatch_requests(struct request_queue *q, int force)
+{
+ struct zen_data *zdata = zen_get_data(q);
+ struct request *rq = NULL;
+
+ /* Check for and issue expired requests */
+ if (zdata->batching > zdata->fifo_batch) {
+ zdata->batching = 0;
+ rq = zen_check_fifo(zdata);
+ }
+
+ if (!rq) {
+ rq = zen_choose_request(zdata);
+ if (!rq)
+ return 0;
+ }
+
+ zen_dispatch(zdata, rq);
+
+ return 1;
+}
+
+static void *zen_init_queue(struct request_queue *q)
+{
+ struct zen_data *zdata;
+
+ zdata = kmalloc_node(sizeof(*zdata), GFP_KERNEL, q->node);
+ if (!zdata)
+ return NULL;
+ INIT_LIST_HEAD(&zdata->fifo_list[SYNC]);
+ INIT_LIST_HEAD(&zdata->fifo_list[ASYNC]);
+ zdata->fifo_expire[SYNC] = sync_expire;
+ zdata->fifo_expire[ASYNC] = async_expire;
+ zdata->fifo_batch = fifo_batch;
+ return zdata;
+}
+
+static void zen_exit_queue(struct elevator_queue *e)
+{
+ struct zen_data *zdata = e->elevator_data;
+
+ BUG_ON(!list_empty(&zdata->fifo_list[SYNC]));
+ BUG_ON(!list_empty(&zdata->fifo_list[ASYNC]));
+ kfree(zdata);
+}
+
+/* Sysfs */
+static ssize_t
+zen_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+zen_var_store(int *var, const char *page, size_t count)
+{
+ *var = simple_strtol(page, NULL, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct zen_data *zdata = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return zen_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(zen_sync_expire_show, zdata->fifo_expire[SYNC], 1);
+SHOW_FUNCTION(zen_async_expire_show, zdata->fifo_expire[ASYNC], 1);
+SHOW_FUNCTION(zen_fifo_batch_show, zdata->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct zen_data *zdata = e->elevator_data; \
+ int __data; \
+ int ret = zen_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(zen_sync_expire_store, &zdata->fifo_expire[SYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(zen_async_expire_store, &zdata->fifo_expire[ASYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(zen_fifo_batch_store, &zdata->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, zen_##name##_show, \
+ zen_##name##_store)
+
+static struct elv_fs_entry zen_attrs[] = {
+ DD_ATTR(sync_expire),
+ DD_ATTR(async_expire),
+ DD_ATTR(fifo_batch),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_zen = {
+ .ops = {
+ .elevator_merge_req_fn = zen_merged_requests,
+ .elevator_dispatch_fn = zen_dispatch_requests,
+ .elevator_add_req_fn = zen_add_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_init_fn = zen_init_queue,
+ .elevator_exit_fn = zen_exit_queue,
+ },
+ .elevator_attrs = zen_attrs,
+ .elevator_name = "zen",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init zen_init(void)
+{
+ elv_register(&iosched_zen);
+
+ return 0;
+}
+
+static void __exit zen_exit(void)
+{
+ elv_unregister(&iosched_zen);
+}
+
+module_init(zen_init);
+module_exit(zen_exit);
+
+MODULE_AUTHOR("Brandon Berhent");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Zen IO scheduler");
+MODULE_VERSION("1.0");

@@ -0,0 +1,98 @@
From 297aa104900af483185e01fa8a47b2e1a6babe1c Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Wed, 15 Apr 2015 15:24:38 +0200
Subject: [PATCH] block: scheduler: add ZEN v2
---
block/zen-iosched.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/block/zen-iosched.c b/block/zen-iosched.c
index 77145de..a3fc2c0 100644
--- a/block/zen-iosched.c
+++ b/block/zen-iosched.c
@@ -16,8 +16,8 @@
enum zen_data_dir { ASYNC, SYNC };
-static const int sync_expire = HZ / 4; /* max time before a sync is submitted. */
-static const int async_expire = 2 * HZ; /* ditto for async, these limits are SOFT! */
+static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */
+static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */
static const int fifo_batch = 1;
struct zen_data {
@@ -25,7 +25,7 @@ struct zen_data {
/* Requests are only present on fifo_list */
struct list_head fifo_list[2];
- unsigned int batching; /* number of sequential requests made */
+ unsigned int batching; /* number of sequential requests made */
/* tunables */
int fifo_expire[2];
@@ -40,17 +40,17 @@ zen_get_data(struct request_queue *q) {
static void zen_dispatch(struct zen_data *, struct request *);
static void
-zen_merged_requests(struct request_queue *q, struct request *rq,
+zen_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
* if next expires before rq, assign its expire time to arq
* and move into next position (next will be deleted) in fifo
*/
- if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
- list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
}
}
@@ -61,11 +61,11 @@ zen_merged_requests(struct request_queue *q, struct request *rq,
static void zen_add_request(struct request_queue *q, struct request *rq)
{
struct zen_data *zdata = zen_get_data(q);
- const int dir = rq_data_dir(rq);
+ const int sync = rq_is_sync(rq);
- if (zdata->fifo_expire[dir]) {
- rq_set_fifo_time(rq, jiffies + zdata->fifo_expire[dir]);
- list_add_tail(&rq->queuelist, &zdata->fifo_list[dir]);
+ if (zdata->fifo_expire[sync]) {
+ rq_set_fifo_time(rq, jiffies + zdata->fifo_expire[sync]);
+ list_add_tail(&rq->queuelist, &zdata->fifo_list[sync]);
}
}
@@ -91,7 +91,7 @@ zen_expired_request(struct zen_data *zdata, int ddir)
return NULL;
rq = rq_entry_fifo(zdata->fifo_list[ddir].next);
- if (time_after(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq_fifo_time(rq)))
return rq;
return NULL;
@@ -257,9 +257,7 @@ static struct elevator_type iosched_zen = {
static int __init zen_init(void)
{
- elv_register(&iosched_zen);
-
- return 0;
+ return elv_register(&iosched_zen);
}
static void __exit zen_exit(void)
@@ -274,4 +272,4 @@ module_exit(zen_exit);
MODULE_AUTHOR("Brandon Berhent");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zen IO scheduler");
-MODULE_VERSION("1.0");
+MODULE_VERSION("1.1");

@@ -0,0 +1,520 @@
From da18817f3c6a0eb67ab1572684d16344a8e44197 Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Tue, 10 Feb 2015 06:25:15 +0100
Subject: [PATCH] block: scheduler: add VR
---
arch/arm/configs/ak_bacon_defconfig | 1 +
block/Kconfig.iosched | 17 +-
block/Makefile | 1 +
block/vr-iosched.c | 432 ++++++++++++++++++++++++++++++++++++
4 files changed, 448 insertions(+), 3 deletions(-)
create mode 100644 block/vr-iosched.c
diff --git a/arch/arm/configs/ak_bacon_defconfig b/arch/arm/configs/ak_bacon_defconfig
index 550fde7..ca2e7a3 100644
--- a/arch/arm/configs/ak_bacon_defconfig
+++ b/arch/arm/configs/ak_bacon_defconfig
@@ -244,6 +244,7 @@ CONFIG_IOSCHED_FIOPS=y
CONFIG_IOSCHED_SIO=y
CONFIG_IOSCHED_BFQ=y
CONFIG_IOSCHED_ZEN=y
+CONFIG_IOSCHED_VR=y
CONFIG_CGROUP_BFQIO=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_DEFAULT_IOSCHED="deadline"
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 371cb50..24695a3 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -106,12 +106,19 @@ config IOSCHED_SIO
mainly for aleatory access devices (eg: flash devices).
config IOSCHED_ZEN
- tristate "Zen I/O scheduler"
- default y
- ---help---
+ tristate "Zen I/O scheduler"
+ default y
+ ---help---
FCFS, dispatches are back-inserted, deadlines ensure fairness.
Should work best with devices where there is no travel delay.
+config IOSCHED_VR
+ tristate "V(R) I/O scheduler"
+ default y
+ ---help---
+ Requests are chosen according to SSTF with a penalty of rev_penalty
+ for switching head direction.
+
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
@@ -150,6 +157,9 @@ choice
config DEFAULT_ZEN
bool "ZEN" if IOSCHED_ZEN=y
+ config DEFAULT_VR
+ bool "VR" if IOSCHED_VR=y
+
endchoice
config DEFAULT_IOSCHED
@@ -162,6 +172,7 @@ config DEFAULT_IOSCHED
default "noop" if DEFAULT_NOOP
default "bfq" if DEFAULT_BFQ
default "zen" if DEFAULT_ZEN
+ default "vr" if DEFAULT_VR
endmenu
diff --git a/block/Makefile b/block/Makefile
index a9832ce..1c32716 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o
obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o
+obj-$(CONFIG_IOSCHED_VR) += vr-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/vr-iosched.c b/block/vr-iosched.c
new file mode 100644
index 0000000..5d8954b
--- /dev/null
+++ b/block/vr-iosched.c
@@ -0,0 +1,432 @@
+/*
+* V(R) I/O Scheduler
+*
+* Copyright (C) 2007 Aaron Carroll <aaronc@gelato.unsw.edu.au>
+*
+*
+* The algorithm:
+*
+* The next request is decided based on its distance from the last
+* request, with a multiplicative penalty of `rev_penalty' applied
+* for reversing the head direction. A rev_penalty of 1 means SSTF
+* behaviour. As this variable is increased, the algorithm approaches
+* pure SCAN. Setting rev_penalty to 0 forces SCAN.
+*
+* Async and sync requests are not treated separately. Instead we
+* rely on deadlines to ensure fairness.
+*
+*/
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+#include <asm/div64.h>
+
+enum vr_data_dir {
+ ASYNC,
+ SYNC,
+};
+
+enum vr_head_dir {
+ FORWARD,
+ BACKWARD,
+};
+
+static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */
+static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */
+static const int fifo_batch = 1;
+static const int rev_penalty = 10; /* penalty for reversing head direction */
+
+struct vr_data {
+ struct rb_root sort_list;
+ struct list_head fifo_list[2];
+
+ struct request *next_rq;
+ struct request *prev_rq;
+
+ unsigned int nbatched;
+ sector_t last_sector; /* head position */
+ int head_dir;
+
+ /* tunables */
+ int fifo_expire[2];
+ int fifo_batch;
+ int rev_penalty;
+};
+
+static void vr_move_request(struct vr_data *, struct request *);
+
+static inline struct vr_data *
+vr_get_data(struct request_queue *q)
+{
+ return q->elevator->elevator_data;
+}
+
+static void
+vr_add_rq_rb(struct vr_data *vd, struct request *rq)
+{
+ elv_rb_add(&vd->sort_list, rq);
+
+ if (blk_rq_pos(rq) >= vd->last_sector) {
+ if (!vd->next_rq || blk_rq_pos(vd->next_rq) > blk_rq_pos(rq))
+ vd->next_rq = rq;
+ } else {
+ if (!vd->prev_rq || blk_rq_pos(vd->prev_rq) < blk_rq_pos(rq))
+ vd->prev_rq = rq;
+ }
+
+ BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);
+ BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq));
+}
+
+static void
+vr_del_rq_rb(struct vr_data *vd, struct request *rq)
+{
+ /*
+ * We might be deleting our cached next request.
+ * If so, find its successor.
+ */
+
+ if (vd->next_rq == rq)
+ vd->next_rq = elv_rb_latter_request(NULL, rq);
+ else if (vd->prev_rq == rq)
+ vd->prev_rq = elv_rb_former_request(NULL, rq);
+
+ BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);
+ BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq));
+
+ elv_rb_del(&vd->sort_list, rq);
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void
+vr_add_request(struct request_queue *q, struct request *rq)
+{
+ struct vr_data *vd = vr_get_data(q);
+ const int dir = rq_is_sync(rq);
+
+ vr_add_rq_rb(vd, rq);
+
+ if (vd->fifo_expire[dir]) {
+ rq_set_fifo_time(rq, jiffies + vd->fifo_expire[dir]);
+ list_add_tail(&rq->queuelist, &vd->fifo_list[dir]);
+ }
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void
+vr_remove_request(struct request_queue *q, struct request *rq)
+{
+ struct vr_data *vd = vr_get_data(q);
+
+ rq_fifo_clear(rq);
+ vr_del_rq_rb(vd, rq);
+}
+
+static int
+vr_merge(struct request_queue *q, struct request **rqp, struct bio *bio)
+{
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+ struct vr_data *vd = vr_get_data(q);
+ struct request *rq = elv_rb_find(&vd->sort_list, sector);
+
+ if (rq && elv_rq_merge_ok(rq, bio)) {
+ *rqp = rq;
+ return ELEVATOR_FRONT_MERGE;
+ }
+ return ELEVATOR_NO_MERGE;
+}
+
+static void
+vr_merged_request(struct request_queue *q, struct request *req, int type)
+{
+ struct vr_data *vd = vr_get_data(q);
+
+ /*
+ * if the merge was a front merge, we need to reposition request
+ */
+ if (type == ELEVATOR_FRONT_MERGE) {
+ vr_del_rq_rb(vd, req);
+ vr_add_rq_rb(vd, req);
+ }
+}
+
+static void
+vr_merged_requests(struct request_queue *q, struct request *rq,
+struct request *next)
+{
+ /*
+ * if next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ vr_remove_request(q, next);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+vr_move_request(struct vr_data *vd, struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (blk_rq_pos(rq) > vd->last_sector)
+ vd->head_dir = FORWARD;
+ else
+ vd->head_dir = BACKWARD;
+
+ vd->last_sector = blk_rq_pos(rq);
+ vd->next_rq = elv_rb_latter_request(NULL, rq);
+ vd->prev_rq = elv_rb_former_request(NULL, rq);
+
+ BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);
+
+ vr_remove_request(q, rq);
+ elv_dispatch_add_tail(q, rq);
+ vd->nbatched++;
+}
+
+/*
+ * get the first expired request in direction ddir
+ */
+static struct request *
+vr_expired_request(struct vr_data *vd, int ddir)
+{
+ struct request *rq;
+
+ if (list_empty(&vd->fifo_list[ddir]))
+ return NULL;
+
+ rq = rq_entry_fifo(vd->fifo_list[ddir].next);
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+/*
+ * Returns the oldest expired request
+ */
+static struct request *
+vr_check_fifo(struct vr_data *vd)
+{
+ struct request *rq_sync = vr_expired_request(vd, SYNC);
+ struct request *rq_async = vr_expired_request(vd, ASYNC);
+
+ if (rq_async && rq_sync) {
+ if (time_after(rq_fifo_time(rq_async), rq_fifo_time(rq_sync)))
+ return rq_sync;
+ }
+ else if (rq_sync)
+ return rq_sync;
+
+ return rq_async;
+}
+
+/*
+* Return the request with the lowest penalty
+*/
+static struct request *
+vr_choose_request(struct vr_data *vd)
+{
+ int penalty = (vd->rev_penalty) ? : INT_MAX;
+ struct request *next = vd->next_rq;
+ struct request *prev = vd->prev_rq;
+ sector_t next_pen, prev_pen;
+
+ BUG_ON(prev && prev == next);
+
+ if (!prev)
+ return next;
+ else if (!next)
+ return prev;
+
+ /* At this point both prev and next are defined and distinct */
+
+ next_pen = blk_rq_pos(next) - vd->last_sector;
+ prev_pen = vd->last_sector - blk_rq_pos(prev);
+
+ if (vd->head_dir == FORWARD)
+ next_pen = do_div(next_pen, penalty);
+ else
+ prev_pen = do_div(prev_pen, penalty);
+
+ if (next_pen <= prev_pen)
+ return next;
+
+ return prev;
+}
+
+static int
+vr_dispatch_requests(struct request_queue *q, int force)
+{
+ struct vr_data *vd = vr_get_data(q);
+ struct request *rq = NULL;
+
+ /* Check for and issue expired requests */
+ if (vd->nbatched > vd->fifo_batch) {
+ vd->nbatched = 0;
+ rq = vr_check_fifo(vd);
+ }
+
+ if (!rq) {
+ rq = vr_choose_request(vd);
+ if (!rq)
+ return 0;
+ }
+
+ vr_move_request(vd, rq);
+
+ return 1;
+}
+
+static void
+vr_exit_queue(struct elevator_queue *e)
+{
+ struct vr_data *vd = e->elevator_data;
+ BUG_ON(!RB_EMPTY_ROOT(&vd->sort_list));
+ kfree(vd);
+}
+
+/*
+* initialize elevator private data (vr_data).
+*/
+static void *vr_init_queue(struct request_queue *q)
+{
+ struct vr_data *vd;
+
+ vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!vd)
+ return NULL;
+
+ INIT_LIST_HEAD(&vd->fifo_list[SYNC]);
+ INIT_LIST_HEAD(&vd->fifo_list[ASYNC]);
+ vd->sort_list = RB_ROOT;
+ vd->fifo_expire[SYNC] = sync_expire;
+ vd->fifo_expire[ASYNC] = async_expire;
+ vd->fifo_batch = fifo_batch;
+ vd->rev_penalty = rev_penalty;
+ return vd;
+}
+
+/*
+ * sysfs parts below
+ */
+
+static ssize_t
+vr_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+vr_var_store(int *var, const char *page, size_t count)
+{
+ *var = simple_strtol(page, NULL, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+struct vr_data *vd = e->elevator_data; \
+int __data = __VAR; \
+if (__CONV) \
+__data = jiffies_to_msecs(__data); \
+return vr_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(vr_sync_expire_show, vd->fifo_expire[SYNC], 1);
+SHOW_FUNCTION(vr_async_expire_show, vd->fifo_expire[ASYNC], 1);
+SHOW_FUNCTION(vr_fifo_batch_show, vd->fifo_batch, 0);
+SHOW_FUNCTION(vr_rev_penalty_show, vd->rev_penalty, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+struct vr_data *vd = e->elevator_data; \
+int __data; \
+int ret = vr_var_store(&__data, (page), count); \
+if (__data < (MIN)) \
+__data = (MIN); \
+else if (__data > (MAX)) \
+__data = (MAX); \
+if (__CONV) \
+*(__PTR) = msecs_to_jiffies(__data); \
+else \
+*(__PTR) = __data; \
+return ret; \
+}
+STORE_FUNCTION(vr_sync_expire_store, &vd->fifo_expire[SYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(vr_async_expire_store, &vd->fifo_expire[ASYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(vr_fifo_batch_store, &vd->fifo_batch, 0, INT_MAX, 0);
+STORE_FUNCTION(vr_rev_penalty_store, &vd->rev_penalty, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+__ATTR(name, S_IRUGO|S_IWUSR, vr_##name##_show, \
+vr_##name##_store)
+
+static struct elv_fs_entry vr_attrs[] = {
+ DD_ATTR(sync_expire),
+ DD_ATTR(async_expire),
+ DD_ATTR(fifo_batch),
+ DD_ATTR(rev_penalty),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_vr = {
+ .ops = {
+ .elevator_merge_fn = vr_merge,
+ .elevator_merged_fn = vr_merged_request,
+ .elevator_merge_req_fn = vr_merged_requests,
+ .elevator_dispatch_fn = vr_dispatch_requests,
+ .elevator_add_req_fn = vr_add_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_init_fn = vr_init_queue,
+ .elevator_exit_fn = vr_exit_queue,
+ },
+
+ .elevator_attrs = vr_attrs,
+ .elevator_name = "vr",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init vr_init(void)
+{
+ elv_register(&iosched_vr);
+
+ return 0;
+}
+
+static void __exit vr_exit(void)
+{
+ elv_unregister(&iosched_vr);
+}
+
+module_init(vr_init);
+module_exit(vr_exit);
+
+MODULE_AUTHOR("Aaron Carroll");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V(R) IO scheduler");

@@ -0,0 +1,508 @@
From 349edfd0842c4afd8e1edb980b194d9b9b69a9df Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Tue, 10 Feb 2015 08:52:40 +0100
Subject: [PATCH] block: scheduler: add SIOPLUS
---
arch/arm/configs/ak_bacon_defconfig | 1 +
block/Kconfig.iosched | 14 ++
block/Makefile | 1 +
block/sioplus-iosched.c | 405 ++++++++++++++++++++++++++++++++++++
include/linux/elevator.h | 2 +
5 files changed, 423 insertions(+)
create mode 100755 block/sioplus-iosched.c
diff --git a/arch/arm/configs/ak_bacon_defconfig b/arch/arm/configs/ak_bacon_defconfig
index ca2e7a3..debdd57 100644
--- a/arch/arm/configs/ak_bacon_defconfig
+++ b/arch/arm/configs/ak_bacon_defconfig
@@ -242,6 +242,7 @@ CONFIG_IOSCHED_ROW=y
CONFIG_IOSCHED_CFQ=y
CONFIG_IOSCHED_FIOPS=y
CONFIG_IOSCHED_SIO=y
+CONFIG_IOSCHED_SIOPLUS=y
CONFIG_IOSCHED_BFQ=y
CONFIG_IOSCHED_ZEN=y
CONFIG_IOSCHED_VR=y
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 24695a3..5984987 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -105,6 +105,16 @@ config IOSCHED_SIO
basic merging, trying to keep a minimum overhead. It is aimed
mainly for aleatory access devices (eg: flash devices).
+config IOSCHED_SIOPLUS
+ tristate "Simple I/O scheduler plus"
+ default y
+ ---help---
+ The Simple I/O scheduler is an extremely simple scheduler,
+ based on noop and deadline, that relies on deadlines to
+ ensure fairness. The algorithm does not do any sorting but
+ basic merging, trying to keep a minimum overhead. It is aimed
+ mainly for aleatory access devices (eg: flash devices).
+
config IOSCHED_ZEN
tristate "Zen I/O scheduler"
default y
@@ -151,6 +161,9 @@ choice
config DEFAULT_SIO
bool "SIO" if IOSCHED_SIO=y
+ config DEFAULT_SIOPLUS
+ bool "SIOPLUS" if IOSCHED_SIOPLUS=y
+
config DEFAULT_NOOP
bool "No-op"
@@ -169,6 +182,7 @@ config DEFAULT_IOSCHED
default "cfq" if DEFAULT_CFQ
default "fiops" if DEFAULT_FIOPS
default "sio" if DEFAULT_SIO
+ default "sioplus" if DEFAULT_SIOPLUS
default "noop" if DEFAULT_NOOP
default "bfq" if DEFAULT_BFQ
default "zen" if DEFAULT_ZEN
diff --git a/block/Makefile b/block/Makefile
index 1c32716..6a12b16 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o
obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
+obj-$(CONFIG_IOSCHED_SIOPLUS) += sioplus-iosched.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o
obj-$(CONFIG_IOSCHED_VR) += vr-iosched.o
diff --git a/block/sioplus-iosched.c b/block/sioplus-iosched.c
new file mode 100755
index 0000000..6809cb8
--- /dev/null
+++ b/block/sioplus-iosched.c
@@ -0,0 +1,405 @@
+/*
+ * Simple IO scheduler plus
+ * Based on Noop, Deadline and V(R) IO schedulers.
+ *
+ * Copyright (C) 2012 Miguel Boton <mboton@gmail.com>
+ * (C) 2013, 2014 Boy Petersen <boypetersen@gmail.com>
+ *
+ *
+ * This algorithm does not do any kind of sorting, as it is aimed for
+ * aleatory access devices, but it does some basic merging. We try to
+ * keep minimum overhead to achieve low latency.
+ *
+ * Asynchronous and synchronous requests are not treated separately, but
+ * we rely on deadlines to ensure fairness.
+ *
+ * The plus version incorporates several fixes and logic improvements.
+ *
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+enum { ASYNC, SYNC };
+
+/* Tunables */
+static const int sync_read_expire = (HZ / 4); /* max time before a sync read is submitted. */
+static const int sync_write_expire = (HZ / 4) * 5; /* max time before a sync write is submitted. */
+
+static const int async_read_expire = (HZ / 2); /* ditto for async, these limits are SOFT! */
+static const int async_write_expire = (HZ * 2); /* ditto for async, these limits are SOFT! */
+
+static const int writes_starved = 1; /* max times reads can starve a write */
+static const int fifo_batch = 3; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
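+
+ /* With CONFIG_HZ=100 (an assumption; check the defconfig), the defaults
+ * above give sync read/write deadlines of ~250 ms / ~1.25 s and softer
+ * async ones of ~500 ms / ~2 s. */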
+
+/* Elevator data */
+struct sio_data {
+ /* Request queues */
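+ /* Indexed as fifo_list[sync][data_dir]: sync uses the ASYNC/SYNC enum
+ * above, data_dir the kernel's READ (0) / WRITE (1) constants. */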
+ struct list_head fifo_list[2][2];
+
+ /* Attributes */
+ unsigned int batched;
+ unsigned int starved;
+
+ /* Settings */
+ int fifo_expire[2][2];
+ int fifo_batch;
+ int writes_starved;
+};
+
+static void
+sio_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * If next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo.
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ /* Delete next request */
+ rq_fifo_clear(next);
+}
+
+static void
+sio_add_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ /*
+ * Add request to the proper fifo list and set its
+ * expire time.
+ */
+ rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync][data_dir]);
+ list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]);
+}
+
+static int
+sio_queue_empty(struct request_queue *q)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+
+ /* Check if fifo lists are empty */
+ return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) &&
+ list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]);
+}
+
+static struct request *
+sio_expired_request(struct sio_data *sd, int sync, int data_dir)
+{
+ struct list_head *list = &sd->fifo_list[sync][data_dir];
+ struct request *rq;
+
+ if (list_empty(list))
+ return NULL;
+
+ /* Retrieve request */
+ rq = rq_entry_fifo(list->next);
+
+ /* Request has expired */
+ if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+static struct request *
+sio_choose_expired_request(struct sio_data *sd)
+{
+ struct request *rq;
+
+ /* Reset (non-expired-)batch-counter */
+ sd->batched = 0;
+
+ /*
+ * Check expired requests.
+ * Asynchronous requests have priority over synchronous.
+ * Write requests have priority over read.
+ */
+ rq = sio_expired_request(sd, ASYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = sio_expired_request(sd, ASYNC, READ);
+ if (rq)
+ return rq;
+
+ rq = sio_expired_request(sd, SYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = sio_expired_request(sd, SYNC, READ);
+ if (rq)
+ return rq;
+
+ return NULL;
+}
+
+static struct request *
+sio_choose_request(struct sio_data *sd, int data_dir)
+{
+ struct list_head *sync = sd->fifo_list[SYNC];
+ struct list_head *async = sd->fifo_list[ASYNC];
+
+ /* Increase (non-expired-)batch-counter */
+ sd->batched++;
+
+ /*
+ * Retrieve request from available fifo list.
+ * Synchronous requests have priority over asynchronous.
+ * Read requests have priority over write.
+ */
+ if (!list_empty(&sync[data_dir]))
+ return rq_entry_fifo(sync[data_dir].next);
+ if (!list_empty(&async[data_dir]))
+ return rq_entry_fifo(async[data_dir].next);
+
+ if (!list_empty(&sync[!data_dir]))
+ return rq_entry_fifo(sync[!data_dir].next);
+ if (!list_empty(&async[!data_dir]))
+ return rq_entry_fifo(async[!data_dir].next);
+
+ return NULL;
+}
+
+static inline void
+sio_dispatch_request(struct sio_data *sd, struct request *rq)
+{
+ /*
+ * Remove the request from the fifo list
+ * and dispatch it.
+ */
+ rq_fifo_clear(rq);
+ elv_dispatch_add_tail(rq->q, rq);
+
+ if (rq_data_dir(rq)) {
+ sd->starved = 0;
+ } else {
+ if (!list_empty(&sd->fifo_list[SYNC][WRITE]) ||
+ !list_empty(&sd->fifo_list[ASYNC][WRITE]))
+ sd->starved++;
+ }
+}
+
+static int
+sio_dispatch_requests(struct request_queue *q, int force)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ struct request *rq = NULL;
+ int data_dir = READ;
+
+ /*
+ * Retrieve any expired request after a batch of
+ * sequential requests.
+ */
+ if (sd->batched >= sd->fifo_batch)
+ rq = sio_choose_expired_request(sd);
+
+ /* Retrieve request */
+ if (!rq) {
+ if (sd->starved >= sd->writes_starved)
+ data_dir = WRITE;
+
+ rq = sio_choose_request(sd, data_dir);
+ if (!rq)
+ return 0;
+ }
+
+ /* Dispatch request */
+ sio_dispatch_request(sd, rq);
+
+ return 1;
+}
+
+static struct request *
+sio_former_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
+ return NULL;
+
+ /* Return former request */
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+sio_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
+ return NULL;
+
+ /* Return latter request */
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *
+sio_init_queue(struct request_queue *q)
+{
+ struct sio_data *sd;
+
+ /* Allocate structure */
+ sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
+ if (!sd)
+ return NULL;
+
+ /* Initialize fifo lists */
+ INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
+ INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
+ INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
+ INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);
+
+ /* Initialize data */
+ sd->batched = 0;
+ sd->starved = 0;
+ sd->fifo_expire[SYNC][READ] = sync_read_expire;
+ sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
+ sd->fifo_expire[ASYNC][READ] = async_read_expire;
+ sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
+ sd->fifo_batch = fifo_batch;
+ sd->writes_starved = writes_starved;
+
+ return sd;
+}
+
+static void
+sio_exit_queue(struct elevator_queue *e)
+{
+ struct sio_data *sd = e->elevator_data;
+
+ BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ]));
+ BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE]));
+ BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ]));
+ BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE]));
+
+ /* Free structure */
+ kfree(sd);
+}
+
+/*
+ * sysfs code
+ */
+
+static ssize_t
+sio_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+sio_var_store(int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtol(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return sio_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1);
+SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1);
+SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1);
+SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1);
+SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
+SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data; \
+ int ret = sio_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
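+/* Each &sd->... argument below is pasted into the generated function body,
+ * where the local sd declared by the macro is in scope. */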
+STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 1, INT_MAX, 0);
+STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 1, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
+ sio_##name##_store)
+
+static struct elv_fs_entry sio_attrs[] = {
+ DD_ATTR(sync_read_expire),
+ DD_ATTR(sync_write_expire),
+ DD_ATTR(async_read_expire),
+ DD_ATTR(async_write_expire),
+ DD_ATTR(fifo_batch),
+ DD_ATTR(writes_starved),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_sioplus = {
+ .ops = {
+ .elevator_merge_req_fn = sio_merged_requests,
+ .elevator_dispatch_fn = sio_dispatch_requests,
+ .elevator_add_req_fn = sio_add_request,
+ .elevator_queue_empty_fn = sio_queue_empty,
+ .elevator_former_req_fn = sio_former_request,
+ .elevator_latter_req_fn = sio_latter_request,
+ .elevator_init_fn = sio_init_queue,
+ .elevator_exit_fn = sio_exit_queue,
+ },
+
+ .elevator_attrs = sio_attrs,
+ .elevator_name = "sioplus",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init sioplus_init(void)
+{
+ /* Register elevator */
+ elv_register(&iosched_sioplus);
+
+ return 0;
+}
+
+static void __exit sioplus_exit(void)
+{
+ /* Unregister elevator */
+ elv_unregister(&iosched_sioplus);
+}
+
+module_init(sioplus_init);
+module_exit(sioplus_exit);
+
+MODULE_AUTHOR("Miguel Boton");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple IO scheduler plus");
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index b36b28f..3e71452 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -22,6 +22,7 @@ typedef void (elevator_bio_merged_fn) (struct request_queue *,
typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_queue_empty_fn) (struct request_queue *);
typedef int (elevator_reinsert_req_fn) (struct request_queue *,
struct request *);
typedef bool (elevator_is_urgent_fn) (struct request_queue *);
@@ -55,6 +56,7 @@ struct elevator_ops
elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
+ elevator_queue_empty_fn *elevator_queue_empty_fn;
elevator_completed_req_fn *elevator_completed_req_fn;
elevator_request_list_fn *elevator_former_req_fn;
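
Note: once a kernel with this patch is booted, the elevator is selected per
block device through sysfs, and the tunables registered via sio_attrs appear
under queue/iosched/. A minimal C sketch, assuming an mmcblk0 device node
(the path and the fifo_batch value are illustrative, not part of the patch):

    #include <stdio.h>

    static int write_attr(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return -1;
            }
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* The kernel matches this token against elevator_name ("sioplus"). */
            if (write_attr("/sys/block/mmcblk0/queue/scheduler", "sioplus"))
                    return 1;

            /* fifo_batch is clamped to [1, INT_MAX] by its store function. */
            return write_attr("/sys/block/mmcblk0/queue/iosched/fifo_batch", "4") ? 1 : 0;
    }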

View file

@ -0,0 +1,339 @@
From 90993e0fa9c09291755d1ea6b0ada38ce5171e3e Mon Sep 17 00:00:00 2001
From: anarkia1976 <stefano.villa1976@gmail.com>
Date: Wed, 11 Feb 2015 06:17:28 +0100
Subject: [PATCH] block: scheduler: add TRIPNDROID
---
arch/arm/configs/ak_bacon_defconfig | 1 +
block/Kconfig.iosched | 10 ++
block/Makefile | 1 +
block/tripndroid-iosched.c | 261 ++++++++++++++++++++++++++++++++++++
4 files changed, 273 insertions(+)
create mode 100644 block/tripndroid-iosched.c
diff --git a/arch/arm/configs/ak_bacon_defconfig b/arch/arm/configs/ak_bacon_defconfig
index debdd57..09a4fce 100644
--- a/arch/arm/configs/ak_bacon_defconfig
+++ b/arch/arm/configs/ak_bacon_defconfig
@@ -246,6 +246,7 @@ CONFIG_IOSCHED_SIOPLUS=y
CONFIG_IOSCHED_BFQ=y
CONFIG_IOSCHED_ZEN=y
CONFIG_IOSCHED_VR=y
+CONFIG_IOSCHED_TRIPNDROID=y
CONFIG_CGROUP_BFQIO=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_DEFAULT_IOSCHED="deadline"
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 5984987..20ab48f 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -129,6 +129,12 @@ config IOSCHED_VR
Requests are chosen according to SSTF with a penalty of rev_penalty
for switching head direction.
+config IOSCHED_TRIPNDROID
+ tristate "Tripndroid"
+ default y
+ ---help---
+ The TripNDroid I/O scheduler: a simple deadline/FIFO based
+ design from TripNDroid Mobile Engineering.
+
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
@@ -173,6 +179,9 @@ choice
config DEFAULT_VR
bool "VR" if IOSCHED_VR=y
+ config DEFAULT_TRIPNDROID
+ bool "TRIPNDROID" if IOSCHED_TRIPNDROID=y
+
endchoice
config DEFAULT_IOSCHED
@@ -187,6 +196,7 @@ config DEFAULT_IOSCHED
default "bfq" if DEFAULT_BFQ
default "zen" if DEFAULT_ZEN
default "vr" if DEFAULT_VR
+ default "tripndroid" if DEFAULT_TRIPNDROID
endmenu
diff --git a/block/Makefile b/block/Makefile
index 6a12b16..9e525b2 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_IOSCHED_SIOPLUS) += sioplus-iosched.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o
obj-$(CONFIG_IOSCHED_VR) += vr-iosched.o
+obj-$(CONFIG_IOSCHED_TRIPNDROID) += tripndroid-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/tripndroid-iosched.c b/block/tripndroid-iosched.c
new file mode 100644
index 0000000..a4d0080
--- /dev/null
+++ b/block/tripndroid-iosched.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2013, TripNDroid Mobile Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+enum { ASYNC, SYNC };
+
+static const int sync_read_expire = 1 * HZ; /* max time before a sync read is submitted. */
+static const int sync_write_expire = 1 * HZ; /* max time before a sync write is submitted. */
+static const int async_read_expire = 2 * HZ; /* ditto for async, these limits are SOFT! */
+static const int async_write_expire = 2 * HZ; /* ditto for async, these limits are SOFT! */
+
+static const int writes_starved = 1; /* max times reads can starve a write */
+static const int fifo_batch = 1; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
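+
+ /* With CONFIG_HZ=100 (an assumption), sync requests expire after ~1 s and
+ * async after ~2 s; fifo_batch = 1 keeps batches short, so the expiry
+ * check in tripndroid_dispatch_requests runs every couple of requests. */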
+
+struct tripndroid_data {
+
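+ /* fifo_list[sync][data_dir], same ASYNC/SYNC x READ/WRITE layout as sioplus. */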
+ struct list_head fifo_list[2][2];
+
+ unsigned int batched;
+ unsigned int starved;
+
+ int fifo_expire[2][2];
+ int fifo_batch;
+ int writes_starved;
+};
+
+static void tripndroid_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * If next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo.
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ rq_fifo_clear(next);
+}
+
+static void tripndroid_add_request(struct request_queue *q, struct request *rq)
+{
+ struct tripndroid_data *td = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ rq_set_fifo_time(rq, jiffies + td->fifo_expire[sync][data_dir]);
+ /* Queue at the tail so list->next is always the oldest request;
+ * the expiry and dispatch paths depend on this FIFO order. */
+ list_add_tail(&rq->queuelist, &td->fifo_list[sync][data_dir]);
+}
+
+static struct request *tripndroid_expired_request(struct tripndroid_data *td, int sync, int data_dir)
+{
+ struct list_head *list = &td->fifo_list[sync][data_dir];
+ struct request *rq;
+
+ if (list_empty(list))
+ return NULL;
+
+ rq = rq_entry_fifo(list->next);
+
+ if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+static struct request *tripndroid_choose_expired_request(struct tripndroid_data *td)
+{
+ struct request *rq;
+
+ /* Asynchronous requests have priority over synchronous.
+ * Write requests have priority over read. */
+
+ rq = tripndroid_expired_request(td, ASYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = tripndroid_expired_request(td, ASYNC, READ);
+ if (rq)
+ return rq;
+
+ rq = tripndroid_expired_request(td, SYNC, WRITE);
+ if (rq)
+ return rq;
+ rq = tripndroid_expired_request(td, SYNC, READ);
+ if (rq)
+ return rq;
+
+ return NULL;
+}
+
+static struct request *tripndroid_choose_request(struct tripndroid_data *td, int data_dir)
+{
+ struct list_head *sync = td->fifo_list[SYNC];
+ struct list_head *async = td->fifo_list[ASYNC];
+
+ if (!list_empty(&sync[data_dir]))
+ return rq_entry_fifo(sync[data_dir].next);
+ if (!list_empty(&sync[!data_dir]))
+ return rq_entry_fifo(sync[!data_dir].next);
+
+ if (!list_empty(&async[data_dir]))
+ return rq_entry_fifo(async[data_dir].next);
+ if (!list_empty(&async[!data_dir]))
+ return rq_entry_fifo(async[!data_dir].next);
+
+ return NULL;
+}
+
+static inline void tripndroid_dispatch_request(struct tripndroid_data *td, struct request *rq)
+{
+ /* Dispatch the request */
+ rq_fifo_clear(rq);
+ elv_dispatch_add_tail(rq->q, rq);
+
+ td->batched++;
+
+ if (rq_data_dir(rq))
+ td->starved = 0;
+ else
+ td->starved++;
+}
+
+static int tripndroid_dispatch_requests(struct request_queue *q, int force)
+{
+ struct tripndroid_data *td = q->elevator->elevator_data;
+ struct request *rq = NULL;
+ int data_dir = READ;
+
+ if (td->batched > td->fifo_batch) {
+ td->batched = 0;
+ rq = tripndroid_choose_expired_request(td);
+ }
+
+ if (!rq) {
+ if (td->starved > td->writes_starved)
+ data_dir = WRITE;
+
+ rq = tripndroid_choose_request(td, data_dir);
+ if (!rq)
+ return 0;
+ }
+
+ tripndroid_dispatch_request(td, rq);
+
+ return 1;
+}
+
+static struct request *tripndroid_former_request(struct request_queue *q, struct request *rq)
+{
+ struct tripndroid_data *td = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.prev == &td->fifo_list[sync][data_dir])
+ return NULL;
+
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *tripndroid_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct tripndroid_data *td = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+ const int data_dir = rq_data_dir(rq);
+
+ if (rq->queuelist.next == &td->fifo_list[sync][data_dir])
+ return NULL;
+
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *tripndroid_init_queue(struct request_queue *q)
+{
+ struct tripndroid_data *td;
+
+ td = kmalloc_node(sizeof(*td), GFP_KERNEL, q->node);
+ if (!td)
+ return NULL;
+
+ INIT_LIST_HEAD(&td->fifo_list[SYNC][READ]);
+ INIT_LIST_HEAD(&td->fifo_list[SYNC][WRITE]);
+ INIT_LIST_HEAD(&td->fifo_list[ASYNC][READ]);
+ INIT_LIST_HEAD(&td->fifo_list[ASYNC][WRITE]);
+
+ td->batched = 0;
+ td->starved = 0;
+ td->fifo_expire[SYNC][READ] = sync_read_expire;
+ td->fifo_expire[SYNC][WRITE] = sync_write_expire;
+ td->fifo_expire[ASYNC][READ] = async_read_expire;
+ td->fifo_expire[ASYNC][WRITE] = async_write_expire;
+ td->fifo_batch = fifo_batch;
+ td->writes_starved = writes_starved;
+
+ return td;
+}
+
+static void tripndroid_exit_queue(struct elevator_queue *e)
+{
+ struct tripndroid_data *td = e->elevator_data;
+
+ BUG_ON(!list_empty(&td->fifo_list[SYNC][READ]));
+ BUG_ON(!list_empty(&td->fifo_list[SYNC][WRITE]));
+ BUG_ON(!list_empty(&td->fifo_list[ASYNC][READ]));
+ BUG_ON(!list_empty(&td->fifo_list[ASYNC][WRITE]));
+
+ kfree(td);
+}
+
+static struct elevator_type iosched_tripndroid = {
+ .ops = {
+ .elevator_merge_req_fn = tripndroid_merged_requests,
+ .elevator_dispatch_fn = tripndroid_dispatch_requests,
+ .elevator_add_req_fn = tripndroid_add_request,
+ .elevator_former_req_fn = tripndroid_former_request,
+ .elevator_latter_req_fn = tripndroid_latter_request,
+ .elevator_init_fn = tripndroid_init_queue,
+ .elevator_exit_fn = tripndroid_exit_queue,
+ },
+ .elevator_name = "tripndroid",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init tripndroid_init(void)
+{
+ elv_register(&iosched_tripndroid);
+ return 0;
+}
+
+static void __exit tripndroid_exit(void)
+{
+ elv_unregister(&iosched_tripndroid);
+}
+
+module_init(tripndroid_init);
+module_exit(tripndroid_exit);
+
+MODULE_AUTHOR("TripNRaVeR");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TripNDroid IO Scheduler");
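
Note: unlike sioplus, tripndroid registers no elevator_attrs, so its expiry
times and batch size are compile-time constants; the practical runtime check
is whether the elevator registered at all. A small C sketch, again assuming
an mmcblk0 device path:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/block/mmcblk0/queue/scheduler", "r");

            if (!f) {
                    perror("scheduler");
                    return 1;
            }
            /* Prints the registered elevators; the active one is bracketed,
             * e.g. "noop deadline sio sioplus [tripndroid]". */
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }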