@luca020400
Created April 10, 2015 11:02
patch
This file has been truncated.
diff --git a/.gitignore b/.gitignore
index 57af07c..970539b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,86 +1 @@
-#
-# NOTE! Don't add files that are generated in specific
-# subdirectories here. Add them in the ".gitignore" file
-# in that subdirectory instead.
-#
-# NOTE! Please use 'git ls-files -i --exclude-standard'
-# command after changing this file, to see if there are
-# any tracked files which get ignored after the change.
-#
-# Normal rules
-#
-.*
-*.o
-*.o.*
-*.a
-*.s
-*.ko
-*.so
-*.so.dbg
-*.mod.c
-*.i
-*.lst
-*.symtypes
-*.order
-modules.builtin
-*.elf
-*.bin
-*.gz
-*.bz2
-*.lzma
-*.xz
-*.lzo
-*.patch
-*.gcno
-
-#
-# Top-level generic files
-#
-/tags
-/TAGS
-/linux
-/vmlinux
-/vmlinuz
-/System.map
-/Module.markers
-/Module.symvers
-
-#
-# Debian directory (make deb-pkg)
-#
-/debian/
-
-#
-# git files that we don't want to ignore even it they are dot-files
-#
-!.gitignore
-!.mailmap
-
-#
-# Generated include files
-#
-include/config
-include/linux/version.h
-include/generated
-arch/*/include/generated
-
-# stgit generated dirs
-patches-*
-
-# quilt's files
-patches
-series
-
-# cscope files
-cscope.*
-ncscope.*
-
-# gnu global files
-GPATH
-GRTAGS
-GSYMS
-GTAGS
-
-*.orig
-*~
-\#*#
+/kbuild.sh
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index d62d0c6..5c72eed 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -49,30 +49,3 @@ DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
consistent or non-consistent memory as it sees fit. By using this API,
you are guaranteeing to the platform that you have all the correct and
necessary sync points for this memory in the driver.
-
-DMA_ATTR_NO_KERNEL_MAPPING
---------------------------
-
-DMA_ATTR_NO_KERNEL_MAPPING lets the platform to avoid creating a kernel
-virtual mapping for the allocated buffer. On some architectures creating
-such mapping is non-trivial task and consumes very limited resources
-(like kernel virtual address space or dma consistent address space).
-Buffers allocated with this attribute can be only passed to user space
-by calling dma_mmap_attrs(). By using this API, you are guaranteeing
-that you won't dereference the pointer returned by dma_alloc_attr(). You
-can threat it as a cookie that must be passed to dma_mmap_attrs() and
-dma_free_attrs(). Make sure that both of these also get this attribute
-set on each call.
-
-Since it is optional for platforms to implement
-DMA_ATTR_NO_KERNEL_MAPPING, those that do not will simply ignore the
-attribute and exhibit default behavior.
-
-DMA_ATTR_STRONGLY_ORDERED
--------------------------
-
-DMA_ATTR_STRONGLY_ORDERED allocates memory with a very restrictive type
-of mapping (no unaligned accesses, no re-ordering, no write merging, no
-buffering, no pre-fetching). This has severe performance penalties and
-should not be used for general purpose DMA allocations. It should only
-be used if one of the restrictions on strongly ordered memory is required.
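[Note: the hunk above deletes the documentation for two attributes. For reference, the DMA_ATTR_NO_KERNEL_MAPPING flow it described looked roughly like this -- a minimal sketch pieced together from the deleted text and the dma_attrs helpers removed later in this patch; dev and size are placeholders:

	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>

	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t handle;
	void *cookie;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/* cookie is opaque: never dereferenced, only handed back to
	 * dma_mmap_attrs()/dma_free_attrs() with the same attrs set */
	cookie = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
	...
	dma_free_attrs(dev, size, cookie, handle, &attrs);
]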
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
deleted file mode 100644
index 1ec3081..0000000
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-MSM Bus Scaling Driver
-
-The msm bus scaling driver provides the ability to configure
-bus performance parameters across the entire chip-set.
-Various clients use MSM scaling APIs to request bandwidth
-between multiple master-slave pairs. The bus driver then finds
-the optimal path between the master and the slave, and aggregates
-the bandwidth and clock requests for all master-slave pairs on
-that path, and programs hardware accordingly.
-
-The device-tree data required for bus-scaling can be embedded within
-the clients' device nodes. The clients can register with the bus driver
-using the following properties:
-
-- qcom,msm_bus,name: String representing the client-name
-- qcom,msm_bus,num_cases: Total number of usecases
-- qcom,msm_bus,active_only: Context flag for requests in active or
- dual (active & sleep) contex
-- qcom,msm_bus,num_paths: Total number of master-slave pairs
-- qcom,msm_bus,vectors: Arrays of unsigned integers representing:
- master-id, slave-id, arbitrated bandwidth,
- instantaneous bandwidth
-
-Example:
-
- qcom,msm_bus,name = "client-name";
- qcom,msm_bus,num_cases = <3>;
- qcom,msm_bus,active_only = <0>;
- qcom,msm_bus,num_paths = <2>;
- qcom,msm_bus,vectors =
- <22 512 0 0>, <26 512 0 0>,
- <22 512 320000 320000000>, <26 512 3200000 320000000>,
- <22 512 160000 160000000>, <26 512 1600000 160000000>;
-
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
deleted file mode 100644
index 5c6b804..0000000
--- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-ION Memory Manager (ION)
-
-ION is a memory manager that allows for sharing of buffers between different
-processes and between user space and kernel space. ION manages different
-memory spaces by separating the memory spaces into "heaps". Depending on the
-type of heap ION must reserve memory using the msm specific memory reservation
-bindings (see Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
-
-Required properties
-
-- compatible: "qcom,msm-ion"
-- reg: The ID of the ION heap.
-
-Optional properties
-
-- compatible: "qcom,msm-ion-reserve" This is required if memory is to be reserved
- as specified by qcom,memory-reservation-size below.
-- qcom,heap-align: Alignment of start of the memory in the heap.
-- qcom,heap-adjacent: ID of heap this heap needs to be adjacent to.
-- qcom,memory-reservation-size: size of reserved memory for the ION heap.
-- qcom,memory-reservation-type: type of memory to be reserved
-(see memory-reserve.txt for information about memory reservations)
-
-Example:
- qcom,ion {
- compatible = "qcom,msm-ion";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,ion-heap@30 { /* SYSTEM HEAP */
- reg = <30>;
- };
-
- qcom,ion-heap@8 { /* CP_MM HEAP */
- compatible = "qcom,msm-ion-reserve";
- reg = <8>;
- qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x7800000>;
- };
-
- qcom,ion-heap@29 { /* FIRMWARE HEAP */
- compatible = "qcom,msm-ion-reserve";
- reg = <29>;
- qcom,heap-align = <0x20000>;
- qcom,heap-adjacent = <8>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0xA00000>;
-
- };
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index 3b5726c..67933e7 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -14,10 +14,6 @@ Required properties:
translation context.
- qcom,iommu-ctx-name : Name of the context bank
-Optional properties:
-- qcom,needs-alt-core-clk : boolean to enable the secondary core clock for
- access to the IOMMU configuration registers
-
Example:
qcom,iommu@fda64000 {
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 8dfb6a5..b0714d8 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -29,9 +29,6 @@ Rules on what kind of patches are accepted, and which ones are not, into the
Procedure for submitting patches to the -stable tree:
- - If the patch covers files in net/ or drivers/net please follow netdev stable
- submission guidelines as described in
- Documentation/networking/netdev-FAQ.txt
- Send the patch, after verifying that it follows the above rules, to
stable@vger.kernel.org. You must note the upstream commit ID in the
changelog of your submission, as well as the kernel version you wish
diff --git a/Makefile b/Makefile
index f156af2..0f8be35 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 104
+SUBLEVEL = 103
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 5ebab58..7a3d38d 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -489,11 +489,6 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
-#define ioread16be(p) be16_to_cpu(ioread16(p))
-#define ioread32be(p) be32_to_cpu(ioread32(p))
-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
-
#define inb_p inb
#define inw_p inw
#define inl_p inl
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index b8ce18f..a0a5d27 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,7 +12,6 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
-#include <asm/special_insns.h>
#include "op_impl.h"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 25a0dd2..bc7bf53 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,7 +3,6 @@ config ARM
default y
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_MEMBLOCK
@@ -247,14 +246,6 @@ config GENERIC_BUG
def_bool y
depends on BUG
-config ARCH_RANDOM
- bool "SOC specific random number generation"
- help
- Allow the kernel to use an architecture specific implementation for
- random number generation
-
- If unsure, say N
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9d7eb53..210ad1b 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,8 +173,7 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr <= safe_dma_addr &&
- b->safe_dma_addr + b->size > safe_dma_addr) {
+ if (b->safe_dma_addr == safe_dma_addr) {
rb = b;
break;
}
@@ -308,9 +307,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
{
dma_addr_t dma_addr;
int ret;
@@ -325,7 +323,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
if (ret == 0) {
- arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+ __dma_page_cpu_to_dev(page, offset, size, dir);
return dma_addr;
}
@@ -336,6 +334,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
return map_single(dev, page_address(page) + offset, size, dir);
}
+EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +342,8 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
* the safe buffer. (basically return things back to the way they
* should be)
*/
-static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
{
struct safe_buffer *buf;
@@ -353,18 +352,19 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
- arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+ dma_addr & ~PAGE_MASK, size, dir);
return;
}
unmap_single(dev, buf, size, dir);
}
+EXPORT_SYMBOL(__dma_unmap_page);
-static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
+int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ unsigned long off, size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
- unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -373,8 +373,6 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
- off = addr - buf->safe_dma_addr;
-
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -390,21 +388,12 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
}
return 0;
}
+EXPORT_SYMBOL(dmabounce_sync_for_cpu);
-static void dmabounce_sync_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
- return;
-
- arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
-}
-
-static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
+int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ unsigned long off, size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
- unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -413,8 +402,6 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
- off = addr - buf->safe_dma_addr;
-
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -430,38 +417,7 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
}
return 0;
}
-
-static void dmabounce_sync_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__dmabounce_sync_for_device(dev, handle, size, dir))
- return;
-
- arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
-}
-
-static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
-{
- if (dev->archdata.dmabounce)
- return 0;
-
- return arm_dma_ops.set_dma_mask(dev, dma_mask);
-}
-
-static struct dma_map_ops dmabounce_ops = {
- .alloc = arm_dma_alloc,
- .free = arm_dma_free,
- .mmap = arm_dma_mmap,
- .map_page = dmabounce_map_page,
- .unmap_page = dmabounce_unmap_page,
- .sync_single_for_cpu = dmabounce_sync_for_cpu,
- .sync_single_for_device = dmabounce_sync_for_device,
- .map_sg = arm_dma_map_sg,
- .unmap_sg = arm_dma_unmap_sg,
- .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
- .sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = dmabounce_set_mask,
-};
+EXPORT_SYMBOL(dmabounce_sync_for_device);
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
@@ -523,7 +479,6 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif
dev->archdata.dmabounce = device_info;
- set_dma_ops(dev, &dmabounce_ops);
dev_info(dev, "dmabounce: registered device\n");
@@ -542,7 +497,6 @@ void dmabounce_unregister_dev(struct device *dev)
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
- set_dma_ops(dev, NULL);
if (!device_info) {
dev_warn(dev,
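[Note: both the removed ops-based path and the restored direct path reach dmabounce through the same registration call. A sketch of hooking a device up to it -- the pool sizes and the 24-bit window are illustrative assumptions, and the four-argument form is assumed from the 3.4-era prototype:

	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
			size_t size)
	{
		/* assumed: bounce anything beyond a 24-bit DMA window */
		return (addr + size) > (1UL << 24);
	}

	/* 1 KiB buffers come from a preallocated pool, 4 KiB on demand */
	err = dmabounce_register_dev(dev, 1024, 4096, my_needs_bounce);
	...
	dmabounce_unregister_dev(dev);
]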
diff --git a/arch/arm/configs/u8833_defconfig b/arch/arm/configs/u8833_defconfig
index 859a639..4a7a464 100644
--- a/arch/arm/configs/u8833_defconfig
+++ b/arch/arm/configs/u8833_defconfig
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm 3.4.104 Kernel Configuration
+# Linux/arm 3.4.102 Kernel Configuration
#
CONFIG_ARM=y
CONFIG_ARM_HAS_SG_CHAIN=y
@@ -28,7 +28,6 @@ CONFIG_NEED_MACH_IO_H=y
CONFIG_NEED_MACH_MEMORY_H=y
CONFIG_PHYS_OFFSET=0x00200000
CONFIG_GENERIC_BUG=y
-# CONFIG_ARCH_RANDOM is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_HAVE_IRQ_WORK=y
CONFIG_IRQ_WORK=y
@@ -39,7 +38,7 @@ CONFIG_IRQ_WORK=y
CONFIG_EXPERIMENTAL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
-CONFIG_LOCALVERSION="-Chil360-experimental"
+CONFIG_LOCALVERSION="-Chil360-v0.50"
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_LZMA=y
@@ -167,7 +166,7 @@ CONFIG_ARCH_USE_BUILTIN_BSWAP=y
CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_CLK=y
@@ -470,6 +469,10 @@ CONFIG_MSM_DALRPC=y
CONFIG_MSM_DALRPC_TEST=m
# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set
CONFIG_CHIL360_OC=y
+# CONFIG_CHIL360_RAM_STOCK is not set
+# CONFIG_CHIL360_RAM_MEDIUM is not set
+CONFIG_CHIL360_RAM_HIGH=y
+# CONFIG_CHIL360_RAM_EXTRA_HIGH is not set
# CONFIG_MSM_AVS_HW is not set
# CONFIG_MSM_HW3D is not set
CONFIG_MSM_ADSP=y
@@ -1203,9 +1206,7 @@ CONFIG_REGMAP_I2C=y
CONFIG_DMA_SHARED_BUFFER=y
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
-CONFIG_SYNC=y
-CONFIG_SW_SYNC=y
-CONFIG_SW_SYNC_USER=y
+# CONFIG_SYNC is not set
CONFIG_CMA=y
# CONFIG_CMA_DEBUG is not set
@@ -1218,7 +1219,7 @@ CONFIG_CMA_SIZE_SEL_PERCENTAGE=y
# CONFIG_CMA_SIZE_SEL_MIN is not set
# CONFIG_CMA_SIZE_SEL_MAX is not set
CONFIG_CMA_ALIGNMENT=8
-CONFIG_CMA_AREAS=7
+CONFIG_CMA_AREAS=4
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_TESTS is not set
@@ -1313,6 +1314,7 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
#
# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_AD525X_DPOT is not set
+CONFIG_ANDROID_PMEM=y
# CONFIG_ATMEL_PWM is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
@@ -2298,9 +2300,8 @@ CONFIG_MSM_ACTUATOR=y
CONFIG_QUP_EXCLUSIVE_TO_CAMERA=y
# CONFIG_S5K3L1YX is not set
# CONFIG_IMX091 is not set
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+# CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE is not set
# CONFIG_OV7692 is not set
-# CONFIG_SENSOR_POWER_CHECK_PATCH is not set
# CONFIG_V4L_MEM2MEM_DRIVERS is not set
CONFIG_RADIO_ADAPTERS=y
# CONFIG_RADIO_SI470X is not set
@@ -2510,8 +2511,6 @@ CONFIG_FB_MSM_PANEL_NONE=y
CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888=y
# CONFIG_FB_MSM_EBI2_EPSON_S1D_QVGA_PANEL is not set
# CONFIG_FB_MSM_EBI2_PANEL_DETECT is not set
-# CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL is not set
-# CONFIG_FB_MSM_QPIC_PANEL_DETECT is not set
CONFIG_FB_DYNAMIC_GAMMA=y
CONFIG_FB_AUTO_CABC=y
# CONFIG_HUAWEI_OLD_BACKLIGHT is not set
@@ -3197,7 +3196,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=2048
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
@@ -3354,8 +3353,7 @@ CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_PCRYPT is not set
CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_CRYPTD=y
-CONFIG_CRYPTO_ABLK_HELPER=y
+# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
# CONFIG_CRYPTO_TEST is not set
@@ -3408,7 +3406,6 @@ CONFIG_CRYPTO_SHA256=y
#
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_ARM=y
-CONFIG_CRYPTO_AES_ARM_BS=y
# CONFIG_CRYPTO_ANUBIS is not set
CONFIG_CRYPTO_ARC4=y
# CONFIG_CRYPTO_BLOWFISH is not set
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
deleted file mode 100644
index 5530d45..0000000
--- a/arch/arm/include/asm/archrandom.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef ARM_ASM_ARCHRANDOM_H
-#define ARM_ASM_ARCHRANDOM_H
-
-extern int arch_get_random_long(unsigned long *v);
-extern int arch_get_random_int(unsigned int *v);
-
-#endif
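[Note: the header removed above declared the hooks that CONFIG_ARCH_RANDOM (also dropped from arch/arm/Kconfig in this patch) exposed to the rest of the kernel. A caller would probe them like this -- a sketch assuming the usual nonzero-on-success convention; the consumer function is hypothetical:

	unsigned long v;

	if (arch_get_random_long(&v))
		seed_pool_with(v);	/* hypothetical consumer */
]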
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 6e2cb0e..7aa3680 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,7 +7,6 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
- struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index abb222f..3536307 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,34 +5,12 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#define DMA_ERROR_CODE (~0)
-extern struct dma_map_ops arm_dma_ops;
-
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
- return &arm_dma_ops;
-}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- BUG_ON(!dev);
- dev->archdata.dma_ops = ops;
-}
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
@@ -86,6 +64,63 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_dev_to_cpu(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
+
+/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -148,170 +183,69 @@ static inline void dma_coherent_post_ops(void)
#endif
}
-extern int dma_supported(struct device *dev, u64 mask);
-
/**
- * arm_dma_alloc - allocate consistent memory for DMA
+ * dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
*
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA. This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
*/
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
-
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
- BUG_ON(!ops);
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
+ * dma_alloc_coherent().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
-
-#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
+ * dma_mmap_coherent - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
-
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
-static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
-
-static inline void dma_free_stronglyordered(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-static inline int dma_mmap_stronglyordered(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
-static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
-static inline void dma_free_nonconsistent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-static inline int dma_mmap_nonconsistent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
+/**
+ * dma_alloc_writecombine - allocate writecombining memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, buffered memory for a device for
+ * performing DMA. This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+ gfp_t);
+#define dma_free_writecombine(dev,size,cpu_addr,handle) \
+ dma_free_coherent(dev,size,cpu_addr,handle)
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
/*
* This can be called during boot to increase the size of the consistent
@@ -320,6 +254,8 @@ static inline int dma_mmap_nonconsistent(struct device *dev,
*/
extern void __init init_consistent_dma_size(unsigned long size);
+
+#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -359,7 +295,82 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
+/*
+ * The DMA API, implemented by dmabounce.c. See below for descriptions.
+ */
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
+ unsigned long, size_t, enum dma_data_direction);
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+#else
+static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+ return 1;
+}
+
+static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+ return 1;
+}
+
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_single() or
+ * dma_sync_single_for_cpu().
+ */
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ unsigned long offset;
+ struct page *page;
+ dma_addr_t addr;
+
+ BUG_ON(!virt_addr_valid(cpu_addr));
+ BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
+ BUG_ON(!valid_dma_direction(dir));
+
+ page = virt_to_page(cpu_addr);
+ offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+ addr = __dma_map_page(dev, page, offset, size, dir);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+ return addr;
+}
/**
* dma_cache_pre_ops - clean or invalidate cache before dma transfer is
@@ -412,17 +423,146 @@ static inline void dma_cache_post_ops(void *virtual_addr,
___dma_single_cpu_to_dev(virtual_addr,
size, DMA_FROM_DEVICE);
}
+
+/**
+ * dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ addr = __dma_map_page(dev, page, offset, size, dir);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_single)
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
+ *
+ * Unmap a single streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ debug_dma_unmap_page(dev, handle, size, dir, true);
+ __dma_unmap_page(dev, handle, size, dir);
+}
+
+/**
+ * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ debug_dma_unmap_page(dev, handle, size, dir, false);
+ __dma_unmap_page(dev, handle, size, dir);
+}
+
+/**
+ * dma_sync_single_range_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @offset: offset of region to start sync
+ * @size: size of region to sync
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
+ *
+ * Make physical memory consistent for a single streaming mode DMA
+ * translation after a transfer.
+ *
+ * If you perform a dma_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, you
+ * must first the perform a dma_sync_for_device, and then the
+ * device again owns the buffer.
+ */
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+
+ debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
+ if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+ return;
+
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+
+ debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
+ if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+ return;
+
+ __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
/*
* The scatter list versions of the above methods.
*/
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+
#endif /* __KERNEL__ */
#endif
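[Note: the hunks above restore the direct, pre-dma_map_ops ARM streaming API. Typical driver-side use of the restored entry points, sketched under the buffer-ownership rules documented in the header; dev, buf and len are placeholders:

	dma_addr_t handle;

	/* CPU hands the buffer to the device */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... device performs DMA here ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
]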
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 0f2a9cb..d03b6a9 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,21 +76,26 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- sub r0, sp, #4 @ uninhabited address
- strex r1, r2, [r0] @ clear the exclusive monitor
-#endif
+#if defined(CONFIG_CPU_V6)
+ ldr r0, [sp]
+ strex r1, r2, [sp] @ clear the exclusive monitor
+ ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#else
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#endif
.endm
.macro restore_user_regs, fast = 0, offset = 0
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
- @ We must avoid clrex due to Cortex-A15 erratum #830321
+#if defined(CONFIG_CPU_V6)
strex r1, r2, [sp] @ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
@@ -113,10 +118,7 @@
.macro svc_exit, rpsr
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
-
+ clrex @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
@@ -125,16 +127,13 @@
.endm
.macro restore_user_regs, fast = 0, offset = 0
+ clrex @ clear the exclusive monitor
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
msr spsr_cxsf, r1 @ save in spsr_svc
-
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r1, r2, [sp] @ clear the exclusive monitor
-
.if \fast
ldmdb sp, {r1 - r12} @ get calling r1 - r12
.else
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7d767c3..a8f2858 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -185,7 +185,6 @@ SECTIONS
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- COMPAT_EXPORTS
SECURITY_INITCALL
INIT_RAM_FS
}
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 66af7b8..d580dd4 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1703,6 +1703,36 @@ config CHIL360_OC
help
Unlock the overclock freq speed
+choice
+ prompt "Chil360 RAM Mapping"
+ default CHIL360_RAM_STOCK
+
+config CHIL360_RAM_STOCK
+ bool "Stock (390MB free)"
+ default n
+ help
+ Unlock more free ram on y300/g510
+
+config CHIL360_RAM_MEDIUM
+ bool "Medium (397MB free)"
+ default n
+ help
+ Unlock more free ram on y300/g510
+
+config CHIL360_RAM_HIGH
+ bool "High (404MB free)"
+ default n
+ help
+ Unlock more free ram on y300/g510
+
+config CHIL360_RAM_EXTRA_HIGH
+ bool "Extra High (424MB free)"
+ default n
+ help
+ Unlock more free ram on y300/g510
+
+endchoice
+
config MSM_AVS_HW
bool "Enable Adaptive Voltage Scaling (AVS)"
default n
@@ -1714,6 +1744,7 @@ config MSM_AVS_HW
config MSM_HW3D
tristate "MSM Hardware 3D Register Driver"
+ default y
help
Provides access to registers needed by the userspace OpenGL|ES
library.
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index e5e3840..fae9b9b 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -356,7 +356,7 @@ obj-$(CONFIG_ARCH_MSM9625) += gpiomux-v2.o gpiomux.o
obj-$(CONFIG_MSM_SLEEP_STATS) += idle_stats.o
obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
-obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o
+obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_dcvs_idle.o
obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
obj-$(CONFIG_MSM_SHOW_RESUME_IRQ) += msm_show_resume_irq.o
obj-$(CONFIG_BT_MSM_PINTEST) += btpintest.o
@@ -392,5 +392,3 @@ obj-$(CONFIG_DEBUG_FS) += msm_cpr-debug.o
endif
obj-$(CONFIG_MSM_FIQ) += msm7k_fiq.o
obj-$(CONFIG_MSM_FIQ) += msm7k_fiq_handler.o
-
-obj-$(CONFIG_ARCH_RANDOM) += early_random.o
diff --git a/arch/arm/mach-msm/audio-7627a-devices.c b/arch/arm/mach-msm/audio-7627a-devices.c
index 46906ce..8019fa7 100644
--- a/arch/arm/mach-msm/audio-7627a-devices.c
+++ b/arch/arm/mach-msm/audio-7627a-devices.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/android_pmem.h>
#include <mach/board.h>
#include "board-msm7627a.h"
diff --git a/arch/arm/mach-msm/board-8064-display.c b/arch/arm/mach-msm/board-8064-display.c
index 0642b31..0f591ea 100644
--- a/arch/arm/mach-msm/board-8064-display.c
+++ b/arch/arm/mach-msm/board-8064-display.c
@@ -16,7 +16,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/board.h>
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 174a528..fca48ad 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -27,7 +27,7 @@
#include <linux/spi/spi.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/memory.h>
#include <linux/memblock.h>
#include <linux/msm_thermal.h>
@@ -304,7 +304,9 @@ static struct ion_co_heap_pdata fw_co_apq8064_ion_pdata = {
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap apq8064_heaps[] = {
+static struct ion_platform_data apq8064_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -367,11 +369,7 @@ struct ion_platform_heap apq8064_heaps[] = {
.extra_data = (void *) &co_apq8064_ion_pdata,
},
#endif
-};
-
-static struct ion_platform_data apq8064_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = apq8064_heaps,
+ }
};
static struct platform_device apq8064_ion_dev = {
@@ -416,27 +414,58 @@ static void __init reserve_ion_memory(void)
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ unsigned int reusable_count = 0;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ apq8064_fmem_pdata.size = 0;
+ apq8064_fmem_pdata.reserved_size_low = 0;
+ apq8064_fmem_pdata.reserved_size_high = 0;
+ apq8064_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ /* We only support 1 reusable heap. Check if more than one heap
+ * is specified as reusable and set as non-reusable if found.
+ */
+ for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
+ const struct ion_platform_heap *heap =
+ &(apq8064_ion_pdata.heaps[i]);
+
+ if (heap->type == ION_HEAP_TYPE_CP && heap->extra_data) {
+ struct ion_cp_heap_pdata *data = heap->extra_data;
+
+ reusable_count += (data->reusable) ? 1 : 0;
+
+ if (data->reusable && reusable_count > 1) {
+ pr_err("%s: Too many heaps specified as "
+ "reusable. Heap %s was not configured "
+ "as reusable.\n", __func__, heap->name);
+ data->reusable = 0;
+ }
+ }
+ }
+
for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
const struct ion_platform_heap *heap =
&(apq8064_ion_pdata.heaps[i]);
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
+ int mem_is_fmem = 0;
switch (heap->type) {
case ION_HEAP_TYPE_CP:
+ mem_is_fmem = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ mem_is_fmem = ((struct ion_co_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -455,12 +484,21 @@ static void __init reserve_ion_memory(void)
fixed_middle_size += heap->size;
else if (fixed_position == FIXED_HIGH)
fixed_high_size += heap->size;
+
+ if (mem_is_fmem)
+ apq8064_fmem_pdata.size += heap->size;
}
}
if (!fixed_size)
return;
+ if (apq8064_fmem_pdata.size) {
+ apq8064_fmem_pdata.reserved_size_low = fixed_low_size +
+ HOLE_SIZE;
+ apq8064_fmem_pdata.reserved_size_high = fixed_high_size;
+ }
+
/* Since the fixed area may be carved out of lowmem,
* make sure the length is a multiple of 1M.
*/
@@ -479,7 +517,7 @@ static void __init reserve_ion_memory(void)
int fixed_position = NOT_FIXED;
struct ion_cp_heap_pdata *pdata = NULL;
- switch ((int)heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_CP:
pdata =
(struct ion_cp_heap_pdata *)heap->extra_data;
@@ -628,6 +666,19 @@ static void __init apq8064_reserve(void)
apq8064_set_display_params(prim_panel_name, ext_panel_name,
ext_resolution);
msm_reserve();
+ if (apq8064_fmem_pdata.size) {
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+ if (reserve_info->fixed_area_size) {
+ apq8064_fmem_pdata.phys =
+ reserve_info->fixed_area_start + MSM_MM_FW_SIZE;
+ pr_info("mm fw at %lx (fixed) size %x\n",
+ reserve_info->fixed_area_start, MSM_MM_FW_SIZE);
+ pr_info("fmem start %lx (fixed) size %lx\n",
+ apq8064_fmem_pdata.phys,
+ apq8064_fmem_pdata.size);
+ }
+#endif
+ }
}
static void __init place_movable_zone(void)
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index 84ddc31..34f6f36 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -22,7 +22,7 @@
#include <mach/board.h>
#include <mach/gpiomux.h>
#include <mach/socinfo.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/ion.h>
#include "devices.h"
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index a4e194d..5082a8d 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -73,7 +73,7 @@
#include <mach/msm_xo.h>
#include <mach/restart.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/ion.h>
#include <mach/mdm2.h>
#include <mach/msm_rtb.h>
@@ -360,7 +360,9 @@ static struct ion_co_heap_pdata fw_co_msm8930_ion_pdata = {
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap msm8930_heaps[] = {
+static struct ion_platform_data msm8930_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -423,12 +425,7 @@ struct ion_platform_heap msm8930_heaps[] = {
.extra_data = (void *) &co_msm8930_ion_pdata,
},
#endif
-};
-
-static struct ion_platform_data msm8930_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = msm8930_heaps,
-
+ }
};
static struct platform_device msm8930_ion_dev = {
@@ -473,27 +470,58 @@ static void __init reserve_ion_memory(void)
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ unsigned int reusable_count = 0;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ msm8930_fmem_pdata.size = 0;
+ msm8930_fmem_pdata.reserved_size_low = 0;
+ msm8930_fmem_pdata.reserved_size_high = 0;
+ msm8930_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ /* We only support 1 reusable heap. Check if more than one heap
+ * is specified as reusable and set as non-reusable if found.
+ */
+ for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
+ const struct ion_platform_heap *heap =
+ &(msm8930_ion_pdata.heaps[i]);
+
+ if (heap->type == ION_HEAP_TYPE_CP && heap->extra_data) {
+ struct ion_cp_heap_pdata *data = heap->extra_data;
+
+ reusable_count += (data->reusable) ? 1 : 0;
+
+ if (data->reusable && reusable_count > 1) {
+ pr_err("%s: Too many heaps specified as "
+ "reusable. Heap %s was not configured "
+ "as reusable.\n", __func__, heap->name);
+ data->reusable = 0;
+ }
+ }
+ }
+
for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
const struct ion_platform_heap *heap =
&(msm8930_ion_pdata.heaps[i]);
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
+ int mem_is_fmem = 0;
switch (heap->type) {
case ION_HEAP_TYPE_CP:
+ mem_is_fmem = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ mem_is_fmem = ((struct ion_co_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -513,12 +541,20 @@ static void __init reserve_ion_memory(void)
else if (fixed_position == FIXED_HIGH)
fixed_high_size += heap->size;
+ if (mem_is_fmem)
+ msm8930_fmem_pdata.size += heap->size;
}
}
if (!fixed_size)
return;
+ if (msm8930_fmem_pdata.size) {
+ msm8930_fmem_pdata.reserved_size_low = fixed_low_size +
+ HOLE_SIZE;
+ msm8930_fmem_pdata.reserved_size_high = fixed_high_size;
+ }
+
/* Since the fixed area may be carved out of lowmem,
* make sure the length is a multiple of 1M.
*/
@@ -537,7 +573,7 @@ static void __init reserve_ion_memory(void)
int fixed_position = NOT_FIXED;
struct ion_cp_heap_pdata *pdata = NULL;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_CP:
pdata =
(struct ion_cp_heap_pdata *)heap->extra_data;
@@ -670,6 +706,18 @@ static void __init msm8930_early_memory(void)
static void __init msm8930_reserve(void)
{
msm_reserve();
+ if (msm8930_fmem_pdata.size) {
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+ if (reserve_info->fixed_area_size) {
+ msm8930_fmem_pdata.phys =
+ reserve_info->fixed_area_start + MSM_MM_FW_SIZE;
+ pr_info("mm fw at %lx (fixed) size %x\n",
+ reserve_info->fixed_area_start, MSM_MM_FW_SIZE);
+ pr_info("fmem start %lx (fixed) size %lx\n",
+ msm8930_fmem_pdata.phys, msm8930_fmem_pdata.size);
+ }
+#endif
+ }
}
static int msm8930_change_memory_power(u64 start, u64 size,
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 1415e0d..79e6d32 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -15,7 +15,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <mach/msm_bus_board.h>
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 8c3db15..1e178be 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -78,7 +78,7 @@
#endif
#include <linux/smsc3503.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/ion.h>
#include <mach/mdm2.h>
#include <mach/mdm-peripheral.h>
@@ -382,7 +382,9 @@ static struct ion_co_heap_pdata fw_co_msm8960_ion_pdata = {
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap msm8960_heaps[] = {
+static struct ion_platform_data msm8960_ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -445,11 +447,7 @@ struct ion_platform_heap msm8960_heaps[] = {
.extra_data = (void *) &co_msm8960_ion_pdata,
},
#endif
-};
-
-static struct ion_platform_data msm8960_ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = msm8960_heaps,
+ }
};
static struct platform_device msm8960_ion_dev = {
@@ -521,15 +519,41 @@ static void __init reserve_ion_memory(void)
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ unsigned int reusable_count = 0;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
adjust_mem_for_liquid();
+ msm8960_fmem_pdata.size = 0;
+ msm8960_fmem_pdata.reserved_size_low = 0;
+ msm8960_fmem_pdata.reserved_size_high = 0;
+ msm8960_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ /* We only support 1 reusable heap. Check if more than one heap
+ * is specified as reusable and set as non-reusable if found.
+ */
+ for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
+ const struct ion_platform_heap *heap =
+ &(msm8960_ion_pdata.heaps[i]);
+
+ if (heap->type == ION_HEAP_TYPE_CP && heap->extra_data) {
+ struct ion_cp_heap_pdata *data = heap->extra_data;
+
+ reusable_count += (data->reusable) ? 1 : 0;
+
+ if (data->reusable && reusable_count > 1) {
+ pr_err("%s: Too many heaps specified as "
+ "reusable. Heap %s was not configured "
+ "as reusable.\n", __func__, heap->name);
+ data->reusable = 0;
+ }
+ }
+ }
+
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap =
&(msm8960_ion_pdata.heaps[i]);
@@ -539,9 +563,12 @@ static void __init reserve_ion_memory(void)
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
+ int mem_is_fmem = 0;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_CP:
+ mem_is_fmem = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
align = ((struct ion_cp_heap_pdata *)
@@ -551,6 +578,8 @@ static void __init reserve_ion_memory(void)
heap->extra_data)->iommu_map_all;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ mem_is_fmem = ((struct ion_co_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
adjacent_mem_id = ((struct ion_co_heap_pdata *)
@@ -568,6 +597,9 @@ static void __init reserve_ion_memory(void)
}
}
+ if (mem_is_fmem && adjacent_mem_id != INVALID_HEAP_ID)
+ msm8960_fmem_pdata.align = align;
+
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
else
@@ -579,12 +611,21 @@ static void __init reserve_ion_memory(void)
fixed_middle_size += heap->size;
else if (fixed_position == FIXED_HIGH)
fixed_high_size += heap->size;
+
+ if (mem_is_fmem)
+ msm8960_fmem_pdata.size += heap->size;
}
}
if (!fixed_size)
return;
+ if (msm8960_fmem_pdata.size) {
+ msm8960_fmem_pdata.reserved_size_low = fixed_low_size +
+ HOLE_SIZE;
+ msm8960_fmem_pdata.reserved_size_high = fixed_high_size;
+ }
+
/* Since the fixed area may be carved out of lowmem,
* make sure the length is a multiple of 1M.
*/
@@ -603,7 +644,7 @@ static void __init reserve_ion_memory(void)
int fixed_position = NOT_FIXED;
struct ion_cp_heap_pdata *pdata = NULL;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_CP:
pdata =
(struct ion_cp_heap_pdata *)heap->extra_data;
@@ -754,6 +795,19 @@ static void __init msm8960_reserve(void)
{
msm8960_set_display_params(prim_panel_name, ext_panel_name);
msm_reserve();
+ if (msm8960_fmem_pdata.size) {
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+ if (reserve_info->fixed_area_size) {
+ msm8960_fmem_pdata.phys =
+ reserve_info->fixed_area_start + MSM_MM_FW_SIZE;
+ pr_info("mm fw at %lx (fixed) size %x\n",
+ reserve_info->fixed_area_start, MSM_MM_FW_SIZE);
+ pr_info("fmem start %lx (fixed) size %lx\n",
+ msm8960_fmem_pdata.phys,
+ msm8960_fmem_pdata.size);
+ }
+#endif
+ }
}
static int msm8960_change_memory_power(u64 start, u64 size,
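
The reusable-heap guard added to the 8960 reserve_ion_memory() above walks the heap table once and demotes every reusable CP heap after the first, since (per the comment it adds) only one reusable heap is supported. A self-contained sketch of the same scan, using simplified stand-ins for ion_platform_heap and ion_cp_heap_pdata:

/* Sketch of the one-reusable-heap rule; the structs are simplified
 * stand-ins for ion_platform_heap / ion_cp_heap_pdata. */
#include <stdio.h>

struct cp_pdata { int reusable; };
struct heap { const char *name; struct cp_pdata *cp; };

static void limit_reusable(struct heap *heaps, int nr)
{
	int i, reusable_count = 0;

	for (i = 0; i < nr; i++) {
		if (!heaps[i].cp)
			continue;	/* not a CP heap */
		reusable_count += heaps[i].cp->reusable ? 1 : 0;
		if (heaps[i].cp->reusable && reusable_count > 1) {
			printf("heap %s demoted to non-reusable\n",
			       heaps[i].name);
			heaps[i].cp->reusable = 0;
		}
	}
}

int main(void)
{
	struct cp_pdata mm = { 1 }, mfc = { 1 };
	struct heap heaps[] = { { "mm", &mm }, { "mfc", &mfc }, { "sf", NULL } };

	limit_reusable(heaps, 3);	/* "mfc" gets demoted */
	return 0;
}
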
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 4ea4e54..557331a 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -24,6 +24,9 @@
#include <linux/ion.h>
#endif
#include <linux/memory.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
#include <linux/regulator/stub-regulator.h>
#include <linux/regulator/machine.h>
#include <asm/mach/map.h>
@@ -380,7 +383,6 @@ static struct reserve_info msm_8974_reserve_info __initdata = {
static void __init msm_8974_early_memory(void)
{
reserve_info = &msm_8974_reserve_info;
- of_scan_flat_dt(dt_scan_for_memory_reserve, msm_8974_reserve_table);
}
void __init msm_8974_reserve(void)
diff --git a/arch/arm/mach-msm/board-9615-display.c b/arch/arm/mach-msm/board-9615-display.c
index 4e4ce7a..74bc984 100644
--- a/arch/arm/mach-msm/board-9615-display.c
+++ b/arch/arm/mach-msm/board-9615-display.c
@@ -15,7 +15,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/board.h>
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 50c7851..a312e2b 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -29,7 +29,7 @@
#include <linux/power/ltc4088-charger.h>
#include <linux/gpio.h>
#include <linux/msm_tsens.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/memory.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -80,7 +80,9 @@ static struct ion_co_heap_pdata co_ion_pdata = {
.align = PAGE_SIZE,
};
-static struct ion_platform_heap msm9615_heaps[] = {
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -99,11 +101,7 @@ static struct ion_platform_heap msm9615_heaps[] = {
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &co_ion_pdata,
},
-};
-
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = msm9615_heaps,
+ }
};
static struct platform_device ion_dev = {
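
This patch repeatedly performs the same heap-table transformation (in the 8960, 9615, 7x27a, 7x30, 8x60 and qrd7627a boards): a separately named ion_platform_heap array referenced through a pointer member is folded back into an inline initializer inside ion_platform_data. A reduced illustration of the two shapes with simplified stand-in structs; note that if the inline form targets a trailing flexible array member, as in some ION versions, its static initialization relies on a GCC extension:

/* The two heap-table shapes this patch converts between; the structs
 * are simplified stand-ins for ion_platform_data / ion_platform_heap. */
struct heap { int id; };

/* Shape being reverted: pointer member referencing a named array. */
struct pdata_ptr { int nr; struct heap *heaps; };
struct heap named_heaps[] = { { 1 }, { 2 } };
struct pdata_ptr newer_form = { 2, named_heaps };

/* Shape being restored: the table written inline in the initializer.
 * If heaps were a trailing flexible array member, this static init
 * would lean on a GCC extension. */
struct pdata_arr { int nr; struct heap heaps[2]; };
struct pdata_arr older_form = { 2, { { 1 }, { 2 } } };
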
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index c471374..3e7d9cb 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -797,8 +797,6 @@ static struct msm_panel_common_pdata mdp_pdata = {
.gpio = 97,
.mdp_rev = MDP_REV_303,
.cont_splash_enabled = 0x1,
- .splash_screen_addr = 0x00,
- .splash_screen_size = 0x00,
};
static char lcdc_splash_is_enabled()
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index d619984..a7fed3e 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -54,6 +54,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
+#include <linux/android_pmem.h>
#include <mach/camera.h>
#ifdef CONFIG_USB_G_ANDROID
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index cc3d5d8..002cd6f 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -38,6 +38,7 @@
#include <linux/i2c.h>
#include <linux/i2c/sx150x.h>
#include <linux/gpio.h>
+#include <linux/android_pmem.h>
#include <linux/bootmem.h>
#include <linux/mfd/marimba.h>
#include <mach/vreg.h>
@@ -47,10 +48,9 @@
#include <mach/msm_battery.h>
#include <linux/smsc911x.h>
#include <linux/atmel_maxtouch.h>
+/* Updated from the Qualcomm baseline: one line deleted to disable fmem and avoid a deadlock. */
#include <linux/msm_adc.h>
-#include <linux/msm_ion.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/ion.h>
#include "devices.h"
#include "timer.h"
#include "board-msm7x27a-regulator.h"
@@ -82,16 +82,8 @@ static ssize_t buf_vkey_size=0;
#include <asm-arm/huawei/usb_switch_huawei.h>
#endif
-#define CAMERA_HEAP_BASE 0x0
-#ifdef CONFIG_CMA
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_DMA
-#else
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_CARVEOUT
-#endif
-
-#define RESERVE_KERNEL_EBI1_SIZE 0x3A000
-#define MSM_RESERVE_AUDIO_SIZE 0xF0000
-#define BOOTLOADER_BASE_ADDR 0x10000
+#define PMEM_KERNEL_EBI1_SIZE 0x3A000
+#define MSM_PMEM_AUDIO_SIZE 0x1F4000
#if defined(CONFIG_GPIO_SX150X)
enum {
@@ -262,29 +254,57 @@ static struct msm_i2c_platform_data msm_gsbi1_qup_i2c_pdata = {
};
#ifdef CONFIG_ARCH_MSM7X27A
-#define MSM_RESERVE_MDP_SIZE 0x2300000
-#define MSM7x25A_MSM_RESERVE_MDP_SIZE 0x1500000
+#define MSM_PMEM_MDP_SIZE 0x2300000
+#define MSM7x25A_MSM_PMEM_MDP_SIZE 0x1500000
+
+	/* Chil360 RAM tweak */
+#ifdef CONFIG_CHIL360_RAM_STOCK
+#define MSM_PMEM_ADSP_SIZE 0x1200000 /* 18 MB */
+#elif defined(CONFIG_CHIL360_RAM_MEDIUM)
+#define MSM_PMEM_ADSP_SIZE 0xD00000 /* 13 MB */
+#elif defined(CONFIG_CHIL360_RAM_EXTRA_HIGH)
+/* #define MSM_PMEM_ADSP_SIZE 0x800000	(8 MB) */
+#define MSM_PMEM_ADSP_SIZE 0x400000 /* 4 MB */
+#else
+#define MSM_PMEM_ADSP_SIZE 0xC00000 /* 12 MB */
+#endif
+
-#define MSM_RESERVE_ADSP_SIZE 0x1300000
-#define MSM7x25A_MSM_RESERVE_ADSP_SIZE 0xB91000
+#define MSM_PMEM_ADSP_BIG_SIZE 0x1E00000
+#define MSM7x25A_MSM_PMEM_ADSP_SIZE 0xB91000
#define CAMERA_ZSL_SIZE (SZ_1M * 60)
+
+#define MSM_3M_PMEM_ADSP_SIZE (0x1048000)
+/* Enlarge the pmem space for HDR on the 8950 series. */
+static unsigned int get_pmem_adsp_size(void)
+{
+	if (machine_is_msm8x25_C8950D()
+	    || machine_is_msm8x25_U8950D()
+	    /* Some lines deleted here to reduce pmem and free up memory. */
+	    || machine_is_msm8x25_U8950()) {
+		return CAMERA_ZSL_SIZE;
+	} else if (machine_is_msm7x27a_H867G()
+		   || machine_is_msm7x27a_H868C()
+		   || machine_is_msm8x25_Y301_A1()) {
+		return MSM_3M_PMEM_ADSP_SIZE;
+	} else {
+		return MSM_PMEM_ADSP_SIZE;
+	}
+}
#endif
#ifdef CONFIG_ION_MSM
-#define MSM_ION_HEAP_NUM 5
+#define MSM_ION_HEAP_NUM 4
static struct platform_device ion_dev;
static int msm_ion_camera_size;
static int msm_ion_audio_size;
static int msm_ion_sf_size;
-static int msm_ion_camera_size_carving;
#endif
-#define CAMERA_HEAP_BASE 0x0
-#ifdef CONFIG_CMA
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_DMA
-#else
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_CARVEOUT
-#endif
static struct android_usb_platform_data android_usb_pdata = {
.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
@@ -488,8 +508,7 @@ static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = {
};
/* 8625 PM platform data */
-static struct msm_pm_platform_data
- msm8625_pm_data[MSM_PM_SLEEP_MODE_NR * CONFIG_NR_CPUS] = {
+static struct msm_pm_platform_data msm8625_pm_data[MSM_PM_SLEEP_MODE_NR * 2] = {
/* CORE0 entries */
[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
.idle_supported = 1,
@@ -516,7 +535,7 @@ static struct msm_pm_platform_data
.idle_enabled = 0,
.suspend_enabled = 0,
.latency = 500,
- .residency = 500,
+ .residency = 6000,
},
[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
@@ -535,7 +554,7 @@ static struct msm_pm_platform_data
.idle_enabled = 0,
.suspend_enabled = 0,
.latency = 500,
- .residency = 500,
+ .residency = 6000,
},
[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
@@ -547,44 +566,6 @@ static struct msm_pm_platform_data
.residency = 10,
},
- /* picked latency & redisdency values from 7x30 */
- [MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 0,
- .suspend_enabled = 0,
- .latency = 500,
- .residency = 500,
- },
-
- [MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 1,
- .suspend_enabled = 1,
- .latency = 2,
- .residency = 10,
- },
-
- /* picked latency & redisdency values from 7x30 */
- [MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 0,
- .suspend_enabled = 0,
- .latency = 500,
- .residency = 500,
- },
-
- [MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 1,
- .suspend_enabled = 1,
- .latency = 2,
- .residency = 10,
- },
-
};
static struct msm_pm_boot_platform_data msm_pm_8625_boot_pdata __initdata = {
@@ -592,23 +573,62 @@ static struct msm_pm_boot_platform_data msm_pm_8625_boot_pdata __initdata = {
.v_addr = MSM_CFG_CTL_BASE,
};
-static unsigned reserve_mdp_size = MSM_RESERVE_MDP_SIZE;
-static int __init reserve_mdp_size_setup(char *p)
+static struct android_pmem_platform_data android_pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 1,
+ .memory_type = MEMTYPE_EBI1,
+	/* Updated from the Qualcomm baseline: three lines deleted to disable fmem and avoid a deadlock. */
+};
+
+static struct platform_device android_pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 1,
+ .dev = { .platform_data = &android_pmem_adsp_pdata },
+};
+
+static unsigned pmem_mdp_size = MSM_PMEM_MDP_SIZE;
+static int __init pmem_mdp_size_setup(char *p)
{
- reserve_mdp_size = memparse(p, NULL);
+ pmem_mdp_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_mdp_size", reserve_mdp_size_setup);
+early_param("pmem_mdp_size", pmem_mdp_size_setup);
-static unsigned reserve_adsp_size = MSM_RESERVE_ADSP_SIZE;
-static int __init reserve_adsp_size_setup(char *p)
+static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
+static int __init pmem_adsp_size_setup(char *p)
{
- reserve_adsp_size = memparse(p, NULL);
+ pmem_adsp_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_adsp_size", reserve_adsp_size_setup);
+early_param("pmem_adsp_size", pmem_adsp_size_setup);
+
+static struct android_pmem_platform_data android_pmem_audio_pdata = {
+ .name = "pmem_audio",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 0,
+ .memory_type = MEMTYPE_EBI1,
+};
+
+static struct platform_device android_pmem_audio_device = {
+ .name = "android_pmem",
+ .id = 2,
+ .dev = { .platform_data = &android_pmem_audio_pdata },
+};
+
+static struct android_pmem_platform_data android_pmem_pdata = {
+ .name = "pmem",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 1,
+ .memory_type = MEMTYPE_EBI1,
+};
+static struct platform_device android_pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &android_pmem_pdata },
+};
static u32 msm_calculate_batt_capacity(u32 current_voltage);
@@ -808,6 +828,10 @@ static struct platform_device *msm7627a_surf_ffa_devices[] __initdata = {
static struct platform_device *common_devices[] __initdata = {
&android_usb_device,
+ &android_pmem_device,
+ &android_pmem_adsp_device,
+ &android_pmem_audio_device,
+	/* Updated from the Qualcomm baseline: one line deleted to disable fmem and avoid a deadlock. */
&msm_device_nand,
&msm_device_snd,
&msm_device_cad,
@@ -848,46 +872,41 @@ static struct platform_device *msm8625_surf_devices[] __initdata = {
&msm8625_kgsl_3d0,
};
-static unsigned reserve_kernel_ebi1_size = RESERVE_KERNEL_EBI1_SIZE;
-static int __init reserve_kernel_ebi1_size_setup(char *p)
+static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE;
+static int __init pmem_kernel_ebi1_size_setup(char *p)
{
- reserve_kernel_ebi1_size = memparse(p, NULL);
+ pmem_kernel_ebi1_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_kernel_ebi1_size", reserve_kernel_ebi1_size_setup);
+early_param("pmem_kernel_ebi1_size", pmem_kernel_ebi1_size_setup);
-static unsigned reserve_audio_size = MSM_RESERVE_AUDIO_SIZE;
-static int __init reserve_audio_size_setup(char *p)
+static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE;
+static int __init pmem_audio_size_setup(char *p)
{
- reserve_audio_size = memparse(p, NULL);
+ pmem_audio_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_audio_size", reserve_audio_size_setup);
+early_param("pmem_audio_size", pmem_audio_size_setup);
static void fix_sizes(void)
{
if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) {
- reserve_mdp_size = MSM7x25A_MSM_RESERVE_MDP_SIZE;
- reserve_adsp_size = MSM7x25A_MSM_RESERVE_ADSP_SIZE;
+ pmem_mdp_size = MSM7x25A_MSM_PMEM_MDP_SIZE;
+ pmem_adsp_size = MSM7x25A_MSM_PMEM_ADSP_SIZE;
} else {
- reserve_mdp_size = MSM_RESERVE_MDP_SIZE;
- reserve_adsp_size = MSM_RESERVE_ADSP_SIZE;
+ pmem_mdp_size = get_mdp_pmem_size();
+		printk(KERN_INFO "pmem_mdp_size=%08x\n", pmem_mdp_size);
+		pmem_adsp_size = get_pmem_adsp_size();
+		printk(KERN_INFO "pmem_adsp_size=%08x\n", pmem_adsp_size);
}
 	/* Qualcomm code deleted here. */
+/*
if (get_ddr_size() > SZ_512M)
- reserve_adsp_size = CAMERA_ZSL_SIZE;
+ pmem_adsp_size = CAMERA_ZSL_SIZE;*/
#ifdef CONFIG_ION_MSM
- msm_ion_audio_size = MSM_RESERVE_AUDIO_SIZE;
- msm_ion_sf_size = reserve_mdp_size;
-#ifdef CONFIG_CMA
- if (get_ddr_size() > SZ_256M)
- reserve_adsp_size = CAMERA_ZSL_SIZE;
- msm_ion_camera_size = reserve_adsp_size;
- msm_ion_camera_size_carving = 0;
-#else
- msm_ion_camera_size = reserve_adsp_size;
- msm_ion_camera_size_carving = msm_ion_camera_size;
-#endif
+ msm_ion_camera_size = pmem_adsp_size;
+ msm_ion_audio_size = (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE);
+ msm_ion_sf_size = pmem_mdp_size;
#endif
}
@@ -897,45 +916,31 @@ static struct ion_co_heap_pdata co_ion_pdata = {
.adjacent_mem_id = INVALID_HEAP_ID,
.align = PAGE_SIZE,
};
-
-static struct ion_co_heap_pdata co_mm_ion_pdata = {
- .adjacent_mem_id = INVALID_HEAP_ID,
- .align = PAGE_SIZE,
-};
-
-static u64 msm_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device ion_cma_device = {
- .name = "ion-cma-device",
- .id = -1,
- .dev = {
- .dma_mask = &msm_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
#endif
/**
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap msm7x27a_heaps[] = {
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
.name = ION_VMALLOC_HEAP_NAME,
},
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- /* ION_ADSP = CAMERA */
+ /* PMEM_ADSP = CAMERA */
{
.id = ION_CAMERA_HEAP_ID,
- .type = CAMERA_HEAP_TYPE,
+ .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_CAMERA_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .extra_data = (void *)&co_mm_ion_pdata,
- .priv = (void *)&ion_cma_device.dev,
+ .extra_data = (void *)&co_ion_pdata,
},
- /* AUDIO HEAP 1*/
+ /* PMEM_AUDIO */
{
.id = ION_AUDIO_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
@@ -943,7 +948,7 @@ struct ion_platform_heap msm7x27a_heaps[] = {
.memory_type = ION_EBI_TYPE,
.extra_data = (void *)&co_ion_pdata,
},
- /* ION_MDP = SF */
+ /* PMEM_MDP = SF */
{
.id = ION_SF_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
@@ -951,23 +956,8 @@ struct ion_platform_heap msm7x27a_heaps[] = {
.memory_type = ION_EBI_TYPE,
.extra_data = (void *)&co_ion_pdata,
},
- /* AUDIO HEAP 2*/
- {
- .id = ION_AUDIO_HEAP_BL_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_AUDIO_BL_HEAP_NAME,
- .memory_type = ION_EBI_TYPE,
- .extra_data = (void *)&co_ion_pdata,
- .base = BOOTLOADER_BASE_ADDR,
- },
-
#endif
-};
-
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .has_outer_cache = 1,
- .heaps = msm7x27a_heaps,
+ }
};
static struct platform_device ion_dev = {
@@ -988,22 +978,61 @@ static struct memtype_reserve msm7x27a_reserve_table[] __initdata = {
},
};
+/* Updated from the Qualcomm baseline: seven lines deleted to disable fmem and avoid a deadlock. */
+
+static void __init size_pmem_devices(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+
+	/* Updated from the Qualcomm baseline: two lines deleted to disable fmem and avoid a deadlock. */
+ android_pmem_adsp_pdata.size = pmem_adsp_size;
+ android_pmem_pdata.size = pmem_mdp_size;
+ android_pmem_audio_pdata.size = pmem_audio_size;
+
+	/* Updated from the Qualcomm baseline: 19 lines deleted to disable fmem and avoid a deadlock. */
+
+#endif
+#endif
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+static void __init reserve_memory_for(struct android_pmem_platform_data *p)
+{
+ msm7x27a_reserve_table[p->memory_type].size += p->size;
+}
+#endif
+#endif
+
+static void __init reserve_pmem_memory(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+	/* Updated from the Qualcomm baseline: three lines deleted and three added to disable fmem and avoid a deadlock. */
+ reserve_memory_for(&android_pmem_adsp_pdata);
+ reserve_memory_for(&android_pmem_pdata);
+ reserve_memory_for(&android_pmem_audio_pdata);
+
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
+#endif
+#endif
+}
+
static void __init size_ion_devices(void)
{
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
ion_pdata.heaps[1].size = msm_ion_camera_size;
- ion_pdata.heaps[2].size = RESERVE_KERNEL_EBI1_SIZE;
+ ion_pdata.heaps[2].size = msm_ion_audio_size;
ion_pdata.heaps[3].size = msm_ion_sf_size;
- ion_pdata.heaps[4].size = msm_ion_audio_size;
#endif
}
static void __init reserve_ion_memory(void)
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
- msm7x27a_reserve_table[MEMTYPE_EBI1].size += RESERVE_KERNEL_EBI1_SIZE;
- msm7x27a_reserve_table[MEMTYPE_EBI1].size +=
- msm_ion_camera_size_carving;
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_camera_size;
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_audio_size;
msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_sf_size;
#endif
}
@@ -1011,6 +1040,8 @@ static void __init reserve_ion_memory(void)
static void __init msm7x27a_calculate_reserve_sizes(void)
{
fix_sizes();
+ size_pmem_devices();
+ reserve_pmem_memory();
size_ion_devices();
reserve_ion_memory();
}
@@ -1073,17 +1104,7 @@ extern unsigned long get_mempools_pstart_addr(void);
static void __init msm7x27a_reserve(void)
{
reserve_info = &msm7x27a_reserve_info;
- memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
- memblock_remove(BOOTLOADER_BASE_ADDR, msm_ion_audio_size);
msm_reserve();
-#ifdef CONFIG_CMA
- dma_declare_contiguous(
- &ion_cma_device.dev,
- msm_ion_camera_size,
- CAMERA_HEAP_BASE,
- 0x26000000);
-#endif
-
#ifdef CONFIG_SRECORDER_MSM
if (0x0 != get_mempools_pstart_addr())
{
@@ -1097,12 +1118,15 @@ static void __init msm7x27a_reserve(void)
}
-/* This block of code was moved in its entirety to just before static void __init msm7x27a_reserve(void). */
+/* This block of code was moved in its entirety to just before static void __init msm7x27a_reserve(void). */
static void __init msm8625_reserve(void)
{
msm7x27a_reserve();
- memblock_remove(MSM8625_CPU_PHYS, SZ_8);
+
+/* This block's implementation was moved in its entirety into static void __init msm7x27a_reserve(void). */
+
+ memblock_remove(MSM8625_SECONDARY_PHYS, SZ_8);
memblock_remove(MSM8625_WARM_BOOT_PHYS, SZ_32);
memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
}
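
The pmem_*_size handlers restored above all follow the same early_param() pattern: parse the boot argument with memparse(), which accepts a decimal, octal, or hex number with an optional K/M/G suffix. A hedged userspace re-implementation of just the subset these handlers rely on:

/* Userspace sketch of the memparse() subset used by the pmem_*_size
 * early_params above (number in any C base, optional K/M/G suffix). */
#include <stdio.h>
#include <stdlib.h>

static unsigned long my_memparse(const char *p)
{
	char *end;
	unsigned long val = strtoul(p, &end, 0);	/* base 0: 0x.., 0.., decimal */

	switch (*end) {
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10;
	default: break;
	}
	return val;
}

int main(void)
{
	/* pmem_mdp_size=35M and pmem_mdp_size=0x2300000 parse identically. */
	printf("%lx\n", my_memparse("35M"));
	printf("%lx\n", my_memparse("0x2300000"));
	return 0;
}
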
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 4228e39..67ea97a 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -38,6 +38,7 @@
#include <linux/msm_adc.h>
#include <linux/dma-mapping.h>
#include <linux/regulator/consumer.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
@@ -52,6 +53,7 @@
#include <mach/msm_spi.h>
#include <mach/qdsp5v2/msm_lpa.h>
#include <mach/dma.h>
+#include <linux/android_pmem.h>
#include <linux/input/msm_ts.h>
#include <mach/pmic.h>
#include <mach/rpc_pmapp.h>
@@ -3525,6 +3527,19 @@ static int msm_hsusb_pmic_notif_init(void (*callback)(int online), int init)
}
#endif
+static struct android_pmem_platform_data android_pmem_pdata = {
+ .name = "pmem",
+ .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING,
+ .cached = 1,
+ .memory_type = MEMTYPE_EBI0,
+};
+
+static struct platform_device android_pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &android_pmem_pdata },
+};
+
#ifndef CONFIG_SPI_QSD
static int lcdc_gpio_array_num[] = {
45, /* spi_clk */
@@ -4002,6 +4017,32 @@ static struct platform_device msm_migrate_pages_device = {
.id = -1,
};
+static struct android_pmem_platform_data android_pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 0,
+ .memory_type = MEMTYPE_EBI0,
+};
+
+static struct android_pmem_platform_data android_pmem_audio_pdata = {
+ .name = "pmem_audio",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 0,
+ .memory_type = MEMTYPE_EBI0,
+};
+
+static struct platform_device android_pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 2,
+ .dev = { .platform_data = &android_pmem_adsp_pdata },
+};
+
+static struct platform_device android_pmem_audio_device = {
+ .name = "android_pmem",
+ .id = 4,
+ .dev = { .platform_data = &android_pmem_audio_pdata },
+};
+
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) || \
defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
@@ -5350,6 +5391,7 @@ static struct platform_device *devices[] __initdata = {
#ifdef CONFIG_I2C_SSBI
&msm_device_ssbi7,
#endif
+ &android_pmem_device,
&msm_fb_device,
#ifdef CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE
&msm_v4l2_video_overlay_device,
@@ -5361,6 +5403,8 @@ static struct platform_device *devices[] __initdata = {
&msm_rotator_device,
#endif
&lcdc_sharp_panel_device,
+ &android_pmem_adsp_device,
+ &android_pmem_audio_device,
&msm_device_i2c,
&msm_device_i2c_2,
&msm_device_uart_dm1,
@@ -7131,7 +7175,9 @@ static struct ion_co_heap_pdata co_ion_pdata = {
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap msm7x30_heaps[] = {
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -7166,11 +7212,7 @@ struct ion_platform_heap msm7x30_heaps[] = {
.extra_data = (void *)&co_ion_pdata,
},
#endif
-};
-
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = msm7x30_heaps,
+ }
};
static struct platform_device ion_dev = {
@@ -7206,6 +7248,39 @@ static void fix_sizes(void)
#endif
}
+static void __init size_pmem_devices(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+
+	android_pmem_adsp_pdata.size = pmem_adsp_size;
+ android_pmem_audio_pdata.size = pmem_audio_size;
+ android_pmem_pdata.size = pmem_sf_size;
+#endif
+#endif
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+static void __init reserve_memory_for(struct android_pmem_platform_data *p)
+{
+ msm7x30_reserve_table[p->memory_type].size += p->size;
+}
+#endif
+#endif
+
+static void __init reserve_pmem_memory(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+ reserve_memory_for(&android_pmem_adsp_pdata);
+ reserve_memory_for(&android_pmem_audio_pdata);
+ reserve_memory_for(&android_pmem_pdata);
+ msm7x30_reserve_table[MEMTYPE_EBI0].size += pmem_kernel_ebi0_size;
+#endif
+#endif
+}
+
static void __init reserve_mdp_memory(void)
{
mdp_pdata.ov0_wb_size = MSM_FB_OVERLAY0_WRITEBACK_SIZE;
@@ -7233,6 +7308,8 @@ static void __init reserve_ion_memory(void)
static void __init msm7x30_calculate_reserve_sizes(void)
{
fix_sizes();
+ size_pmem_devices();
+ reserve_pmem_memory();
reserve_mdp_memory();
size_ion_devices();
reserve_ion_memory();
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 6e0c862..425ecf9 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -40,6 +40,10 @@
#include <linux/dma-mapping.h>
#include <linux/i2c/bq27520.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+
#if defined(CONFIG_SMB137B_CHARGER) || defined(CONFIG_SMB137B_CHARGER_MODULE)
#include <linux/i2c/smb137b.h>
#endif
@@ -99,7 +103,7 @@
#include "pm-boot.h"
#include "board-storage-common-a.h"
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/ion.h>
#include <mach/msm_rtb.h>
@@ -2821,49 +2825,6 @@ static struct platform_device msm_fb_device = {
.dev.platform_data = &msm_fb_pdata,
};
-#define PMEM_BUS_WIDTH(_bw) \
- { \
- .vectors = &(struct msm_bus_vectors){ \
- .src = MSM_BUS_MASTER_AMPSS_M0, \
- .dst = MSM_BUS_SLAVE_SMI, \
- .ib = (_bw), \
- .ab = 0, \
- }, \
- .num_paths = 1, \
- }
-
-static struct msm_bus_paths mem_smi_table[] = {
- [0] = PMEM_BUS_WIDTH(0), /* Off */
- [1] = PMEM_BUS_WIDTH(1), /* On */
-};
-
-static struct msm_bus_scale_pdata smi_client_pdata = {
- .usecase = mem_smi_table,
- .num_usecases = ARRAY_SIZE(mem_smi_table),
- .name = "mem_smi",
-};
-
-int request_smi_region(void *data)
-{
- int bus_id = (int) data;
-
- msm_bus_scale_client_update_request(bus_id, 1);
- return 0;
-}
-
-int release_smi_region(void *data)
-{
- int bus_id = (int) data;
-
- msm_bus_scale_client_update_request(bus_id, 0);
- return 0;
-}
-
-void *setup_smi_region(void)
-{
- return (void *)msm_bus_scale_register_client(&smi_client_pdata);
-}
-
#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct android_pmem_platform_data android_pmem_pdata = {
@@ -2905,7 +2866,48 @@ static struct platform_device android_pmem_audio_device = {
.dev = { .platform_data = &android_pmem_audio_pdata },
};
#endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/
+#define PMEM_BUS_WIDTH(_bw) \
+ { \
+ .vectors = &(struct msm_bus_vectors){ \
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_SMI, \
+ .ib = (_bw), \
+ .ab = 0, \
+ }, \
+ .num_paths = 1, \
+ }
+
+static struct msm_bus_paths mem_smi_table[] = {
+ [0] = PMEM_BUS_WIDTH(0), /* Off */
+ [1] = PMEM_BUS_WIDTH(1), /* On */
+};
+
+static struct msm_bus_scale_pdata smi_client_pdata = {
+ .usecase = mem_smi_table,
+ .num_usecases = ARRAY_SIZE(mem_smi_table),
+ .name = "mem_smi",
+};
+
+int request_smi_region(void *data)
+{
+ int bus_id = (int) data;
+ msm_bus_scale_client_update_request(bus_id, 1);
+ return 0;
+}
+
+int release_smi_region(void *data)
+{
+ int bus_id = (int) data;
+
+ msm_bus_scale_client_update_request(bus_id, 0);
+ return 0;
+}
+
+void *setup_smi_region(void)
+{
+ return (void *)msm_bus_scale_register_client(&smi_client_pdata);
+}
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct android_pmem_platform_data android_pmem_smipool_pdata = {
.name = "pmem_smipool",
@@ -5430,7 +5432,9 @@ static struct ion_co_heap_pdata co_ion_pdata = {
* to each other.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap msm8x60_heaps[] = {
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -5505,11 +5509,7 @@ struct ion_platform_heap msm8x60_heaps[] = {
.extra_data = (void *)&co_ion_pdata,
},
#endif
-};
-
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .heaps = msm8x60_heaps,
+ }
};
static struct platform_device ion_dev = {
@@ -5563,8 +5563,7 @@ static void __init reserve_ion_memory(void)
for (i = 0; i < ion_pdata.nr; i++) {
struct ion_platform_heap *heap = &(ion_pdata.heaps[i]);
- if (heap->extra_data &&
- heap->type == (enum ion_heap_type) ION_HEAP_TYPE_CP) {
+ if (heap->extra_data && heap->type == ION_HEAP_TYPE_CP) {
int map_all = ((struct ion_cp_heap_pdata *)
heap->extra_data)->iommu_map_all;
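
The relocated SMI helpers in board-msm8x60.c above implement a plain on/off bandwidth vote: setup_smi_region() registers a client against the two-entry mem_smi_table, and request/release_smi_region() switch it between usecase 1 (on) and usecase 0 (off). A toy model of that two-usecase pattern, with the msm_bus client reduced to a struct so the sketch stands alone:

/* Toy model of the two-usecase on/off bus vote; the msm_bus client is
 * modeled by a plain struct so this compiles on its own. */
#include <stdio.h>

struct bus_client { const char *name; int usecase; };

static struct bus_client smi = { "mem_smi", 0 };

static void update_request(struct bus_client *c, int usecase)
{
	c->usecase = usecase;	/* 0 = off, 1 = on, mirroring mem_smi_table */
	printf("%s -> usecase %d\n", c->name, usecase);
}

int main(void)
{
	update_request(&smi, 1);	/* request_smi_region() */
	update_request(&smi, 0);	/* release_smi_region() */
	return 0;
}
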
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index e601bd2..c42875a 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -20,6 +20,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
+#include <linux/android_pmem.h>
#include <linux/bootmem.h>
#include <linux/mfd/marimba.h>
#include <linux/power_supply.h>
@@ -31,9 +32,7 @@
#include <linux/input/ft5x06_ts.h>
#include <linux/msm_adc.h>
#include <linux/regulator/msm-gpio-regulator.h>
-#include <linux/msm_ion.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/ion.h>
#include <asm/mach/mmc.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -62,9 +61,8 @@
#include "board-msm7x27a-regulator.h"
#include "board-msm7627a.h"
-#define RESERVE_KERNEL_EBI1_SIZE 0x3A000
-#define MSM_RESERVE_AUDIO_SIZE 0xF0000
-#define BOOTLOADER_BASE_ADDR 0x10000
+#define PMEM_KERNEL_EBI1_SIZE 0x3A000
+#define MSM_PMEM_AUDIO_SIZE 0x1F4000
#define BAHAMA_SLAVE_ID_FM_REG 0x02
#define FM_GPIO 83
#define BT_PCM_BCLK_MODE 0x88
@@ -132,26 +130,17 @@ static struct msm_i2c_platform_data msm_gsbi1_qup_i2c_pdata = {
};
#ifdef CONFIG_ARCH_MSM7X27A
-
-#define MSM_RESERVE_MDP_SIZE 0x2300000
-#define MSM_RESERVE_ADSP_SIZE 0x1200000
+#define MSM_PMEM_MDP_SIZE 0x2300000
+#define MSM_PMEM_ADSP_SIZE 0x1200000
#define CAMERA_ZSL_SIZE (SZ_1M * 60)
#ifdef CONFIG_ION_MSM
-#define MSM_ION_HEAP_NUM 5
+#define MSM_ION_HEAP_NUM 4
static struct platform_device ion_dev;
static int msm_ion_camera_size;
static int msm_ion_audio_size;
static int msm_ion_sf_size;
-static int msm_ion_camera_size_carving;
-#endif
#endif
-
-#define CAMERA_HEAP_BASE 0x0
-#ifdef CONFIG_CMA
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_DMA
-#else
-#define CAMERA_HEAP_TYPE ION_HEAP_TYPE_CARVEOUT
#endif
static struct android_usb_platform_data android_usb_pdata = {
@@ -337,8 +326,7 @@ static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = {
};
/* 8625 PM platform data */
-static struct msm_pm_platform_data
- msm8625_pm_data[MSM_PM_SLEEP_MODE_NR * CONFIG_NR_CPUS] = {
+static struct msm_pm_platform_data msm8625_pm_data[MSM_PM_SLEEP_MODE_NR * 2] = {
/* CORE0 entries */
[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
.idle_supported = 1,
@@ -365,7 +353,7 @@ static struct msm_pm_platform_data
.idle_enabled = 0,
.suspend_enabled = 0,
.latency = 500,
- .residency = 500,
+ .residency = 6000,
},
[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
@@ -384,7 +372,7 @@ static struct msm_pm_platform_data
.idle_enabled = 0,
.suspend_enabled = 0,
.latency = 500,
- .residency = 500,
+ .residency = 6000,
},
[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
@@ -396,44 +384,6 @@ static struct msm_pm_platform_data
.residency = 10,
},
- /* picked latency & redisdency values from 7x30 */
- [MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 0,
- .suspend_enabled = 0,
- .latency = 500,
- .residency = 500,
- },
-
- [MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 1,
- .suspend_enabled = 1,
- .latency = 2,
- .residency = 10,
- },
-
- /* picked latency & redisdency values from 7x30 */
- [MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 0,
- .suspend_enabled = 0,
- .latency = 500,
- .residency = 500,
- },
-
- [MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
- .idle_supported = 1,
- .suspend_supported = 1,
- .idle_enabled = 1,
- .suspend_enabled = 1,
- .latency = 2,
- .residency = 10,
- },
-
};
static struct msm_pm_boot_platform_data msm_pm_8625_boot_pdata __initdata = {
@@ -441,23 +391,61 @@ static struct msm_pm_boot_platform_data msm_pm_8625_boot_pdata __initdata = {
.v_addr = MSM_CFG_CTL_BASE,
};
-static unsigned reserve_mdp_size = MSM_RESERVE_MDP_SIZE;
-static int __init reserve_mdp_size_setup(char *p)
+static struct android_pmem_platform_data android_pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 1,
+ .memory_type = MEMTYPE_EBI1,
+};
+
+static struct platform_device android_pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 1,
+ .dev = { .platform_data = &android_pmem_adsp_pdata },
+};
+
+static unsigned pmem_mdp_size = MSM_PMEM_MDP_SIZE;
+static int __init pmem_mdp_size_setup(char *p)
{
- reserve_mdp_size = memparse(p, NULL);
+ pmem_mdp_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_mdp_size", reserve_mdp_size_setup);
+early_param("pmem_mdp_size", pmem_mdp_size_setup);
-static unsigned reserve_adsp_size = MSM_RESERVE_ADSP_SIZE;
-static int __init reserve_adsp_size_setup(char *p)
+static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
+static int __init pmem_adsp_size_setup(char *p)
{
- reserve_adsp_size = memparse(p, NULL);
+ pmem_adsp_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_adsp_size", reserve_adsp_size_setup);
+early_param("pmem_adsp_size", pmem_adsp_size_setup);
+
+static struct android_pmem_platform_data android_pmem_audio_pdata = {
+ .name = "pmem_audio",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 0,
+ .memory_type = MEMTYPE_EBI1,
+};
+
+static struct platform_device android_pmem_audio_device = {
+ .name = "android_pmem",
+ .id = 2,
+ .dev = { .platform_data = &android_pmem_audio_pdata },
+};
+
+static struct android_pmem_platform_data android_pmem_pdata = {
+ .name = "pmem",
+ .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
+ .cached = 1,
+ .memory_type = MEMTYPE_EBI1,
+};
+static struct platform_device android_pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &android_pmem_pdata },
+};
static u32 msm_calculate_batt_capacity(u32 current_voltage);
@@ -568,6 +556,9 @@ static struct platform_device qrd_vreg_gpio_ext_1p8v __devinitdata = {
static struct platform_device *common_devices[] __initdata = {
&android_usb_device,
+ &android_pmem_device,
+ &android_pmem_adsp_device,
+ &android_pmem_audio_device,
&msm_batt_device,
&msm_device_adspdec,
&msm_device_snd,
@@ -613,39 +604,30 @@ static struct platform_device *msm8625_evb_devices[] __initdata = {
&qrd_vreg_gpio_ext_1p8v,
};
-static unsigned reserve_kernel_ebi1_size = RESERVE_KERNEL_EBI1_SIZE;
-static int __init reserve_kernel_ebi1_size_setup(char *p)
+static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE;
+static int __init pmem_kernel_ebi1_size_setup(char *p)
{
- reserve_kernel_ebi1_size = memparse(p, NULL);
+ pmem_kernel_ebi1_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_kernel_ebi1_size", reserve_kernel_ebi1_size_setup);
+early_param("pmem_kernel_ebi1_size", pmem_kernel_ebi1_size_setup);
-static unsigned reserve_audio_size = MSM_RESERVE_AUDIO_SIZE;
-static int __init reserve_audio_size_setup(char *p)
+static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE;
+static int __init pmem_audio_size_setup(char *p)
{
- reserve_audio_size = memparse(p, NULL);
+ pmem_audio_size = memparse(p, NULL);
return 0;
}
-early_param("reserve_audio_size", reserve_audio_size_setup);
+early_param("pmem_audio_size", pmem_audio_size_setup);
static void fix_sizes(void)
{
if (get_ddr_size() > SZ_512M)
- reserve_adsp_size = CAMERA_ZSL_SIZE;
-
+ pmem_adsp_size = CAMERA_ZSL_SIZE;
#ifdef CONFIG_ION_MSM
- msm_ion_audio_size = MSM_RESERVE_AUDIO_SIZE;
- msm_ion_sf_size = reserve_mdp_size;
-#ifdef CONFIG_CMA
- if (get_ddr_size() > SZ_256M)
- reserve_adsp_size = CAMERA_ZSL_SIZE;
- msm_ion_camera_size = reserve_adsp_size;
- msm_ion_camera_size_carving = 0;
-#else
- msm_ion_camera_size = reserve_adsp_size;
- msm_ion_camera_size_carving = msm_ion_camera_size;
-#endif
+ msm_ion_camera_size = pmem_adsp_size;
+ msm_ion_audio_size = (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE);
+ msm_ion_sf_size = pmem_mdp_size;
#endif
}
@@ -655,45 +637,31 @@ static struct ion_co_heap_pdata co_ion_pdata = {
.adjacent_mem_id = INVALID_HEAP_ID,
.align = PAGE_SIZE,
};
-
-static struct ion_co_heap_pdata co_mm_ion_pdata = {
- .adjacent_mem_id = INVALID_HEAP_ID,
- .align = PAGE_SIZE,
-};
-
-static u64 msm_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device ion_cma_device = {
- .name = "ion-cma-device",
- .id = -1,
- .dev = {
- .dma_mask = &msm_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
#endif
/**
* These heaps are listed in the order they will be allocated.
* Don't swap the order unless you know what you are doing!
*/
-struct ion_platform_heap qrd7627a_heaps[] = {
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
+ .heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
.type = ION_HEAP_TYPE_SYSTEM,
.name = ION_VMALLOC_HEAP_NAME,
},
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- /* ION_ADSP = CAMERA */
+ /* PMEM_ADSP = CAMERA */
{
.id = ION_CAMERA_HEAP_ID,
- .type = CAMERA_HEAP_TYPE,
+ .type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_CAMERA_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .extra_data = (void *)&co_mm_ion_pdata,
- .priv = (void *)&ion_cma_device.dev,
+ .extra_data = (void *)&co_ion_pdata,
},
- /* AUDIO HEAP 1*/
+ /* PMEM_AUDIO */
{
.id = ION_AUDIO_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
@@ -701,7 +669,7 @@ struct ion_platform_heap qrd7627a_heaps[] = {
.memory_type = ION_EBI_TYPE,
.extra_data = (void *)&co_ion_pdata,
},
- /* ION_MDP = SF */
+ /* PMEM_MDP = SF */
{
.id = ION_SF_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
@@ -709,23 +677,8 @@ struct ion_platform_heap qrd7627a_heaps[] = {
.memory_type = ION_EBI_TYPE,
.extra_data = (void *)&co_ion_pdata,
},
- /* AUDIO HEAP 2*/
- {
- .id = ION_AUDIO_HEAP_BL_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_AUDIO_BL_HEAP_NAME,
- .memory_type = ION_EBI_TYPE,
- .extra_data = (void *)&co_ion_pdata,
- .base = BOOTLOADER_BASE_ADDR,
- },
-
#endif
-};
-
-static struct ion_platform_data ion_pdata = {
- .nr = MSM_ION_HEAP_NUM,
- .has_outer_cache = 1,
- .heaps = qrd7627a_heaps,
+ }
};
static struct platform_device ion_dev = {
@@ -746,22 +699,63 @@ static struct memtype_reserve msm7627a_reserve_table[] __initdata = {
},
};
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
+ &android_pmem_adsp_pdata,
+ &android_pmem_audio_pdata,
+ &android_pmem_pdata,
+};
+#endif
+#endif
+
+static void __init size_pmem_devices(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+ android_pmem_adsp_pdata.size = pmem_adsp_size;
+ android_pmem_pdata.size = pmem_mdp_size;
+ android_pmem_audio_pdata.size = pmem_audio_size;
+#endif
+#endif
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+static void __init reserve_memory_for(struct android_pmem_platform_data *p)
+{
+ msm7627a_reserve_table[p->memory_type].size += p->size;
+}
+#endif
+#endif
+
+static void __init reserve_pmem_memory(void)
+{
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
+ reserve_memory_for(pmem_pdata_array[i]);
+
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
+#endif
+#endif
+}
+
static void __init size_ion_devices(void)
{
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
ion_pdata.heaps[1].size = msm_ion_camera_size;
- ion_pdata.heaps[2].size = RESERVE_KERNEL_EBI1_SIZE;
+ ion_pdata.heaps[2].size = msm_ion_audio_size;
ion_pdata.heaps[3].size = msm_ion_sf_size;
- ion_pdata.heaps[4].size = msm_ion_audio_size;
#endif
}
static void __init reserve_ion_memory(void)
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
- msm7627a_reserve_table[MEMTYPE_EBI1].size += RESERVE_KERNEL_EBI1_SIZE;
- msm7627a_reserve_table[MEMTYPE_EBI1].size +=
- msm_ion_camera_size_carving;
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += msm_ion_camera_size;
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += msm_ion_audio_size;
msm7627a_reserve_table[MEMTYPE_EBI1].size += msm_ion_sf_size;
#endif
}
@@ -769,6 +763,8 @@ static void __init reserve_ion_memory(void)
static void __init msm7627a_calculate_reserve_sizes(void)
{
fix_sizes();
+ size_pmem_devices();
+ reserve_pmem_memory();
size_ion_devices();
reserve_ion_memory();
}
@@ -787,22 +783,14 @@ static struct reserve_info msm7627a_reserve_info __initdata = {
static void __init msm7627a_reserve(void)
{
reserve_info = &msm7627a_reserve_info;
- memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
- memblock_remove(BOOTLOADER_BASE_ADDR, msm_ion_audio_size);
msm_reserve();
-#ifdef CONFIG_CMA
- dma_declare_contiguous(
- &ion_cma_device.dev,
- msm_ion_camera_size,
- CAMERA_HEAP_BASE,
- 0x26000000);
-#endif
+ memblock_remove(MSM8625_WARM_BOOT_PHYS, SZ_32);
}
static void __init msm8625_reserve(void)
{
+ memblock_remove(MSM8625_SECONDARY_PHYS, SZ_8);
memblock_remove(MSM8625_NON_CACHE_MEM, SZ_2K);
- memblock_remove(MSM8625_CPU_PHYS, SZ_8);
msm7627a_reserve();
}
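
Both this file and board-msm7x27a.c above shrink msm8625_pm_data from MSM_PM_SLEEP_MODE_NR * CONFIG_NR_CPUS entries to two CPUs' worth and drop the core-2/3 rows. The MSM_PM_MODE(cpu, mode) index is presumably the usual two-dimensional-to-flat mapping; a sketch under the assumption index = cpu * MSM_PM_SLEEP_MODE_NR + mode, with the mode count itself assumed:

/* Assumed flattening behind MSM_PM_MODE(cpu, mode); the real macro and
 * MSM_PM_SLEEP_MODE_NR live elsewhere in the tree and may differ. */
#include <stdio.h>

#define SLEEP_MODE_NR 4	/* assumed number of sleep modes */
#define PM_MODE(cpu, mode) ((cpu) * SLEEP_MODE_NR + (mode))

int main(void)
{
	int table[2 * SLEEP_MODE_NR] = { 0 };	/* two CPUs' worth of rows */

	table[PM_MODE(1, 2)] = 1;	/* CPU1, third sleep mode */
	printf("flat index %d set to %d\n", PM_MODE(1, 2), table[PM_MODE(1, 2)]);
	return 0;
}
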
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index d0fba70..aa67690 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -33,7 +33,7 @@
#include <mach/msm_smd.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_rtb.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include "clock.h"
#include "devices.h"
#include "footswitch.h"
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 8d4307c..ffa3c38 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/io.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_iomap.h>
#include <mach/irqs-8930.h>
#include <mach/rpm.h>
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 77d744b..7d93fe7 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -15,11 +15,12 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/msm_rotator.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/gpio.h>
#include <linux/coresight.h>
#include <asm/clkdev.h>
#include <linux/msm_kgsl.h>
+#include <linux/android_pmem.h>
#include <mach/irqs-8960.h>
#include <mach/dma.h>
#include <linux/dma-mapping.h>
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index f2520b7..1432902 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -365,25 +365,25 @@ static struct msm_iommu_dev vcodec_b_iommu = {
static struct msm_iommu_dev gfx3d_iommu = {
.name = "gfx3d",
.ncb = 3,
- .ttbr_split = 1,
+ .ttbr_split = 2,
};
static struct msm_iommu_dev gfx3d1_iommu = {
.name = "gfx3d1",
.ncb = 3,
- .ttbr_split = 1,
+ .ttbr_split = 2,
};
static struct msm_iommu_dev gfx2d0_iommu = {
.name = "gfx2d0",
.ncb = 2,
- .ttbr_split = 1,
+ .ttbr_split = 2,
};
static struct msm_iommu_dev gfx2d1_iommu = {
.name = "gfx2d1",
.ncb = 2,
- .ttbr_split = 1,
+ .ttbr_split = 2,
};
static struct msm_iommu_dev vcap_iommu = {
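
The gfx IOMMU hunks above move ttbr_split from 1 back to 2. Reading the field as an ARMv7 TTBCR.N-style split (my interpretation; the patch itself does not say), TTBR0 translates the low 2^(32-N) bytes and TTBR1 everything above, so the change shrinks the TTBR0 window from 2 GB to 1 GB:

/* TTBR0 window size for a TTBCR.N-style split of N; the reading of
 * ttbr_split as N follows the ARMv7 model and is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned n;

	for (n = 0; n <= 2; n++)
		printf("split %u: TTBR0 covers 0x%llx bytes\n",
		       n, 1ULL << (32 - n));	/* 4 GB, 2 GB, 1 GB */
	return 0;
}
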
diff --git a/arch/arm/mach-msm/devices-msm7x27.c b/arch/arm/mach-msm/devices-msm7x27.c
index 69d7430..4619cca 100644
--- a/arch/arm/mach-msm/devices-msm7x27.c
+++ b/arch/arm/mach-msm/devices-msm7x27.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <mach/kgsl.h>
+#include <linux/msm_kgsl.h>
#include <linux/regulator/machine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 9d896ad..a3fe520 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <mach/kgsl.h>
+#include <linux/msm_kgsl.h>
#include <linux/regulator/machine.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -882,14 +882,6 @@ static struct resource kgsl_3d0_resources[] = {
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
- .gpu_freq = 300000000,
- .bus_freq = 200000000,
- },
- {
- .gpu_freq = 266000000,
- .bus_freq = 200000000,
- },
- {
.gpu_freq = 245760000,
.bus_freq = 200000000,
},
@@ -898,12 +890,12 @@ static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.bus_freq = 160000000,
},
{
- .gpu_freq = 192000000,
+ .gpu_freq = 133330000,
.bus_freq = 0,
},
},
.init_level = 0,
- .num_levels = 5,
+ .num_levels = 3,
.set_grp_async = set_grp_xbar_async,
.idle_timeout = HZ,
.strtstp_sleepwake = true,
@@ -937,12 +929,19 @@ void __init msm8x25_kgsl_3d0_init(void)
if (cpu_is_msm8625()) {
kgsl_3d0_pdata.idle_timeout = HZ/5;
kgsl_3d0_pdata.strtstp_sleepwake = false;
-
- if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2) {
- /* 8x25 v2.0 & above supports a higher GPU frequency */
- kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 320000000;
- kgsl_3d0_pdata.pwrlevel[0].bus_freq = 200000000;
- }
+ kgsl_3d0_pdata.num_levels = 4;
+
+ kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 320000000;
+ kgsl_3d0_pdata.pwrlevel[0].bus_freq = 200000000;
+
+ kgsl_3d0_pdata.pwrlevel[1].gpu_freq = 245760000;
+ kgsl_3d0_pdata.pwrlevel[1].bus_freq = 200000000;
+
+ kgsl_3d0_pdata.pwrlevel[2].gpu_freq = 192000000;
+ kgsl_3d0_pdata.pwrlevel[2].bus_freq = 160000000;
+
+ kgsl_3d0_pdata.pwrlevel[3].gpu_freq = 96000000;
+ kgsl_3d0_pdata.pwrlevel[3].bus_freq = 0;
}
}
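
The devices-msm7x27a.c hunks above trim the default GPU table to three pwrlevels and rebuild a four-level table for 8x25 at init time. The table contract is simple: pwrlevel[] is ordered fastest-first, num_levels bounds the walk, and init_level indexes the boot entry. A sketch that walks a table shaped like the 8x25 one built above:

/* Walking a fastest-first kgsl-style pwrlevel table; the struct mirrors
 * only the two fields used above, and the values are the 8x25 ones. */
#include <stdio.h>

struct pwrlevel { unsigned int gpu_freq; unsigned int bus_freq; };

static const struct pwrlevel levels[] = {
	{ 320000000, 200000000 },
	{ 245760000, 200000000 },
	{ 192000000, 160000000 },
	{  96000000,         0 },
};

int main(void)
{
	int num_levels = 4, init_level = 0, level;

	for (level = init_level; level < num_levels; level++)
		printf("level %d: gpu %u Hz, bus %u Hz\n",
		       level, levels[level].gpu_freq, levels[level].bus_freq);
	return 0;
}
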
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 3b45775..d52e190 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -18,6 +18,7 @@
#include <linux/msm_rotator.h>
#include <linux/dma-mapping.h>
#include <linux/msm_kgsl.h>
+#include <linux/android_pmem.h>
#include <linux/regulator/machine.h>
#include <linux/init.h>
#include <mach/irqs.h>
@@ -25,7 +26,7 @@
#include <mach/dma.h>
#include <mach/board.h>
#include <asm/clkdev.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include "devices.h"
#include "footswitch.h"
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index dbb3d41..98097a0 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/consumer.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/irqs.h>
#include <mach/dma.h>
#include <asm/mach/mmc.h>
@@ -45,6 +45,7 @@
#ifdef CONFIG_MSM_DSPS
#include <mach/msm_dsps.h>
#endif
+#include <linux/android_pmem.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <mach/mdm.h>
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 44cedc9..0e9e644 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -379,6 +379,12 @@ extern struct platform_device msm_dsps_device_8064;
extern struct platform_device *msm_8974_stub_regulator_devices[];
extern int msm_8974_stub_regulator_devices_len;
+extern struct platform_device msm8960_cpu_idle_device;
+extern struct platform_device msm8930_cpu_idle_device;
+extern struct platform_device apq8064_cpu_idle_device;
+
+extern struct platform_device msm8960_msm_gov_device;
+extern struct platform_device msm8930_msm_gov_device;
extern struct platform_device apq8064_msm_gov_device;
extern struct platform_device msm_bus_8930_apps_fabric;
diff --git a/arch/arm/mach-msm/early_random.c b/arch/arm/mach-msm/early_random.c
deleted file mode 100644
index e315b86..0000000
--- a/arch/arm/mach-msm/early_random.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-
-#include <mach/scm.h>
-
-#include <asm/io.h>
-#include <asm/cacheflush.h>
-
-#define TZ_SVC_CRYPTO 10
-#define PRNG_CMD_ID 0x01
-
-static int use_arch_random = 1;
-struct tz_prng_data {
- uint8_t *out_buf;
- uint32_t out_buf_sz;
-} __packed;
-
-DEFINE_SCM_BUFFER(common_scm_buf)
-DEFINE_MUTEX(arch_random_lock);
-#define RANDOM_BUFFER_SIZE PAGE_SIZE
-char random_buffer[RANDOM_BUFFER_SIZE] __aligned(PAGE_SIZE);
-
-int arch_get_random_common(void *v, size_t size)
-{
- struct tz_prng_data data;
- int ret;
- u32 resp;
-
- if (!use_arch_random)
- return 0;
-
- if (size > sizeof(random_buffer))
- return 0;
-
- mutex_lock(&arch_random_lock);
- data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
- data.out_buf_sz = size;
-
- ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
- sizeof(data), &resp, sizeof(resp),
- common_scm_buf, SCM_BUFFER_SIZE(common_scm_buf));
- if (!ret) {
- dmac_inv_range(random_buffer, random_buffer +
- RANDOM_BUFFER_SIZE);
- outer_inv_range(
- (unsigned long) virt_to_phys(random_buffer),
- (unsigned long) virt_to_phys(random_buffer) +
- RANDOM_BUFFER_SIZE);
- memcpy(v, random_buffer, size);
- }
- mutex_unlock(&arch_random_lock);
- return !ret;
-}
-
-int arch_get_random_long(unsigned long *v)
-{
- return arch_get_random_common(v, sizeof(unsigned long));
-}
-
-int arch_get_random_int(unsigned int *v)
-{
- return arch_get_random_common(v, sizeof(unsigned int));
-}
-
-int arch_random_init(void)
-{
- use_arch_random = 0;
-
- return 0;
-}
-module_init(arch_random_init);
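
The deleted early_random.c above is a compact example of a secure-world call that writes kernel memory: the driver hands TrustZone the buffer's physical address, then invalidates the CPU's cache lines over that buffer before copying the result out, because the firmware's writes bypass the cache. A skeleton of the pattern; scm_fill(), cache_invalidate() and virt_to_phys_addr() are hypothetical stand-ins, not real kernel APIs:

/* Skeleton of the call-then-invalidate pattern from the deleted file.
 * scm_fill(), cache_invalidate() and virt_to_phys_addr() are
 * hypothetical stand-ins, not real kernel APIs. */
#include <stddef.h>
#include <string.h>

extern int scm_fill(void *phys, size_t len);           /* stand-in */
extern void cache_invalidate(void *start, size_t len); /* stand-in */
extern void *virt_to_phys_addr(void *v);               /* stand-in */

static char random_buffer[4096];

int get_hw_random(void *out, size_t size)
{
	if (size > sizeof(random_buffer))
		return 0;
	/* Firmware fills the buffer through its physical alias ... */
	if (scm_fill(virt_to_phys_addr(random_buffer), size))
		return 0;
	/* ... so drop any stale cached lines before reading it back. */
	cache_invalidate(random_buffer, sizeof(random_buffer));
	memcpy(out, random_buffer, size);
	return 1;
}
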
diff --git a/arch/arm/mach-msm/hardware_self_adapt.c b/arch/arm/mach-msm/hardware_self_adapt.c
index 7d84ebc..2428ef3 100644
--- a/arch/arm/mach-msm/hardware_self_adapt.c
+++ b/arch/arm/mach-msm/hardware_self_adapt.c
@@ -808,6 +808,41 @@ unsigned int get_framebuffer_size(void)
}
+unsigned int get_mdp_pmem_size(void)
+{
+	unsigned int mdp_pmem_size = 0;
+	lcd_type lcd_resolution = get_hw_lcd_resolution_type();
+
+	switch (lcd_resolution) {
+	case LCD_IS_QVGA:
+	case LCD_IS_HVGA:
+		mdp_pmem_size = 0x1500000; /* 21 MB */
+		break;
+	case LCD_IS_WVGA:
+	case LCD_IS_FWVGA:
+#ifdef CONFIG_CHIL360_RAM_STOCK
+		mdp_pmem_size = 0x1C00000; /* 28 MB */
+#elif defined(CONFIG_CHIL360_RAM_MEDIUM)
+		mdp_pmem_size = 0x1A00000; /* 26 MB */
+#elif defined(CONFIG_CHIL360_RAM_HIGH)
+		mdp_pmem_size = 0x1400000; /* 20 MB */
+#elif defined(CONFIG_CHIL360_RAM_EXTRA_HIGH)
+		/* mdp_pmem_size = 0xB00000;	(11 MB) */
+		mdp_pmem_size = 0x800000; /* 8 MB */
+#endif
+		break;
+	case LCD_IS_QHD:
+		mdp_pmem_size = 0x2300000; /* 35 MB */
+		break;
+	default:
+		mdp_pmem_size = 0x2300000; /* 35 MB */
+		break;
+	}
+
+ return mdp_pmem_size;
+}
+
/*===========================================================================
FUNCTION get_vibrator_voltage
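
As a sanity check on the budgets in get_mdp_pmem_size() above: one full-screen RGBA8888 surface at qHD (960x540) costs 960 * 540 * 4 bytes, roughly 2 MiB, so the 0x2300000 (35 MiB) qHD budget holds about 17 such buffers. The buffers-per-budget reading is an interpretation, not something the patch states:

/* Quick arithmetic behind the MDP pmem budgets; the buffers-per-budget
 * reading is an interpretation, not documented by the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long qhd_frame = 960UL * 540 * 4;	/* RGBA8888, ~2 MiB */
	unsigned long budget    = 0x2300000;		/* 35 MiB          */

	printf("frame %lu bytes, ~%lu full-screen buffers fit\n",
	       qhd_frame, budget / qhd_frame);
	return 0;
}
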
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 566b59a..4e3afd4 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -449,10 +449,7 @@ struct msm_panel_common_pdata {
u32 ov1_wb_size; /* overlay1 writeback size */
u32 mem_hid;
char cont_splash_enabled;
- u32 splash_screen_addr;
- u32 splash_screen_size;
char mdp_iommu_split_domain;
- u32 avtimer_phy;
};
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index e929189..4bfbe61 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -35,9 +35,6 @@ extern struct platform_device *msm_iommu_root_dev;
*/
#define MAX_NUM_MIDS 32
-/* Maximum number of SMT entries allowed by the system */
-#define MAX_NUM_SMR 128
-
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
* name Human-readable name given to this IOMMU HW instance
@@ -72,10 +69,6 @@ struct msm_iommu_ctx_dev {
* @irq: Interrupt number
* @clk: The bus clock for this IOMMU hardware instance
* @pclk: The clock for the IOMMU bus interconnect
- * @aclk: Alternate clock for this IOMMU core, if any
- * @name: Human-readable name of this IOMMU device
- * @gdsc: Regulator needed to power this HW block (v2 only)
- * @nsmr: Size of the SMT on this HW block (v2 only)
*
* A msm_iommu_drvdata holds the global driver data about a single piece
* of an IOMMU hardware instance.
@@ -86,10 +79,8 @@ struct msm_iommu_drvdata {
int ttbr_split;
struct clk *clk;
struct clk *pclk;
- struct clk *aclk;
const char *name;
struct regulator *gdsc;
- unsigned int nsmr;
};
/**
@@ -98,10 +89,6 @@ struct msm_iommu_drvdata {
* @pdev: Platform device associated wit this HW instance
* @attached_elm: List element for domains to track which devices are
* attached to them
- * @attached_domain Domain currently attached to this context (if any)
- * @name Human-readable name of this context device
- * @sids List of Stream IDs mapped to this context (v2 only)
- * @nsid Number of Stream IDs mapped to this context (v2 only)
*
* A msm_iommu_ctx_drvdata holds the driver data for a single context bank
* within each IOMMU hardware instance
@@ -112,8 +99,6 @@ struct msm_iommu_ctx_drvdata {
struct list_head attached_elm;
struct iommu_domain *attached_domain;
const char *name;
- u32 sids[MAX_NUM_SMR];
- unsigned int nsid;
};
/*
@@ -124,59 +109,6 @@ struct msm_iommu_ctx_drvdata {
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
-enum {
- PROC_APPS,
- PROC_GPU,
- PROC_MAX
-};
-
-/* Expose structure to allow kgsl iommu driver to use the same structure to
- * communicate to GPU the addresses of the flag and turn variables.
- */
-struct remote_iommu_petersons_spinlock {
- uint32_t flag[PROC_MAX];
- uint32_t turn;
-};
-
-#ifdef CONFIG_MSM_IOMMU
-void *msm_iommu_lock_initialize(void);
-void msm_iommu_mutex_lock(void);
-void msm_iommu_mutex_unlock(void);
-#else
-static inline void *msm_iommu_lock_initialize(void)
-{
- return NULL;
-}
-static inline void msm_iommu_mutex_lock(void) { }
-static inline void msm_iommu_mutex_unlock(void) { }
-#endif
-
-#ifdef CONFIG_MSM_IOMMU_GPU_SYNC
-void msm_iommu_remote_p0_spin_lock(void);
-void msm_iommu_remote_p0_spin_unlock(void);
-
-#define msm_iommu_remote_lock_init() _msm_iommu_remote_spin_lock_init()
-#define msm_iommu_remote_spin_lock() msm_iommu_remote_p0_spin_lock()
-#define msm_iommu_remote_spin_unlock() msm_iommu_remote_p0_spin_unlock()
-#else
-#define msm_iommu_remote_lock_init()
-#define msm_iommu_remote_spin_lock()
-#define msm_iommu_remote_spin_unlock()
-#endif
-
-/* Allows kgsl iommu driver to acquire lock */
-#define msm_iommu_lock() \
- do { \
- msm_iommu_mutex_lock(); \
- msm_iommu_remote_spin_lock(); \
- } while (0)
-
-#define msm_iommu_unlock() \
- do { \
- msm_iommu_remote_spin_unlock(); \
- msm_iommu_mutex_unlock(); \
- } while (0)
-
#ifdef CONFIG_MSM_IOMMU
/*
* Look up an IOMMU context device by its context name. NULL if none found.
@@ -191,6 +123,7 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
}
#endif
+#endif
static inline int msm_soc_version_supports_iommu_v1(void)
{
@@ -214,4 +147,3 @@ static inline int msm_soc_version_supports_iommu_v1(void)
}
return 1;
}
-#endif
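Note on the block removed above: it implemented the lock shared with the GPU as a kernel mutex (APPS-side serialization) wrapped around a two-party Peterson's spinlock over memory visible to both processors. A minimal sketch of the Peterson's protocol that struct remote_iommu_petersons_spinlock encodes, assuming the PROC_APPS/PROC_GPU indices from the removed enum and leaving out the barriers and uncached mappings real hardware needs:

/* Illustrative only -- not the driver's implementation. */
static void peterson_lock(struct remote_iommu_petersons_spinlock *l, int me)
{
	int other = !me;		/* PROC_APPS (0) vs PROC_GPU (1) */

	l->flag[me] = 1;		/* announce intent to enter */
	l->turn = other;		/* offer the turn to the peer */
	while (l->flag[other] && l->turn == other)
		;			/* spin while the peer holds priority */
}

static void peterson_unlock(struct remote_iommu_petersons_spinlock *l, int me)
{
	l->flag[me] = 0;		/* leave the critical section */
}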
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
deleted file mode 100644
index a22b628..0000000
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ARCH_ARM_MACH_KGSL_H
-#define _ARCH_ARM_MACH_KGSL_H
-
-/* Clock flags to show which clocks should be controled by a given platform */
-#define KGSL_CLK_SRC 0x00000001
-#define KGSL_CLK_CORE 0x00000002
-#define KGSL_CLK_IFACE 0x00000004
-#define KGSL_CLK_MEM 0x00000008
-#define KGSL_CLK_MEM_IFACE 0x00000010
-#define KGSL_CLK_AXI 0x00000020
-
-#define KGSL_MAX_PWRLEVELS 5
-
-#define KGSL_CONVERT_TO_MBPS(val) \
- (val*1000*1000U)
-
-#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory"
-#define KGSL_3D0_IRQ "kgsl_3d0_irq"
-#define KGSL_2D0_REG_MEMORY "kgsl_2d0_reg_memory"
-#define KGSL_2D0_IRQ "kgsl_2d0_irq"
-#define KGSL_2D1_REG_MEMORY "kgsl_2d1_reg_memory"
-#define KGSL_2D1_IRQ "kgsl_2d1_irq"
-
-#define ADRENO_CHIPID(_co, _ma, _mi, _pa) \
- ((((_co) & 0xFF) << 24) | \
- (((_ma) & 0xFF) << 16) | \
- (((_mi) & 0xFF) << 8) | \
- ((_pa) & 0xFF))
-
-enum kgsl_iommu_context_id {
- KGSL_IOMMU_CONTEXT_USER = 0,
- KGSL_IOMMU_CONTEXT_PRIV = 1,
-};
-
-struct kgsl_iommu_ctx {
- const char *iommu_ctx_name;
- enum kgsl_iommu_context_id ctx_id;
-};
-
-struct kgsl_device_iommu_data {
- const struct kgsl_iommu_ctx *iommu_ctxs;
- int iommu_ctx_count;
- unsigned int physstart;
- unsigned int physend;
-};
-
-struct kgsl_pwrlevel {
- unsigned int gpu_freq;
- unsigned int bus_freq;
- unsigned int io_fraction;
-};
-
-struct kgsl_device_platform_data {
- struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS];
- int init_level;
- int num_levels;
- int (*set_grp_async)(void);
- unsigned int idle_timeout;
- bool strtstp_sleepwake;
- unsigned int nap_allowed;
- unsigned int clk_map;
- unsigned int idle_needed;
- struct msm_bus_scale_pdata *bus_scale_table;
- struct kgsl_device_iommu_data *iommu_data;
- int iommu_count;
- struct msm_dcvs_core_info *core_info;
- unsigned int chipid;
-};
-
-#endif
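For the record, ADRENO_CHIPID in the deleted header is plain byte packing: each of the four fields is masked to 8 bits and shifted into one word, so values above 255 truncate silently. A worked example with made-up version numbers:

/* ADRENO_CHIPID(2, 2, 0, 5)
 *   = (2 << 24) | (2 << 16) | (0 << 8) | 5
 *   = 0x02020005
 */
unsigned int chipid = ADRENO_CHIPID(2, 2, 0, 5);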
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index d1e8c75..0c456a7 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -84,7 +84,6 @@ int platform_physical_remove_pages(u64, u64);
int platform_physical_active_pages(u64, u64);
int platform_physical_low_power_pages(u64, u64);
unsigned long get_ddr_size(void);
-int msm_get_memory_type_from_name(const char *memtype_name);
extern int (*change_memory_power)(u64, u64, int);
@@ -125,23 +124,6 @@ void find_membank0_hole(void);
(virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
#endif
-/*
- * Need a temporary unique variable that no one will ever see to
- * hold the compat string. Line number gives this easily.
- * Need another layer of indirection to get __LINE__ to expand
- * properly as opposed to appending and ending up with
- * __compat___LINE__
- */
-#define __CONCAT(a, b) ___CONCAT(a, b)
-#define ___CONCAT(a, b) a ## b
-
-#define EXPORT_COMPAT(com) \
-static char *__CONCAT(__compat_, __LINE__) __used \
- __attribute((__section__(".exportcompat.init"))) = com
-
-extern char *__compat_exports_start[];
-extern char *__compat_exports_end[];
-
#endif
#if defined CONFIG_ARCH_MSM_SCORPION || defined CONFIG_ARCH_MSM_KRAIT
@@ -159,5 +141,4 @@ extern char *__compat_exports_end[];
#ifndef CONFIG_ARCH_MSM7X27
#define CONSISTENT_DMA_SIZE (SZ_1M * 14)
-
#endif
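The EXPORT_COMPAT machinery removed above depends on a standard preprocessor idiom: __LINE__ only expands to its number if it passes through one more macro layer before ## pastes tokens. A minimal sketch of why the extra indirection exists (names here are illustrative):

/* One layer: ## pastes the unexpanded token. */
#define BAD_CONCAT(a, b) a ## b
/* BAD_CONCAT(__compat_, __LINE__) -> __compat___LINE__ */

/* Two layers: arguments are macro-expanded first. */
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a, b)
/* CONCAT(__compat_, __LINE__) on line 42 -> __compat_42 */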
diff --git a/arch/arm/mach-msm/include/mach/msm_bus.h b/arch/arm/mach-msm/include/mach/msm_bus.h
index 6b94a43..6d7a533 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/input.h>
-#include <linux/platform_device.h>
/*
* Macros for clients to convert their data to ib and ab
@@ -44,8 +43,8 @@
struct msm_bus_vectors {
int src; /* Master */
int dst; /* Slave */
- uint64_t ab; /* Arbitrated bandwidth */
- uint64_t ib; /* Instantaneous bandwidth */
+ unsigned int ab; /* Arbitrated bandwidth */
+ unsigned int ib; /* Instantaneous bandwidth */
};
struct msm_bus_paths {
@@ -78,24 +77,11 @@ struct msm_bus_scale_pdata {
uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
void msm_bus_scale_unregister_client(uint32_t cl);
-struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
-void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
/* AXI Port configuration APIs */
int msm_bus_axi_porthalt(int master_port);
int msm_bus_axi_portunhalt(int master_port);
#else
-static inline struct msm_bus_scale_pdata
-*msm_bus_cl_get_pdata(struct platform_device *pdev)
-{
- return NULL;
-}
-
-static inline void
-msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
-{
-}
-
static inline uint32_t
msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
{
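This hunk narrows the vector fields back to unsigned int, but the client flow around them is unchanged: declare per-usecase vectors, register, then vote by usecase index. A hedged usage sketch of the API declared in this header (master/slave IDs and bandwidth values are placeholders, not taken from this patch):

static struct msm_bus_vectors example_idle_vec[] = {
	{ .src = MSM_BUS_MASTER_FIRST, .dst = MSM_BUS_SLAVE_FIRST,
	  .ab = 0, .ib = 0 },
};
static struct msm_bus_vectors example_active_vec[] = {
	{ .src = MSM_BUS_MASTER_FIRST, .dst = MSM_BUS_SLAVE_FIRST,
	  .ab = 100000000, .ib = 200000000 },
};
static struct msm_bus_paths example_usecases[] = {
	{ .num_paths = 1, .vectors = example_idle_vec },
	{ .num_paths = 1, .vectors = example_active_vec },
};
static struct msm_bus_scale_pdata example_pdata = {
	.usecase = example_usecases,
	.num_usecases = ARRAY_SIZE(example_usecases),
	.name = "example-client",
};

uint32_t cl = msm_bus_scale_register_client(&example_pdata);
if (cl)
	msm_bus_scale_client_update_request(cl, 1);	/* vote usecase 1 */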
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs.h b/arch/arm/mach-msm/include/mach/msm_dcvs.h
index db59d42..fa7e6f0 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,12 +18,6 @@
#define CORE_NAME_MAX (32)
#define CORES_MAX (10)
-#define CPU_OFFSET 1 /* used to notify TZ the core number */
-#define GPU_OFFSET (CORES_MAX * 2/3) /* there will be more cpus than gpus,
- * let the GPU be assigned fewer core
- * elements and start later
- */
-
enum msm_core_idle_state {
MSM_DCVS_IDLE_ENTER,
MSM_DCVS_IDLE_EXIT,
@@ -36,14 +30,43 @@ enum msm_core_control_event {
MSM_DCVS_DISABLE_HIGH_LATENCY_MODES,
};
-struct msm_gov_platform_data {
- struct msm_dcvs_core_info *info;
- int latency;
+/**
+ * struct msm_dcvs_idle
+ *
+ * API for idle code to register and send idle enter/exit
+ * notifications to msm_dcvs driver.
+ */
+struct msm_dcvs_idle {
+ const char *core_name;
+ /* Enable/Disable idle state/notifications */
+ int (*enable)(struct msm_dcvs_idle *self,
+ enum msm_core_control_event event);
};
/**
+ * msm_dcvs_idle_source_register
+ * @drv: Pointer to the source driver
+ * @return: Handle to be used for sending idle state notifications.
+ *
+ * Register the idle driver with the msm_dcvs driver to send idle
+ * state notifications for the core.
+ */
+extern int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv);
+
+/**
+ * msm_dcvs_idle_source_unregister
+ * @drv: Pointer to the source driver
+ * @return:
+ * 0 on success
+ * -EINVAL
+ *
+ * Description: Unregister the idle driver with the msm_dcvs driver
+ */
+extern int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv);
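Taken together, the struct and register/unregister calls restored here form a small contract for the idle code: provide a core name and an enable callback, then feed idle pulses through msm_dcvs_idle() using the returned handle. A minimal sketch, with the core name and callback body as assumptions:

static int example_idle_enable(struct msm_dcvs_idle *self,
			       enum msm_core_control_event event)
{
	/* Illustrative: arm or disarm idle notifications per @event. */
	return 0;
}

static struct msm_dcvs_idle example_idle = {
	.core_name = "cpu0",		/* placeholder name */
	.enable = example_idle_enable,
};

int handle = msm_dcvs_idle_source_register(&example_idle);
/* later, from the cpuidle path: */
msm_dcvs_idle(handle, MSM_DCVS_IDLE_ENTER, 0);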
+
+/**
* msm_dcvs_idle
- * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ * @handle: Handle provided back at registration
* @state: The enter/exit idle state the core is in
* @iowaited: iowait in us
 * on MSM_DCVS_IDLE_EXIT.
@@ -55,7 +78,7 @@ struct msm_gov_platform_data {
*
* Send idle state notifications to the msm_dcvs driver
*/
-int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
+int msm_dcvs_idle(int handle, enum msm_core_idle_state state,
uint32_t iowaited);
/**
@@ -65,21 +88,16 @@ int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
* before the sink driver can be registered.
*/
struct msm_dcvs_core_info {
- int num_cores;
- int *sensors;
- struct msm_dcvs_freq_entry *freq_tbl;
- struct msm_dcvs_core_param core_param;
- struct msm_dcvs_algo_param algo_param;
- struct msm_dcvs_energy_curve_coeffs energy_coeffs;
- struct msm_dcvs_power_params power_param;
+ struct msm_dcvs_freq_entry *freq_tbl;
+ struct msm_dcvs_core_param core_param;
+ struct msm_dcvs_algo_param algo_param;
};
/**
* msm_dcvs_register_core
- * @type: whether this is a CPU or a GPU
- * @type_core_num: The number of the core for a type
+ * @core_name: Unique name identifier for the core.
+ * @group_id: Cores that are to be grouped for synchronized frequency scaling
* @info: The core specific algorithm parameters.
- * @sensor: The thermal sensor number of the core in question
* @return :
* 0 on success,
* -ENOSYS,
@@ -88,30 +106,37 @@ struct msm_dcvs_core_info {
* Register the core with msm_dcvs driver. Done once at init before calling
* msm_dcvs_freq_sink_register
* Cores that need to run synchronously must share the same group id.
+ * If a core doesn't care to be in any group, the group_id should be 0.
+ */
+extern int msm_dcvs_register_core(const char *core_name, uint32_t group_id,
+ struct msm_dcvs_core_info *info);
+
+/**
+ * struct msm_dcvs_freq
+ *
+ * API for clock driver code to register and receive frequency change
+ * request for the core from the msm_dcvs driver.
*/
-extern int msm_dcvs_register_core(
- enum msm_dcvs_core_type type,
- int type_core_num,
- struct msm_dcvs_core_info *info,
- int (*set_frequency)(int type_core_num, unsigned int freq),
- unsigned int (*get_frequency)(int type_core_num),
- int (*idle_enable)(int type_core_num,
- enum msm_core_control_event event),
- int (*set_floor_frequency)(int type_core_num, unsigned int freq),
- int sensor);
+struct msm_dcvs_freq {
+ const char *core_name;
+ /* Callback from msm_dcvs to set the core frequency */
+ int (*set_frequency)(struct msm_dcvs_freq *self,
+ unsigned int freq);
+ unsigned int (*get_frequency)(struct msm_dcvs_freq *self);
+};
/**
- * msm_dcvs_freq_sink_start
+ * msm_dcvs_freq_sink_register
* @drv: The sink driver
* @return: Handle unique to the core.
*
 * Register the clock driver code with the msm_dcvs driver to get notified about
* frequency change requests.
*/
-extern int msm_dcvs_freq_sink_start(int dcvs_core_id);
+extern int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv);
/**
- * msm_dcvs_freq_sink_stop
+ * msm_dcvs_freq_sink_unregister
* @drv: The sink driver
* @return:
* 0 on success,
@@ -120,13 +145,6 @@ extern int msm_dcvs_freq_sink_start(int dcvs_core_id);
* Unregister the sink driver for the core. This will cause the source driver
* for the core to stop sending idle pulses.
*/
-extern int msm_dcvs_freq_sink_stop(int dcvs_core_id);
+extern int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv);
-/**
- * msm_dcvs_update_limits
- * @drv: The sink driver
- *
- * Update the frequency known to dcvs when the limits are changed.
- */
-extern void msm_dcvs_update_limits(int dcvs_core_id);
#endif
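On the clock side, the restored msm_dcvs_freq interface mirrors the idle one: the clock driver supplies set/get callbacks and registers as a sink. A sketch under the same caveats (callback bodies, return conventions, and the core name are placeholders):

static int example_set_freq(struct msm_dcvs_freq *self, unsigned int freq)
{
	/* Program the core clock to the requested rate here. */
	return 0;
}

static unsigned int example_get_freq(struct msm_dcvs_freq *self)
{
	/* Report the current core frequency here. */
	return 0;
}

static struct msm_dcvs_freq example_sink = {
	.core_name = "cpu0",		/* must match the registered core */
	.set_frequency = example_set_freq,
	.get_frequency = example_get_freq,
};

int err = msm_dcvs_freq_sink_register(&example_sink);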
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h b/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
index 7eefd54..3cc2595 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,87 +13,38 @@
#ifndef _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H
#define _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H
-enum msm_dcvs_core_type {
- MSM_DCVS_CORE_TYPE_CPU = 0,
- MSM_DCVS_CORE_TYPE_GPU = 1,
-};
-
-enum msm_dcvs_algo_param_type {
- MSM_DCVS_ALGO_DCVS_PARAM = 0,
- MSM_DCVS_ALGO_MPD_PARAM = 1,
-};
-
enum msm_dcvs_scm_event {
- MSM_DCVS_SCM_IDLE_ENTER = 0, /* Core enters idle */
- MSM_DCVS_SCM_IDLE_EXIT = 1, /* Core exits idle */
- MSM_DCVS_SCM_QOS_TIMER_EXPIRED = 2, /* Core slack timer expired */
- MSM_DCVS_SCM_CLOCK_FREQ_UPDATE = 3, /* Core freq change complete */
- MSM_DCVS_SCM_CORE_ONLINE = 4, /* Core is online */
- MSM_DCVS_SCM_CORE_OFFLINE = 5, /* Core is offline */
- MSM_DCVS_SCM_CORE_UNAVAILABLE = 6, /* Core is offline + unavailable */
- MSM_DCVS_SCM_DCVS_ENABLE = 7, /* DCVS is enabled/disabled for core */
- MSM_DCVS_SCM_MPD_ENABLE = 8, /* Enable/disable MP Decision */
- MSM_DCVS_SCM_RUNQ_UPDATE = 9, /* Update running threads */
- MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED = 10, /* MPDecision slack timer */
+ MSM_DCVS_SCM_IDLE_ENTER,
+ MSM_DCVS_SCM_IDLE_EXIT,
+ MSM_DCVS_SCM_QOS_TIMER_EXPIRED,
+ MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
+ MSM_DCVS_SCM_ENABLE_CORE,
+ MSM_DCVS_SCM_RESET_CORE,
};
struct msm_dcvs_algo_param {
+ uint32_t slack_time_us;
+ uint32_t scale_slack_time;
+ uint32_t scale_slack_time_pct;
uint32_t disable_pc_threshold;
- uint32_t em_win_size_min_us;
- uint32_t em_win_size_max_us;
+ uint32_t em_window_size;
uint32_t em_max_util_pct;
- uint32_t group_id;
- uint32_t max_freq_chg_time_us;
- uint32_t slack_mode_dynamic;
- uint32_t slack_time_min_us;
- uint32_t slack_time_max_us;
- uint32_t slack_weight_thresh_pct;
- uint32_t ss_no_corr_below_freq;
- uint32_t ss_win_size_min_us;
- uint32_t ss_win_size_max_us;
+ uint32_t ss_window_size;
uint32_t ss_util_pct;
+ uint32_t ss_iobusy_conv;
};
struct msm_dcvs_freq_entry {
- uint32_t freq;
- uint32_t voltage;
- uint32_t is_trans_level;
- uint32_t active_energy_offset;
- uint32_t leakage_energy_offset;
-};
-
-struct msm_dcvs_energy_curve_coeffs {
- int32_t active_coeff_a;
- int32_t active_coeff_b;
- int32_t active_coeff_c;
-
- int32_t leakage_coeff_a;
- int32_t leakage_coeff_b;
- int32_t leakage_coeff_c;
- int32_t leakage_coeff_d;
+ uint32_t freq; /* Core freq in MHz */
+ uint32_t idle_energy;
+ uint32_t active_energy;
};
-struct msm_dcvs_power_params {
- uint32_t current_temp;
+struct msm_dcvs_core_param {
+ uint32_t max_time_us;
uint32_t num_freq; /* number of msm_dcvs_freq_entry passed */
};
-struct msm_dcvs_core_param {
- uint32_t core_type;
- uint32_t core_bitmask_id;
-};
-
-struct msm_mpd_algo_param {
- uint32_t em_win_size_min_us;
- uint32_t em_win_size_max_us;
- uint32_t em_max_util_pct;
- uint32_t mp_em_rounding_point_min;
- uint32_t mp_em_rounding_point_max;
- uint32_t online_util_pct_min;
- uint32_t online_util_pct_max;
- uint32_t slack_time_min_us;
- uint32_t slack_time_max_us;
-};
#ifdef CONFIG_MSM_DCVS
/**
@@ -110,9 +61,20 @@ struct msm_mpd_algo_param {
extern int msm_dcvs_scm_init(size_t size);
/**
- * Registers cores with the DCVS algo.
+ * Create an empty core group
+ *
+ * @return:
+ * 0 on success.
+ * -ENOMEM: Insufficient memory.
+ * -EINVAL: Invalid args.
+ */
+extern int msm_dcvs_scm_create_group(uint32_t id);
+
+/**
+ * Registers cores as part of a group
*
* @core_id: The core identifier that will be used for communication with DCVS
+ * @group_id: The group to which this core will be added to.
* @param: The core parameters
* @freq: Array of frequency and energy values
*
@@ -121,8 +83,9 @@ extern int msm_dcvs_scm_init(size_t size);
* -ENOMEM: Insufficient memory.
* -EINVAL: Invalid args.
*/
-extern int msm_dcvs_scm_register_core(uint32_t core_id,
- struct msm_dcvs_core_param *param);
+extern int msm_dcvs_scm_register_core(uint32_t core_id, uint32_t group_id,
+ struct msm_dcvs_core_param *param,
+ struct msm_dcvs_freq_entry *freq);
/**
* Set DCVS algorithm parameters
@@ -138,33 +101,6 @@ extern int msm_dcvs_scm_set_algo_params(uint32_t core_id,
struct msm_dcvs_algo_param *param);
/**
- * Set MPDecision algorithm parameters
- *
- * @param: The param data structure
- * 0 on success.
- * -EINVAL: Invalid args.
- */
-extern int msm_mpd_scm_set_algo_params(struct msm_mpd_algo_param *param);
-
-/**
- * Set frequency and power characteristics for the core.
- *
- * @param core_id: The core identifier that will be used to interace with the
- * DCVS algo.
- * @param pwr_param: power params
- * @param freq_entry: frequency characteristics desired
- * @param coeffs: Coefficients that will describe the power curve
- *
- * @return int
- * 0 on success.
- * -EINVAL: Invalid args.
- */
-extern int msm_dcvs_scm_set_power_params(uint32_t core_id,
- struct msm_dcvs_power_params *pwr_param,
- struct msm_dcvs_freq_entry *freq_entry,
- struct msm_dcvs_energy_curve_coeffs *coeffs);
-
-/**
* Do an SCM call.
*
* @core_id: The core identifier.
@@ -190,44 +126,19 @@ extern int msm_dcvs_scm_set_power_params(uint32_t core_id,
* @param1: time taken in usec to switch to the frequency
* @ret0: New QoS timer value for the core in usec
* @ret1: unused
- * MSM_DCVS_SCM_CORE_ONLINE
- * @param0: active clock frequency of the core in KHz
- * @param1: time taken to online the core
- * @ret0: unused
- * @ret1: unused
- * MSM_DCVS_SCM_CORE_OFFLINE
- * @param0: time taken to offline the core
- * @param1: unused
- * @ret0: unused
- * @ret1: unused
- * MSM_DCVS_SCM_CORE_UNAVAILABLE
- * @param0: TODO:bitmask
- * @param1: unused
- * @ret0: Bitmask of cores to bring online/offline.
- * @ret1: Mp Decision slack time. Common to all cores.
- * MSM_DCVS_SCM_DCVS_ENABLE
- * @param0: 1 to enable; 0 to disable DCVS
- * @param1: unused
+ * MSM_DCVS_SCM_ENABLE_CORE
+ * @param0: enable(1) or disable(0) core
+ * @param1: active clock frequency of the core in KHz
* @ret0: New clock frequency for the core in KHz
* @ret1: unused
- * MSM_DCVS_SCM_MPD_ENABLE
- * @param0: 1 to enable; 0 to disable MP Decision
+ * MSM_DCVS_SCM_RESET_CORE
+ * @param0: active clock frequency of the core in KHz
* @param1: unused
- * @ret0: unused
+ * @ret0: New clock frequency for the core in KHz
* @ret1: unused
- * MSM_DCVS_SCM_RUNQ_UPDATE
- * @param0: run q value
- * @param1: unused
- * @ret0: Bitmask of cores online
- * @ret1: New QoS timer for MP Decision (usec)
- * MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
- * @param0: unused
- * @param1: unused
- * @ret0: Bitmask of cores online
- * @ret1: New QoS timer for MP Decision (usec)
- * @return:
- * 0 on success,
- * SCM return values
+ * @return:
+ * 0 on success,
+ * SCM return values
*/
extern int msm_dcvs_scm_event(uint32_t core_id,
enum msm_dcvs_scm_event event_id,
@@ -237,21 +148,16 @@ extern int msm_dcvs_scm_event(uint32_t core_id,
#else
static inline int msm_dcvs_scm_init(uint32_t phy, size_t bytes)
{ return -ENOSYS; }
+static inline int msm_dcvs_scm_create_group(uint32_t id)
+{ return -ENOSYS; }
static inline int msm_dcvs_scm_register_core(uint32_t core_id,
+ uint32_t group_id,
struct msm_dcvs_core_param *param,
struct msm_dcvs_freq_entry *freq)
{ return -ENOSYS; }
static inline int msm_dcvs_scm_set_algo_params(uint32_t core_id,
struct msm_dcvs_algo_param *param)
{ return -ENOSYS; }
-static inline int msm_mpd_scm_set_algo_params(
- struct msm_mpd_algo_param *param)
-{ return -ENOSYS; }
-static inline int msm_dcvs_set_power_params(uint32_t core_id,
- struct msm_dcvs_power_params *pwr_param,
- struct msm_dcvs_freq_entry *freq_entry,
- struct msm_dcvs_energy_curve_coeffs *coeffs)
-{ return -ENOSYS; }
static inline int msm_dcvs_scm_event(uint32_t core_id,
enum msm_dcvs_scm_event event_id,
uint32_t param0, uint32_t param1,
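The table above maps every msm_dcvs_scm_event onto one generic four-parameter SCM call with two outputs. An illustrative invocation for MSM_DCVS_SCM_IDLE_ENTER, assuming (as the documentation block implies) that the two trailing arguments are uint32_t pointers receiving ret0/ret1, and using a made-up core id:

uint32_t ret0 = 0, ret1 = 0;
int err;

/* Tell the secure side that core 1 is entering idle. */
err = msm_dcvs_scm_event(1, MSM_DCVS_SCM_IDLE_ENTER,
			 0 /* param0 */, 0 /* param1 */,
			 &ret0, &ret1);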
diff --git a/arch/arm/mach-msm/include/mach/msm_fb.h b/arch/arm/mach-msm/include/mach/msm_fb.h
index 3e42048..3bbaa25 100644
--- a/arch/arm/mach-msm/include/mach/msm_fb.h
+++ b/arch/arm/mach-msm/include/mach/msm_fb.h
@@ -77,7 +77,7 @@ struct msm_mddi_client_data {
uint32_t reg);
uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
void (*auto_hibernate)(struct msm_mddi_client_data *, int);
- /* custom data that needs to be passed from the board file to a
+ /* custom data that needs to be passed from the board file to a
* particular client */
void *private_client_data;
struct resource *fb_resource;
@@ -190,7 +190,7 @@ struct msm_mddi_bridge_platform_data {
struct mdp_v4l2_req;
int msm_fb_v4l2_enable(struct mdp_overlay *req, bool enable, void **par);
-int msm_fb_v4l2_update(void *par, bool bUserPtr,
+int msm_fb_v4l2_update(void *par,
unsigned long srcp0_addr, unsigned long srcp0_size,
unsigned long srcp1_addr, unsigned long srcp1_size,
unsigned long srcp2_addr, unsigned long srcp2_size);
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 3c5d841..d9978b5 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -98,8 +98,8 @@
0xFB600000 */
#define MSM_STRONGLY_ORDERED_PAGE 0xFA0F0000
+#define MSM8625_SECONDARY_PHYS 0x0FE00000
#define MSM8625_NON_CACHE_MEM 0x0FC00000
-#define MSM8625_CPU_PHYS 0x0FE00000
#if defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_MSM7X27) \
|| defined(CONFIG_ARCH_MSM7X30)
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 55742ee..44064cd 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -66,7 +66,4 @@ struct reserve_info {
extern struct reserve_info *reserve_info;
-int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
- int depth, void *data);
-
#endif
diff --git a/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi.h b/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi.h
index 559073c..5bad4fa 100644
--- a/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi.h
+++ b/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,9 +15,7 @@
#define DBOR_SIGNATURE 0x524F4244
-#ifdef CONFIG_DEBUG_FS
-void acdb_rtc_set_err(u32 ErrCode);
-#endif
+void acdb_rtc_set_err(u32 err_code);
struct header {
@@ -95,8 +93,6 @@ struct acdb_iir_block {
u16 pan[4];
};
-
-
struct mbadrc_band_config_type {
u16 mbadrc_sub_band_enable;
u16 mbadrc_sub_mute;
@@ -123,181 +119,14 @@ struct acdb_mbadrc_block {
struct mbadrc_parameter parameters;
};
-struct acdb_calib_gain_rx {
- u16 audppcalgain;
- u16 reserved;
-};
-
-struct acdb_calib_gain_tx {
- u16 audprecalgain;
- u16 reserved;
-};
-
-struct acdb_pbe_block {
- s16 realbassmix;
- s16 basscolorcontrol;
- u16 mainchaindelay;
- u16 xoverfltorder;
- u16 bandpassfltorder;
- s16 adrcdelay;
- u16 downsamplelevel;
- u16 comprmstav;
- s16 expthreshold;
- u16 expslope;
- u16 compthreshold;
- u16 compslope;
- u16 cpmpattack_lsw;
- u16 compattack_msw;
- u16 comprelease_lsw;
- u16 comprelease_msw;
- u16 compmakeupgain;
- s16 baselimthreshold;
- s16 highlimthreshold;
- s16 basslimmakeupgain;
- s16 highlimmakeupgain;
- s16 limbassgrc;
- s16 limhighgrc;
- s16 limdelay;
- u16 filter_coeffs[90];
-};
-
-struct acdb_rmc_block {
- s16 rmc_enable;
- u16 rmc_ipw_length_ms;
- u16 rmc_detect_start_threshdb;
- u16 rmc_peak_length_ms;
- s16 rmc_init_pulse_threshdb;
- u16 rmc_init_pulse_length_ms;
- u16 rmc_total_int_length_ms;
- u16 rmc_rampupdn_length_ms;
- u16 rmc_delay_length_ms;
- u16 reserved00;
- u16 reserved01;
- s16 reserved02;
- s16 reserved03;
- s16 reserved04;
-};
-
-struct acdb_fluence_block {
- u16 csmode;
- u16 cs_tuningMode;
- u16 cs_echo_path_delay_by_80;
- u16 cs_echo_path_delay;
- u16 af1_twoalpha;
- u16 af1_erl;
- u16 af1_taps;
- u16 af1_preset_coefs;
- u16 af1_offset;
- u16 af2_twoalpha;
- u16 af2_erl;
- u16 af2_taps;
- u16 af2_preset_coefs;
- u16 af2_offset;
- u16 pcd_twoalpha;
- u16 pcd_offset;
- u16 cspcd_threshold;
- u16 wgthreshold;
- u16 mpthreshold;
- u16 sf_init_table_0[8];
- u16 sf_init_table_1[8];
- u16 sf_taps;
- u16 sf_twoalpha;
- u16 dnns_echoalpharev;
- u16 dnns_echoycomp;
- u16 dnns_wbthreshold;
- u16 dnns_echogammahi;
- u16 dnns_echogammalo;
- u16 dnns_noisegammas;
- u16 dnns_noisegamman;
- u16 dnns_noisegainmins;
- u16 dnns_noisegainminn;
- u16 dnns_noisebiascomp;
- u16 dnns_acthreshold;
- u16 wb_echo_ratio_2mic;
- u16 wb_gamma_e;
- u16 wb_gamma_nn;
- u16 wb_gamma_sn;
- u16 vcodec_delay0;
- u16 vcodec_delay1;
- u16 vcodec_len0;
- u16 vcodec_len1;
- u16 vcodec_thr0;
- u16 vcodec_thr1;
- u16 fixcalfactorleft;
- u16 fixcalfactorright;
- u16 csoutputgain;
- u16 enh_meu_1;
- u16 enh_meu_2;
- u16 fixed_over_est;
- u16 rx_nlpp_limit;
- u16 rx_nlpp_gain;
- u16 wnd_threshold;
- u16 wnd_ns_hover;
- u16 wnd_pwr_smalpha;
- u16 wnd_det_esmalpha;
- u16 wnd_ns_egoffset;
- u16 wnd_sm_ratio;
- u16 wnd_det_coefs[5];
- u16 wnd_th1;
- u16 wnd_th2;
- u16 wnd_fq;
- u16 wnd_dfc;
- u16 wnd_sm_alphainc;
- u16 wnd_sm_alphsdec;
- u16 lvnv_spdet_far;
- u16 lvnv_spdet_mic;
- u16 lvnv_spdet_xclip;
- u16 dnns_nl_atten;
- u16 dnns_cni_level;
- u16 dnns_echogammaalpha;
- u16 dnns_echogammarescue;
- u16 dnns_echogammadt;
- u16 mf_noisegammafac;
- u16 e_noisegammafac;
- u16 dnns_noisegammainit;
- u16 sm_noisegammas;
- u16 wnd_noisegamman;
- u16 af_taps_bg_spkr;
- u16 af_erl_bg_spkr;
- u16 minimum_erl_bg;
- u16 erl_step_bg;
- u16 upprisecalpha;
- u16 upprisecthresh;
- u16 uppriwindbias;
- u16 e_pcd_threshold;
- u16 nv_maxvadcount;
- u16 crystalspeechreserved[38];
- u16 cs_speaker[7];
- u16 ns_fac;
- u16 ns_blocksize;
- u16 is_bias;
- u16 is_bias_inp;
- u16 sc_initb;
- u16 ac_resetb;
- u16 sc_avar;
- u16 is_hover[5];
- u16 is_cf_level;
- u16 is_cf_ina;
- u16 is_cf_inb;
- u16 is_cf_a;
- u16 is_cf_b;
- u16 sc_th;
- u16 sc_pscale;
- u16 sc_nc;
- u16 sc_hover;
- u16 sc_alphas;
- u16 sc_cfac;
- u16 sc_sdmax;
- u16 sc_sdmin;
- u16 sc_initl;
- u16 sc_maxval;
- u16 sc_spmin;
- u16 is_ec_th;
- u16 is_fx_dl;
- u16 coeffs_iva_filt_0[32];
- u16 coeffs_iva_filt_1[32];
+struct acdb_ns_tx_block {
+ unsigned short ec_mode_new;
+ unsigned short dens_gamma_n;
+ unsigned short dens_nfe_block_size;
+ unsigned short dens_limit_ns;
+ unsigned short dens_limit_ns_d;
+ unsigned short wb_gamma_e;
+ unsigned short wb_gamma_n;
};
-s32 acdb_get_calibration_data(struct acdb_get_block *get_block);
-void fluence_feature_update(int enable, int stream_id);
#endif
diff --git a/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi2.h b/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi2.h
deleted file mode 100644
index 5bad4fa..0000000
--- a/arch/arm/mach-msm/include/mach/qdsp5/audio_acdbi2.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef _MACH_QDSP5_V2_AUDIO_ACDBI_H
-#define _MACH_QDSP5_V2_AUDIO_ACDBI_H
-
-#define DBOR_SIGNATURE 0x524F4244
-
-void acdb_rtc_set_err(u32 err_code);
-
-
-struct header {
- u32 dbor_signature;
- u32 abid;
- u32 iid;
- u32 data_len;
-};
-
-enum {
- ACDB_AGC_BLOCK = 197,
- ACDB_IIR_BLOCK = 245,
- ACDB_MBADRC_BLOCK = 343
-};
-
-/* Structure to query for acdb parameter */
-struct acdb_get_block {
- u32 acdb_id;
- u32 sample_rate_id; /* Actual sample rate value */
- u32 interface_id; /* Interface id's */
- u32 algorithm_block_id; /* Algorithm block id */
- u32 total_bytes; /* Length in bytes used by buffer for
- configuration */
- u32 *buf_ptr; /* Address for storing configuration
- data */
-};
-
-struct acdb_agc_block {
- u16 enable_status;
- u16 comp_rlink_static_gain;
- u16 comp_rlink_aig_flag;
- u16 exp_rlink_threshold;
- u16 exp_rlink_slope;
- u16 comp_rlink_threshold;
- u16 comp_rlink_slope;
- u16 comp_rlink_aig_attack_k;
- u16 comp_rlink_aig_leak_down;
- u16 comp_rlink_aig_leak_up;
- u16 comp_rlink_aig_max;
- u16 comp_rlink_aig_min;
- u16 comp_rlink_aig_release_k;
- u16 comp_rlink_aig_sm_leak_rate_fast;
- u16 comp_rlink_aig_sm_leak_rate_slow;
- u16 comp_rlink_attack_k_msw;
- u16 comp_rlink_attack_k_lsw;
- u16 comp_rlink_delay;
- u16 comp_rlink_release_k_msw;
- u16 comp_rlink_release_k_lsw;
- u16 comp_rlink_rms_trav;
-};
-
-
-struct iir_coeff_type {
- u16 b0_lo;
- u16 b0_hi;
- u16 b1_lo;
- u16 b1_hi;
- u16 b2_lo;
- u16 b2_hi;
-};
-
-struct iir_coeff_stage_a {
- u16 a1_lo;
- u16 a1_hi;
- u16 a2_lo;
- u16 a2_hi;
-};
-
-struct acdb_iir_block {
- u16 enable_flag;
- u16 stage_count;
- struct iir_coeff_type stages[4];
- struct iir_coeff_stage_a stages_a[4];
- u16 shift_factor[4];
- u16 pan[4];
-};
-
-struct mbadrc_band_config_type {
- u16 mbadrc_sub_band_enable;
- u16 mbadrc_sub_mute;
- u16 mbadrc_comp_rms_tav;
- u16 mbadrc_comp_threshold;
- u16 mbadrc_comp_slop;
- u16 mbadrc_comp_attack_msw;
- u16 mbadrc_comp_attack_lsw;
- u16 mbadrc_comp_release_msw;
- u16 mbadrc_comp_release_lsw;
- u16 mbadrc_make_up_gain;
-};
-
-struct mbadrc_parameter {
- u16 mbadrc_enable;
- u16 mbadrc_num_bands;
- u16 mbadrc_down_sample_level;
- u16 mbadrc_delay;
-};
-
-struct acdb_mbadrc_block {
- u16 ext_buf[196];
- struct mbadrc_band_config_type band_config[5];
- struct mbadrc_parameter parameters;
-};
-
-struct acdb_ns_tx_block {
- unsigned short ec_mode_new;
- unsigned short dens_gamma_n;
- unsigned short dens_nfe_block_size;
- unsigned short dens_limit_ns;
- unsigned short dens_limit_ns_d;
- unsigned short wb_gamma_e;
- unsigned short wb_gamma_n;
-};
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index aa4adc4..7cc5f7a 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -25,22 +25,10 @@
#define SCM_SVC_DCVS 0xD
#define SCM_SVC_TZSCHEDULER 0xFC
-#define DEFINE_SCM_BUFFER(__n) \
-static char __n[PAGE_SIZE] __aligned(PAGE_SIZE);
-
-#define SCM_BUFFER_SIZE(__buf) sizeof(__buf)
-
-#define SCM_BUFFER_PHYS(__buf) virt_to_phys(__buf)
-
#ifdef CONFIG_MSM_SCM
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len);
-extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- void *scm_buf, size_t scm_buf_size);
-
-
extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
@@ -60,13 +48,6 @@ static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
return 0;
}
-static inline int scm_call_noalloc(u32 svc_id, u32 cmd_id,
- const void *cmd_buf, size_t cmd_len, void *resp_buf,
- size_t resp_len, void *scm_buf, size_t scm_buf_size)
-{
- return 0;
-}
-
static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
return 0;
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index f2025ee..7706239 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -33,10 +33,10 @@
#include <linux/completion.h>
#include <linux/err.h>
#endif
+#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
-#include <linux/of_fdt.h>
/* fixme */
#include <asm/tlbflush.h>
@@ -399,126 +399,3 @@ unsigned long get_ddr_size(void)
return ret;
}
-
-static char * const memtype_names[] = {
- [MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
- [MEMTYPE_SMI] = "SMI",
- [MEMTYPE_EBI0] = "EBI0",
- [MEMTYPE_EBI1] = "EBI1",
-};
-
-int msm_get_memory_type_from_name(const char *memtype_name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
- if (memtype_names[i] &&
- strcmp(memtype_name, memtype_names[i]) == 0)
- return i;
- }
-
- pr_err("Could not find memory type %s\n", memtype_name);
- return -EINVAL;
-}
-
-static int reserve_memory_type(const char *mem_name,
- struct memtype_reserve *reserve_table,
- int size)
-{
- int ret = msm_get_memory_type_from_name(mem_name);
-
- if (ret >= 0) {
- reserve_table[ret].size += size;
- ret = 0;
- }
- return ret;
-}
-
-static int check_for_compat(unsigned long node)
-{
- char **start = __compat_exports_start;
-
- for ( ; start < __compat_exports_end; start++)
- if (of_flat_dt_is_compatible(node, *start))
- return 1;
-
- return 0;
-}
-
-int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
- int depth, void *data)
-{
- char *memory_name_prop;
- unsigned int *memory_remove_prop;
- unsigned long memory_name_prop_length;
- unsigned long memory_remove_prop_length;
- unsigned long memory_size_prop_length;
- unsigned int *memory_size_prop;
- unsigned int memory_size;
- unsigned int memory_start;
- int ret;
-
- memory_name_prop = of_get_flat_dt_prop(node,
- "qcom,memory-reservation-type",
- &memory_name_prop_length);
- memory_remove_prop = of_get_flat_dt_prop(node,
- "qcom,memblock-remove",
- &memory_remove_prop_length);
-
- if (memory_name_prop || memory_remove_prop) {
- if (!check_for_compat(node))
- goto out;
- } else {
- goto out;
- }
-
- if (memory_name_prop) {
- if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
- WARN(1, "Memory name was malformed\n");
- goto mem_remove;
- }
-
- memory_size_prop = of_get_flat_dt_prop(node,
- "qcom,memory-reservation-size",
- &memory_size_prop_length);
-
- if (memory_size_prop &&
- (memory_size_prop_length == sizeof(unsigned int))) {
- memory_size = be32_to_cpu(*memory_size_prop);
-
- if (reserve_memory_type(memory_name_prop,
- data, memory_size) == 0)
- pr_info("%s reserved %s size %x\n",
- uname, memory_name_prop, memory_size);
- else
- WARN(1, "Node %s reserve failed\n",
- uname);
- } else {
- WARN(1, "Node %s specified bad/nonexistent size\n",
- uname);
- }
- }
-
-mem_remove:
-
- if (memory_remove_prop) {
- if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
- WARN(1, "Memory remove malformed\n");
- goto out;
- }
-
- memory_start = be32_to_cpu(memory_remove_prop[0]);
- memory_size = be32_to_cpu(memory_remove_prop[1]);
-
- ret = memblock_remove(memory_start, memory_size);
- if (ret)
- WARN(1, "Failed to remove memory %x-%x\n",
- memory_start, memory_start+memory_size);
- else
- pr_info("Node %s removed memory %x-%x\n", uname,
- memory_start, memory_start+memory_size);
- }
-
-out:
- return 0;
-}
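The removed scanner was designed to be driven over the flattened device tree during early boot; the usual idiom is of_scan_flat_dt() with the reserve table as callback data. A sketch of that invocation (hedged: this mirrors the common kernel pattern and the callback's signature above, not a call site in this patch; MEMTYPE_MAX is assumed from msm_memtypes.h):

/* Early in machine setup, before reservations are committed: */
static struct memtype_reserve example_table[MEMTYPE_MAX];

/* of_scan_flat_dt() invokes the callback once per FDT node; the
 * callback matches qcom,memory-reservation-* properties and
 * accumulates sizes into the table passed via @data. */
of_scan_flat_dt(dt_scan_for_memory_reserve, example_table);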
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index bd24d7c..ab62c20 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -2,7 +2,7 @@
# Makefile for msm-bus driver specific files
#
obj-y += msm_bus_core.o msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o
-obj-y += msm_bus_rpm.o msm_bus_bimc.o msm_bus_noc.o msm_bus_of.o
+obj-y += msm_bus_rpm.o msm_bus_bimc.o msm_bus_noc.o
obj-$(CONFIG_ARCH_MSM8X60) += msm_bus_board_8660.o
obj-$(CONFIG_ARCH_MSM8960) += msm_bus_board_8960.o
obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
index 65539c6..07082b7 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
@@ -32,9 +32,8 @@
#define SEL_FAB_CLK 1
#define SEL_SLAVE_CLK 0
-#define BW_TO_CLK_FREQ_HZ(width, bw) \
- msm_bus_div64(width, bw)
-
+#define BW_TO_CLK_FREQ_HZ(width, bw) ((unsigned long)\
+ DIV_ROUND_UP((bw), (width)))
#define IS_MASTER_VALID(mas) \
(((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
? 1 : 0)
@@ -44,33 +43,6 @@
static DEFINE_MUTEX(msm_bus_lock);
-/* This function uses shift operations to divide 64 bit value for higher
- * efficiency. The divisor expected are number of ports or bus-width.
- * These are expected to be 1, 2, 4, 8, 16 and 32 in most cases.
- *
- * To account for exception to the above divisor values, the standard
- * do_div function is used.
- * */
-uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
-{
- uint64_t *b = &bw;
-
- if ((bw > 0) && (bw < w))
- return 1;
-
- switch (w) {
- case 1: return bw;
- case 2: return (bw >> 1);
- case 4: return (bw >> 2);
- case 8: return (bw >> 3);
- case 16: return (bw >> 4);
- case 32: return (bw >> 5);
- }
-
- do_div(*b, w);
- return *b;
-}
-
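The deleted helper deserves a note: on 32-bit ARM a 64-bit divide must go through do_div(), so msm_bus_div64() special-cased the power-of-two divisors that ports and bus widths usually take (1..32) as shifts, and clamped any nonzero bandwidth smaller than the divisor up to 1 so a small request never rounded away to zero. Worked values:

/* msm_bus_div64(16, 0x300000000ULL) takes the shift path:
 *	0x300000000 >> 4 = 0x30000000
 * msm_bus_div64(8, 3) returns 1, not 0, because 0 < bw < w.
 * msm_bus_div64(3, 900) falls through to do_div(): 300.
 */

The replacement BW_TO_CLK_FREQ_HZ above instead rounds up with DIV_ROUND_UP, which serves the same never-round-to-zero purpose for nonzero bandwidths.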
/**
* add_path_node: Adds the path information to the current node
* @info: Internal node info structure
@@ -306,21 +278,21 @@ static int getpath(int src, int dest)
* frequencies is calculated at each node on the path. Commit data to be sent
* to RPM for each master and slave is also calculated here.
*/
-static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw,
- uint64_t curr_clk, uint64_t curr_bw, unsigned int ctx, unsigned int
- cl_active_flag)
+static int update_path(int curr, int pnode, unsigned long req_clk, unsigned
+ long req_bw, unsigned long curr_clk, unsigned long curr_bw,
+ unsigned int ctx, unsigned int cl_active_flag)
{
int index, ret = 0;
struct msm_bus_inode_info *info;
int next_pnode;
- int64_t add_bw = req_bw - curr_bw;
+ long int add_bw = req_bw - curr_bw;
unsigned bwsum = 0;
- uint64_t req_clk_hz, curr_clk_hz, bwsum_hz;
+ unsigned req_clk_hz, curr_clk_hz, bwsum_hz;
int *master_tiers;
struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device
(GET_FABID(curr));
- MSM_BUS_DBG("args: %d %d %d %llu %llu %llu %llu %u\n",
+ MSM_BUS_DBG("args: %d %d %d %lu %lu %lu %lu %u\n",
curr, GET_NODE(pnode), GET_INDEX(pnode), req_clk, req_bw,
curr_clk, curr_bw, ctx);
index = GET_INDEX(pnode);
@@ -406,8 +378,8 @@ static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw,
req_clk);
bwsum_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth,
bwsum);
- MSM_BUS_DBG("up-clk: curr_hz: %llu, req_hz: %llu, bw_hz %llu\n",
- curr_clk, req_clk, bwsum_hz);
+ MSM_BUS_DBG("Calling update-clks: curr_hz: %lu, req_hz: %lu,"
+ " bw_hz %u\n", curr_clk, req_clk, bwsum_hz);
ret = fabdev->algo->update_clks(fabdev, hop, index,
curr_clk_hz, req_clk_hz, bwsum_hz, SEL_FAB_CLK,
ctx, cl_active_flag);
@@ -560,7 +532,7 @@ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index)
int i, ret = 0;
struct msm_bus_scale_pdata *pdata;
int pnode, src, curr, ctx;
- uint64_t req_clk, req_bw, curr_clk, curr_bw;
+ unsigned long req_clk, req_bw, curr_clk, curr_bw;
struct msm_bus_client *client = (struct msm_bus_client *)cl;
if (IS_ERR(client)) {
MSM_BUS_ERR("msm_bus_scale_client update req error %d\n",
@@ -582,8 +554,9 @@ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index)
goto err;
}
- MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n",
- cl, index, client->curr, client->pdata->usecase->num_paths);
+ MSM_BUS_DBG("cl: %u index: %d curr: %d"
+ " num_paths: %d\n", cl, index, client->curr,
+ client->pdata->usecase->num_paths);
for (i = 0; i < pdata->usecase->num_paths; i++) {
src = msm_bus_board_get_iid(client->pdata->usecase[index].
@@ -611,7 +584,7 @@ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index)
} else {
curr_clk = client->pdata->usecase[curr].vectors[i].ib;
curr_bw = client->pdata->usecase[curr].vectors[i].ab;
- MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk);
+ MSM_BUS_DBG("ab: %lu ib: %lu\n", curr_bw, curr_clk);
}
if (!pdata->active_only) {
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 41dd32d..2161e1a 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1803,7 +1803,7 @@ static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop,
struct msm_bus_inode_info *info,
struct msm_bus_fabric_registration *fab_pdata,
void *sel_cdata, int *master_tiers,
- int64_t add_bw)
+ long int add_bw)
{
struct msm_bus_bimc_info *binfo;
struct msm_bus_bimc_qos_bw qbw;
@@ -1813,7 +1813,7 @@ static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop,
struct msm_bus_bimc_commit *sel_cd =
(struct msm_bus_bimc_commit *)sel_cdata;
- MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n",
+ MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %ld\n",
info->node_info->id, info->node_info->priv_id, add_bw);
binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 4bce997..12051da 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -36,7 +36,7 @@
(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
#define INTERLEAVED_BW(fab_pdata, bw, ports) \
- ((fab_pdata->il_flag) ? msm_bus_div64((bw), (ports)) : (bw))
+ ((fab_pdata->il_flag) ? DIV_ROUND_UP((bw), (ports)) : (bw))
#define INTERLEAVED_VAL(fab_pdata, n) \
((fab_pdata->il_flag) ? (n) : 1)
@@ -83,34 +83,34 @@ struct msm_bus_node_info {
};
struct path_node {
- uint64_t clk[NUM_CTX];
- uint64_t bw[NUM_CTX];
- uint64_t *sel_clk;
- uint64_t *sel_bw;
+ unsigned long clk[NUM_CTX];
+ unsigned long bw[NUM_CTX];
+ unsigned long *sel_clk;
+ unsigned long *sel_bw;
int next;
};
struct msm_bus_link_info {
- uint64_t clk[NUM_CTX];
- uint64_t *sel_clk;
- uint64_t memclk;
- int64_t bw[NUM_CTX];
- int64_t *sel_bw;
+ unsigned long clk[NUM_CTX];
+ unsigned long *sel_clk;
+ unsigned long memclk;
+ long bw[NUM_CTX];
+ long *sel_bw;
int *tier;
int num_tiers;
};
struct nodeclk {
struct clk *clk;
- uint64_t rate;
+ unsigned long rate;
bool dirty;
bool enable;
};
struct msm_bus_inode_info {
struct msm_bus_node_info *node_info;
- uint64_t max_bw;
- uint64_t max_clk;
+ unsigned long max_bw;
+ unsigned long max_clk;
struct msm_bus_link_info link_info;
int num_pnodes;
struct path_node *pnode;
@@ -137,7 +137,7 @@ struct msm_bus_hw_algorithm {
struct msm_bus_inode_info *info,
struct msm_bus_fabric_registration *fab_pdata,
void *sel_cdata, int *master_tiers,
- int64_t add_bw);
+ long int add_bw);
void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
void *cdata, int nmasters, int nslaves, int ntslaves);
int (*commit)(struct msm_bus_fabric_registration
@@ -164,8 +164,8 @@ struct msm_bus_fabric_device {
struct msm_bus_fab_algorithm {
int (*update_clks)(struct msm_bus_fabric_device *fabdev,
struct msm_bus_inode_info *pme, int index,
- uint64_t curr_clk, uint64_t req_clk,
- uint64_t bwsum, int flag, int ctx,
+ unsigned long curr_clk, unsigned long req_clk,
+ unsigned long bwsum, int flag, int ctx,
unsigned int cl_active_flag);
int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
@@ -177,7 +177,7 @@ struct msm_bus_fab_algorithm {
struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
- int64_t add_bw, int *master_tiers, int ctx);
+ long int add_bw, int *master_tiers, int ctx);
};
struct msm_bus_board_algorithm {
@@ -204,7 +204,6 @@ struct msm_bus_client {
int curr;
};
-uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_dbg.c b/arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
index a44c53a..76f85c6 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
@@ -385,11 +385,11 @@ static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
pdata->usecase[index].vectors[j].dst);
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : ");
for (j = 0; j < pdata->usecase->num_paths; j++)
- i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ",
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ",
pdata->usecase[index].vectors[j].ab);
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : ");
for (j = 0; j < pdata->usecase->num_paths; j++)
- i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ",
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ",
pdata->usecase[index].vectors[j].ib);
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 28e100d..c83795a 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -222,13 +222,13 @@ error:
*/
static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
struct msm_bus_inode_info *slave, int index,
- uint64_t curr_clk_hz, uint64_t req_clk_hz,
- uint64_t bwsum_hz, int clk_flag, int ctx,
+ unsigned long curr_clk_hz, unsigned long req_clk_hz,
+ unsigned long bwsum_hz, int clk_flag, int ctx,
unsigned int cl_active_flag)
{
int i, status = 0;
- uint64_t max_pclk = 0, rate;
- uint64_t *pclk = NULL;
+ unsigned long max_pclk = 0, rate;
+ unsigned long *pclk = NULL;
struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
struct nodeclk *nodeclk;
@@ -261,7 +261,7 @@ static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
info->link_info.sel_clk = &info->link_info.clk[ctx];
max_pclk = max(max_pclk, *info->link_info.sel_clk);
}
- MSM_BUS_DBG("max_pclk from gateways: %llu\n", max_pclk);
+ MSM_BUS_DBG("max_pclk from gateways: %lu\n", max_pclk);
/* Maximum of all slave clocks. */
@@ -278,7 +278,7 @@ static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
}
- MSM_BUS_DBG("max_pclk from slaves & gws: %llu\n", max_pclk);
+ MSM_BUS_DBG("max_pclk from slaves & gws: %lu\n", max_pclk);
fabric->info.link_info.sel_clk =
&fabric->info.link_info.clk[ctx];
pclk = fabric->info.link_info.sel_clk;
@@ -296,7 +296,7 @@ static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
if (clk_flag) {
nodeclk = &fabric->info.nodeclk[ctx];
if (nodeclk->clk) {
- MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz:%llu\n",
+ MSM_BUS_DBG("clks: id: %d set-clk: %lu bwsum_hz:%lu\n",
fabric->fabdev.id, *pclk, bwsum_hz);
if (nodeclk->rate != *pclk) {
nodeclk->dirty = true;
@@ -308,8 +308,8 @@ static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
nodeclk = &slave->nodeclk[ctx];
if (nodeclk->clk) {
rate = *pclk;
- MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz: %llu\n",
- slave->node_info->priv_id, rate,
+ MSM_BUS_DBG("AXI_clks: id: %d set-clk: %lu "
+ "bwsum_hz: %lu\n" , slave->node_info->priv_id, rate,
bwsum_hz);
if (nodeclk->rate != rate) {
nodeclk->dirty = true;
@@ -332,7 +332,7 @@ skip_set_clks:
void msm_bus_fabric_update_bw(struct msm_bus_fabric_device *fabdev,
struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info,
- int64_t add_bw, int *master_tiers, int ctx)
+ long int add_bw, int *master_tiers, int ctx)
{
struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
void *sel_cdata;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index b4ac8d4..af50f0a 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -503,7 +503,7 @@ static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop,
struct msm_bus_inode_info *info,
struct msm_bus_fabric_registration *fab_pdata,
void *sel_cdata, int *master_tiers,
- int64_t add_bw)
+ long int add_bw)
{
struct msm_bus_noc_info *ninfo;
struct msm_bus_noc_qos_bw qos_bw;
@@ -528,7 +528,7 @@ static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop,
bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
- MSM_BUS_DBG("NOC: Update bw for: %d: %lld\n",
+ MSM_BUS_DBG("NOC: Update bw for: %d: %ld\n",
info->node_info->priv_id, add_bw);
for (i = 0; i < ports; i++) {
sel_cd->mas[info->node_info->masterp[i]].bw += bw;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_of.c b/arch/arm/mach-msm/msm_bus/msm_bus_of.c
deleted file mode 100644
index 24b0ce2..0000000
--- a/arch/arm/mach-msm/msm_bus/msm_bus_of.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <mach/msm_bus.h>
-
-/**
- * msm_bus_cl_get_pdata() - Generate bus client data from device tree
- * provided by clients.
- *
- * of_node: Device tree node to extract information from
- *
- * The function returns a valid pointer to the allocated bus-scale-pdata
- * if the vectors were correctly read from the client's device node.
- * Any error in reading or parsing the device node will return NULL
- * to the caller.
- */
-struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
-{
- struct device_node *of_node;
- struct msm_bus_scale_pdata *pdata = NULL;
- struct msm_bus_paths *usecase = NULL;
- int i = 0, j, ret, num_usecases = 0, num_paths, len;
- const uint32_t *vec_arr = NULL;
- bool mem_err = false;
-
- if (!pdev) {
- pr_err("Error: Null Platform device\n");
- return NULL;
- }
-
- of_node = pdev->dev.of_node;
- pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
- GFP_KERNEL);
- if (!pdata) {
- pr_err("Error: Memory allocation for pdata failed\n");
- mem_err = true;
- goto err;
- }
-
- ret = of_property_read_string(of_node, "qcom,msm_bus,name",
- &pdata->name);
- if (ret) {
- pr_err("Error: Client name not found\n");
- goto err;
- }
-
- ret = of_property_read_u32(of_node, "qcom,msm_bus,num_cases",
- &num_usecases);
- if (ret) {
- pr_err("Error: num_usecases not found\n");
- goto err;
- }
-
- pdata->num_usecases = num_usecases;
- ret = of_property_read_u32(of_node, "qcom,msm_bus,active_only",
- &pdata->active_only);
- if (ret) {
- pr_info("active_only flag absent.\n");
- pr_info("Using dual context by default\n");
- }
-
- usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
- pdata->num_usecases), GFP_KERNEL);
- if (!usecase) {
- pr_err("Error: Memory allocation for paths failed\n");
- mem_err = true;
- goto err;
- }
-
- ret = of_property_read_u32(of_node, "qcom,msm_bus,num_paths",
- &num_paths);
- if (ret) {
- pr_err("Error: num_paths not found\n");
- goto err;
- }
-
- vec_arr = of_get_property(of_node, "qcom,msm_bus,vectors", &len);
- if (len != num_usecases * num_paths * sizeof(struct msm_bus_vectors)) {
- pr_err("Error: Length-error on getting vectors\n");
- goto err;
- }
-
- for (i = 0; i < num_usecases; i++) {
- usecase[i].num_paths = num_paths;
- usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
- sizeof(struct msm_bus_vectors), GFP_KERNEL);
- if (!usecase[i].vectors) {
- mem_err = true;
- pr_err("Error: Mem alloc failure in vectors\n");
- goto err;
- }
-
- for (j = 0; j < num_paths; j++) {
- int index = ((i * num_paths) + j) * 4;
- usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
- usecase[i].vectors[j].dst =
- be32_to_cpu(vec_arr[index + 1]);
- usecase[i].vectors[j].ab =
- be32_to_cpu(vec_arr[index + 2]);
- usecase[i].vectors[j].ib =
- be32_to_cpu(vec_arr[index + 3]);
- }
- }
-
- pdata->usecase = usecase;
- return pdata;
-err:
- if (mem_err) {
- for (; i > 0; i--)
- kfree(usecase[i-1].vectors);
-
- kfree(usecase);
- kfree(pdata);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(msm_bus_cl_get_pdata);
-
-/**
- * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree
- * of_node: Device tree node to extract information from
- */
-void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
-{
- int i;
-
- for (i = 0; i < pdata->num_usecases; i++)
- kfree(pdata->usecase[i].vectors);
-
- kfree(pdata->usecase);
- kfree(pdata);
-}
-EXPORT_SYMBOL(msm_bus_cl_clear_pdata);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
index fa25ff1..a358d6d 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
@@ -239,7 +239,7 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
struct msm_bus_inode_info *info,
struct msm_bus_fabric_registration *fab_pdata,
void *sel_cdata, int *master_tiers,
- int64_t add_bw)
+ long int add_bw)
{
int index, i, j, tiers, ports;
struct commit_data *sel_cd = (struct commit_data *)sel_cdata;
@@ -302,9 +302,9 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
msm_bus_create_bw_tier_pair_bytes(tier,
tieredbw);
sel_cd->actarb[index] = tieredbw;
- MSM_BUS_DBG("tr:%d mpor:%d tbw:%ld bws: %lld\n",
- hop_tier, info->node_info->masterp[i],
- tieredbw, *hop->link_info.sel_bw);
+ MSM_BUS_DBG("tier:%d mport: %d tiered_bw:%ld "
+ "bwsum: %ld\n", hop_tier, info->node_info->
+ masterp[i], tieredbw, *hop->link_info.sel_bw);
}
}
}
@@ -314,12 +314,10 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
for (i = 0; i < ports; i++) {
sel_cd->bwsum[hop->node_info->slavep[i]]
= (uint16_t)msm_bus_create_bw_tier_pair_bytes(0,
- (uint32_t)msm_bus_div64(hop->node_info->num_sports,
- *hop->link_info.sel_bw));
- MSM_BUS_DBG("slavep:%d, link_bw: %u\n",
- hop->node_info->slavep[i], (uint32_t)
- msm_bus_div64(hop->node_info->num_sports,
- *hop->link_info.sel_bw));
+ (*hop->link_info.sel_bw/hop->node_info->num_sports));
+ MSM_BUS_DBG("slavep:%d, link_bw: %ld\n",
+ hop->node_info->slavep[i], (*hop->link_info.sel_bw/
+ hop->node_info->num_sports));
}
}
@@ -803,7 +801,7 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
struct msm_bus_inode_info *info,
struct msm_bus_fabric_registration *fab_pdata,
void *sel_cdata, int *master_tiers,
- int64_t add_bw)
+ long int add_bw)
{
int index, i, j, tiers, ports;
struct commit_data *sel_cd = (struct commit_data *)sel_cdata;
@@ -855,9 +853,9 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
sel_cd->arb[tier][index] =
msm_bus_create_bw_tier_pair_bytes(0, tieredbw);
sel_cd->actarb[tier][index] = tieredbw;
- MSM_BUS_DBG("tr:%d mpor:%d tbw:%lu bws: %lld\n",
- hop_tier, info->node_info->masterp[i], tieredbw,
- *hop->link_info.sel_bw);
+ MSM_BUS_DBG("tier:%d mport: %d tiered_bw:%lu "
+ "bwsum: %ld\n", hop_tier, info->node_info->
+ masterp[i], tieredbw, *hop->link_info.sel_bw);
}
}
}
@@ -867,13 +865,11 @@ static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop,
ports = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_sports);
for (i = 0; i < ports; i++) {
sel_cd->bwsum[hop->node_info->slavep[i]]
- = msm_bus_pack_bwsum_bytes((uint32_t)
- msm_bus_div64(hop->node_info->num_sports,
- *hop->link_info.sel_bw));
- MSM_BUS_DBG("slavep:%d, link_bw: %lld\n",
- hop->node_info->slavep[i],
- msm_bus_div64(hop->node_info->num_sports,
- *hop->link_info.sel_bw));
+ = msm_bus_pack_bwsum_bytes((*hop->link_info.
+ sel_bw/hop->node_info->num_sports));
+ MSM_BUS_DBG("slavep:%d, link_bw: %ld\n",
+ hop->node_info->slavep[i], (*hop->link_info.sel_bw/
+ hop->node_info->num_sports));
}
}
diff --git a/arch/arm/mach-msm/msm_dcvs.c b/arch/arm/mach-msm/msm_dcvs.c
index ed34a47..0c158de 100644
--- a/arch/arm/mach-msm/msm_dcvs.c
+++ b/arch/arm/mach-msm/msm_dcvs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
@@ -22,327 +23,167 @@
#include <linux/spinlock.h>
#include <linux/stringify.h>
#include <linux/debugfs.h>
-#include <linux/msm_tsens.h>
-#include <linux/platform_device.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
-#include <trace/events/mpdcvs_trace.h>
#define CORE_HANDLE_OFFSET (0xA0)
#define __err(f, ...) pr_err("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
#define __info(f, ...) pr_info("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
#define MAX_PENDING (5)
+enum {
+ MSM_DCVS_DEBUG_NOTIFIER = BIT(0),
+ MSM_DCVS_DEBUG_IDLE_PULSE = BIT(1),
+ MSM_DCVS_DEBUG_FREQ_CHANGE = BIT(2),
+};
+
struct core_attribs {
+ struct kobj_attribute idle_enabled;
+ struct kobj_attribute freq_change_enabled;
+ struct kobj_attribute actual_freq;
struct kobj_attribute freq_change_us;
+ struct kobj_attribute max_time_us;
+
+ struct kobj_attribute slack_time_us;
+ struct kobj_attribute scale_slack_time;
+ struct kobj_attribute scale_slack_time_pct;
struct kobj_attribute disable_pc_threshold;
- struct kobj_attribute em_win_size_min_us;
- struct kobj_attribute em_win_size_max_us;
+ struct kobj_attribute em_window_size;
struct kobj_attribute em_max_util_pct;
- struct kobj_attribute group_id;
- struct kobj_attribute max_freq_chg_time_us;
- struct kobj_attribute slack_mode_dynamic;
- struct kobj_attribute slack_time_min_us;
- struct kobj_attribute slack_time_max_us;
- struct kobj_attribute slack_weight_thresh_pct;
- struct kobj_attribute ss_no_corr_below_freq;
- struct kobj_attribute ss_win_size_min_us;
- struct kobj_attribute ss_win_size_max_us;
+ struct kobj_attribute ss_window_size;
struct kobj_attribute ss_util_pct;
-
- struct kobj_attribute active_coeff_a;
- struct kobj_attribute active_coeff_b;
- struct kobj_attribute active_coeff_c;
- struct kobj_attribute leakage_coeff_a;
- struct kobj_attribute leakage_coeff_b;
- struct kobj_attribute leakage_coeff_c;
- struct kobj_attribute leakage_coeff_d;
-
- struct kobj_attribute thermal_poll_ms;
-
- struct kobj_attribute freq_tbl;
+ struct kobj_attribute ss_iobusy_conv;
struct attribute_group attrib_group;
};
-enum pending_freq_state {
- /*
- * used by the thread to check if pending_freq was updated while it was
- * setting previous frequency - this is written to and used by the
- * freq updating thread
- */
- NO_OUTSTANDING_FREQ_CHANGE = 0,
-
- /*
- * This request is set to indicate that the governor is stopped and no
- * more frequency change requests are accepted untill it starts again.
- * This is checked/used by the threads that want to change the freq
- */
- STOP_FREQ_CHANGE = -1,
-
- /*
- * Any other +ve value means that a freq change was requested and the
- * thread has not gotten around to update it
- *
- * Any other -ve value means that this is the last freq change i.e. a
- * freq change was requested but the thread has not run yet and
- * meanwhile the governor was stopped.
- */
-};
-
struct dcvs_core {
- spinlock_t idle_state_change_lock;
- /* 0 when not idle (busy) 1 when idle and -1 when governor starts and
- * we dont know whether the next call is going to be idle enter or exit
- */
- int idle_entered;
-
- enum msm_dcvs_core_type type;
- /* this is the number in each type for example cpu 0,1,2 and gpu 0,1 */
- int type_core_num;
char core_name[CORE_NAME_MAX];
+ uint32_t new_freq[MAX_PENDING];
uint32_t actual_freq;
uint32_t freq_change_us;
uint32_t max_time_us; /* core param */
struct msm_dcvs_algo_param algo_param;
- struct msm_dcvs_energy_curve_coeffs coeffs;
+ struct msm_dcvs_idle *idle_driver;
+ struct msm_dcvs_freq *freq_driver;
/* private */
- ktime_t time_start;
+ int64_t time_start;
+ struct mutex lock;
+ spinlock_t cpu_lock;
struct task_struct *task;
struct core_attribs attrib;
- uint32_t dcvs_core_id;
- struct msm_dcvs_core_info *info;
- int sensor;
- wait_queue_head_t wait_q;
-
- int (*set_frequency)(int type_core_num, unsigned int freq);
- unsigned int (*get_frequency)(int type_core_num);
- int (*idle_enable)(int type_core_num,
- enum msm_core_control_event event);
- int (*set_floor_frequency)(int type_core_num, unsigned int freq);
-
- spinlock_t pending_freq_lock;
- int pending_freq;
-
- struct hrtimer slack_timer;
- struct delayed_work temperature_work;
+ uint32_t handle;
+ uint32_t group_id;
+ uint32_t freq_pending;
+ struct hrtimer timer;
+ int32_t timer_disabled;
+ /* track if kthread for change_freq is active */
+ int32_t change_freq_activated;
};
+static int msm_dcvs_debug;
static int msm_dcvs_enabled = 1;
module_param_named(enable, msm_dcvs_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);
-static struct dentry *debugfs_base;
+static struct dentry *debugfs_base;
static struct dcvs_core core_list[CORES_MAX];
+static DEFINE_MUTEX(core_list_lock);
static struct kobject *cores_kobj;
+static struct dcvs_core *core_handles[CORES_MAX];
-#define DCVS_MAX_NUM_FREQS 15
-static struct msm_dcvs_freq_entry cpu_freq_tbl[DCVS_MAX_NUM_FREQS];
-static unsigned num_cpu_freqs;
-static struct msm_dcvs_platform_data *dcvs_pdata;
-
-static void force_stop_slack_timer(struct dcvs_core *core)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&core->idle_state_change_lock, flags);
- hrtimer_cancel(&core->slack_timer);
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
-}
-
-static void force_start_slack_timer(struct dcvs_core *core, int slack_us)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&core->idle_state_change_lock, flags);
-
- /*
- * only start the timer if governor is not stopped
- */
- if (slack_us != 0) {
- ret = hrtimer_start(&core->slack_timer,
- ktime_set(0, slack_us * 1000),
- HRTIMER_MODE_REL_PINNED);
- if (ret) {
- pr_err("%s Failed to start timer ret = %d\n",
- core->core_name, ret);
- }
- }
-
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
-}
-
-static void stop_slack_timer(struct dcvs_core *core)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&core->idle_state_change_lock, flags);
- /* err only for cpu type's GPU's can do idle exit consecutively */
- if (core->idle_entered == 1 && !(core->dcvs_core_id >= GPU_OFFSET))
- __err("%s trying to reenter idle", core->core_name);
- core->idle_entered = 1;
- hrtimer_cancel(&core->slack_timer);
- core->idle_entered = 1;
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
-}
-
-static void start_slack_timer(struct dcvs_core *core, int slack_us)
-{
- unsigned long flags1, flags2;
- int ret;
-
- spin_lock_irqsave(&core->idle_state_change_lock, flags2);
-
- spin_lock_irqsave(&core->pending_freq_lock, flags1);
-
- /* err only for cpu type's GPU's can do idle enter consecutively */
- if (core->idle_entered == 0 && !(core->dcvs_core_id >= GPU_OFFSET))
- __err("%s trying to reexit idle", core->core_name);
- core->idle_entered = 0;
- /*
- * only start the timer if governor is not stopped
- */
- if (slack_us != 0
- && !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
- ret = hrtimer_start(&core->slack_timer,
- ktime_set(0, slack_us * 1000),
- HRTIMER_MODE_REL_PINNED);
- if (ret) {
- pr_err("%s Failed to start timer ret = %d\n",
- core->core_name, ret);
- }
- }
- spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
-
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
-}
-
-static void restart_slack_timer(struct dcvs_core *core, int slack_us)
-{
- unsigned long flags1, flags2;
- int ret;
-
- spin_lock_irqsave(&core->idle_state_change_lock, flags2);
-
- hrtimer_cancel(&core->slack_timer);
-
- spin_lock_irqsave(&core->pending_freq_lock, flags1);
-
- /*
- * only start the timer if idle is not entered
- * and governor is not stopped
- */
- if (slack_us != 0 && (core->idle_entered != 1)
- && !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
- ret = hrtimer_start(&core->slack_timer,
- ktime_set(0, slack_us * 1000),
- HRTIMER_MODE_REL_PINNED);
- if (ret) {
- pr_err("%s Failed to start timer ret = %d\n",
- core->core_name, ret);
- }
- }
- spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
-}
-
-static void apply_gpu_floor(int cpu_freq)
-{
- int i;
- int gpu_floor_freq = 0;
- struct dcvs_core *gpu;
-
- if (!dcvs_pdata)
- return;
-
- for (i = 0; i < dcvs_pdata->num_sync_rules; i++)
- if (cpu_freq > dcvs_pdata->sync_rules[i].cpu_khz) {
- gpu_floor_freq =
- dcvs_pdata->sync_rules[i].gpu_floor_khz;
- break;
- }
-
- if (!gpu_floor_freq)
- return;
-
- for (i = GPU_OFFSET; i < CORES_MAX; i++) {
- gpu = &core_list[i];
- if (gpu->dcvs_core_id == -1)
- continue;
- if (gpu->set_floor_frequency)
- gpu->set_floor_frequency(gpu->type_core_num,
- gpu_floor_freq);
- }
-}
-
+/* Change core frequency, called with core mutex locked */
static int __msm_dcvs_change_freq(struct dcvs_core *core)
{
int ret = 0;
unsigned long flags = 0;
- int requested_freq = 0;
- ktime_t time_start;
+ unsigned int requested_freq = 0;
+ unsigned int prev_freq = 0;
+ int64_t time_start = 0;
+ int64_t time_end = 0;
uint32_t slack_us = 0;
uint32_t ret1 = 0;
- spin_lock_irqsave(&core->pending_freq_lock, flags);
+ if (!core->freq_driver || !core->freq_driver->set_frequency) {
+ /* Core may have unregistered or hotplugged */
+ return -ENODEV;
+ }
repeat:
- BUG_ON(!core->pending_freq);
- if (core->pending_freq == STOP_FREQ_CHANGE)
- BUG();
-
- requested_freq = core->pending_freq;
- time_start = core->time_start;
- core->time_start = ns_to_ktime(0);
-
- if (requested_freq < 0) {
- requested_freq = -1 * requested_freq;
- core->pending_freq = STOP_FREQ_CHANGE;
- } else {
- core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
+ spin_lock_irqsave(&core->cpu_lock, flags);
+ if (unlikely(!core->freq_pending)) {
+ spin_unlock_irqrestore(&core->cpu_lock, flags);
+ return ret;
}
+ requested_freq = core->new_freq[core->freq_pending - 1];
+ if (unlikely(core->freq_pending > 1) &&
+ (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)) {
+ int i;
+ for (i = 0; i < core->freq_pending - 1; i++) {
+ __info("Core %s missing freq %u\n",
+ core->core_name, core->new_freq[i]);
+ }
+ }
+ time_start = core->time_start;
+ core->time_start = 0;
+ core->freq_pending = 0;
+ /**
+ * Cancel the timers; we don't want the timer firing as we are
+ * changing the clock rate. Don't let idle_exit and others set up
+ * timers either.
+ */
+ hrtimer_cancel(&core->timer);
+ core->timer_disabled = 1;
+ spin_unlock_irqrestore(&core->cpu_lock, flags);
if (requested_freq == core->actual_freq)
- goto out;
-
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
-
- if (core->type == MSM_DCVS_CORE_TYPE_CPU &&
- core->type_core_num == 0)
- apply_gpu_floor(requested_freq);
+ return ret;
/**
* Call the frequency sink driver to change the frequency
* We will need to get back the actual frequency in KHz and
* the record the time taken to change it.
*/
- ret = core->set_frequency(core->type_core_num, requested_freq);
- if (ret <= 0)
+ ret = core->freq_driver->set_frequency(core->freq_driver,
+ requested_freq);
+ if (ret <= 0) {
__err("Core %s failed to set freq %u\n",
core->core_name, requested_freq);
/* continue to call TZ to get updated slack timer */
- else
+ } else {
+ prev_freq = core->actual_freq;
core->actual_freq = ret;
+ }
- core->freq_change_us = (uint32_t)ktime_to_us(
- ktime_sub(ktime_get(), time_start));
+ time_end = ktime_to_ns(ktime_get());
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
+ __info("Core %s Time end %llu Time start: %llu\n",
+ core->core_name, time_end, time_start);
+ time_end -= time_start;
+ do_div(time_end, NSEC_PER_USEC);
+ core->freq_change_us = (uint32_t)time_end;
/**
* Disable low power modes if the actual frequency is >
* disable_pc_threshold.
*/
- if (core->actual_freq > core->algo_param.disable_pc_threshold) {
- core->idle_enable(core->type_core_num,
+ if (core->actual_freq >
+ core->algo_param.disable_pc_threshold) {
+ core->idle_driver->enable(core->idle_driver,
MSM_DCVS_DISABLE_HIGH_LATENCY_MODES);
- } else if (core->actual_freq <= core->algo_param.disable_pc_threshold) {
- core->idle_enable(core->type_core_num,
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Disabling LPM for %s\n", core->core_name);
+ } else if (core->actual_freq <=
+ core->algo_param.disable_pc_threshold) {
+ core->idle_driver->enable(core->idle_driver,
MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Enabling LPM for %s\n", core->core_name);
}
/**
@@ -350,162 +191,108 @@ repeat:
* to this frequency and that will get us the new slack
* timer
*/
- ret = msm_dcvs_scm_event(core->dcvs_core_id,
- MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
- core->actual_freq, core->freq_change_us,
- &slack_us, &ret1);
- if (ret) {
- __err("Error sending core (%s) dcvs_core_id = %d freq change (%u) reqfreq = %d slack_us=%d ret = %d\n",
- core->core_name, core->dcvs_core_id,
- core->actual_freq, requested_freq,
- slack_us, ret);
+ ret = msm_dcvs_scm_event(core->handle, MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
+ core->actual_freq, (uint32_t)time_end, &slack_us, &ret1);
+ if (!ret) {
+ /* Reset the slack timer */
+ if (slack_us) {
+ core->timer_disabled = 0;
+ ret = hrtimer_start(&core->timer,
+ ktime_set(0, slack_us * 1000),
+ HRTIMER_MODE_REL_PINNED);
+ if (ret)
+ __err("Failed to register timer for core %s\n",
+ core->core_name);
+ }
+ } else {
+ __err("Error sending core (%s) freq change (%u)\n",
+ core->core_name, core->actual_freq);
}
- /* TODO confirm that we get a valid freq from SM even when the above
- * FREQ_UPDATE fails
- */
- restart_slack_timer(core, slack_us);
- spin_lock_irqsave(&core->pending_freq_lock, flags);
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
+ __info("Freq %u requested for core %s (actual %u prev %u) "
+ "change time %u us slack time %u us\n",
+ requested_freq, core->core_name,
+ core->actual_freq, prev_freq,
+ core->freq_change_us, slack_us);
/**
* By the time we are done with freq changes, we could be asked to
* change again. Check before exiting.
*/
- if (core->pending_freq != NO_OUTSTANDING_FREQ_CHANGE
- && core->pending_freq != STOP_FREQ_CHANGE) {
+ if (core->freq_pending)
goto repeat;
- }
-
-out: /* should always be jumped to with the spin_lock held */
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+ core->change_freq_activated = 0;
return ret;
}
-static void msm_dcvs_report_temp_work(struct work_struct *work)
-{
- struct dcvs_core *core = container_of(work,
- struct dcvs_core,
- temperature_work.work);
- struct msm_dcvs_core_info *info = core->info;
- struct tsens_device tsens_dev;
- int ret;
- unsigned long temp = 0;
- int interval_ms;
-
- tsens_dev.sensor_num = core->sensor;
- ret = tsens_get_temp(&tsens_dev, &temp);
- if (!temp) {
- tsens_dev.sensor_num = 0;
- ret = tsens_get_temp(&tsens_dev, &temp);
- if (!temp)
- goto out;
- }
-
- if (temp == info->power_param.current_temp)
- goto out;
- info->power_param.current_temp = temp;
-
- ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
- &info->power_param,
- &info->freq_tbl[0], &core->coeffs);
-out:
- if (info->thermal_poll_ms == 0)
- interval_ms = 60000;
- else if (info->thermal_poll_ms < 1000)
- interval_ms = 1000;
- else
- interval_ms = info->thermal_poll_ms;
-
- schedule_delayed_work(&core->temperature_work,
- msecs_to_jiffies(interval_ms));
-}
-
static int msm_dcvs_do_freq(void *data)
{
struct dcvs_core *core = (struct dcvs_core *)data;
+ static struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
- while (!kthread_should_stop()) {
- wait_event(core->wait_q, !(core->pending_freq == 0 ||
- core->pending_freq == -1) ||
- kthread_should_stop());
-
- if (kthread_should_stop())
- break;
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ mutex_lock(&core->lock);
__msm_dcvs_change_freq(core);
- }
+ mutex_unlock(&core->lock);
- return 0;
-}
+ schedule();
-/* freq_pending_lock should be held */
-static void request_freq_change(struct dcvs_core *core, int new_freq)
-{
- if (new_freq == NO_OUTSTANDING_FREQ_CHANGE) {
- if (core->pending_freq != STOP_FREQ_CHANGE) {
- __err("%s gov started with earlier pending freq %d\n",
- core->core_name, core->pending_freq);
- }
- core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
- return;
- }
+ if (kthread_should_stop())
+ break;
- if (new_freq == STOP_FREQ_CHANGE) {
- if (core->pending_freq == NO_OUTSTANDING_FREQ_CHANGE)
- core->pending_freq = STOP_FREQ_CHANGE;
- else if (core->pending_freq > 0)
- core->pending_freq = -1 * core->pending_freq;
- return;
+ set_current_state(TASK_UNINTERRUPTIBLE);
}
- if (core->pending_freq < 0) {
- /* a value less than 0 means that the governor has stopped
- * and no more freq changes should be requested
- */
- return;
- }
+ __set_current_state(TASK_RUNNING);
- if (core->actual_freq != new_freq && core->pending_freq != new_freq) {
- core->pending_freq = new_freq;
- core->time_start = ktime_get();
- wake_up(&core->wait_q);
- }
+ return 0;
}
static int msm_dcvs_update_freq(struct dcvs_core *core,
enum msm_dcvs_scm_event event, uint32_t param0,
- uint32_t *ret1)
+ uint32_t *ret1, int *freq_changed)
{
int ret = 0;
unsigned long flags = 0;
- uint32_t new_freq = -EINVAL;
-
- spin_lock_irqsave(&core->pending_freq_lock, flags);
+ uint32_t new_freq = 0;
- ret = msm_dcvs_scm_event(core->dcvs_core_id, event, param0,
+ spin_lock_irqsave(&core->cpu_lock, flags);
+ ret = msm_dcvs_scm_event(core->handle, event, param0,
core->actual_freq, &new_freq, ret1);
if (ret) {
- if (ret == -13)
- ret = 0;
- else
- __err("Error (%d) sending SCM event %d for core %s\n",
+ __err("Error (%d) sending SCM event %d for core %s\n",
ret, event, core->core_name);
- goto out;
- }
-
- if (new_freq == 0) {
- /*
- * sometimes TZ gives us a 0 freq back,
- * do not queue up a request
- */
- goto out;
+ goto freq_done;
+ }
+
+ if ((core->actual_freq != new_freq) &&
+ (core->new_freq[core->freq_pending] != new_freq)) {
+ if (core->freq_pending >= MAX_PENDING - 1)
+ core->freq_pending = MAX_PENDING - 1;
+ core->new_freq[core->freq_pending++] = new_freq;
+ core->time_start = ktime_to_ns(ktime_get());
+
+ /* Schedule the frequency change */
+ if (!core->task)
+ __err("Uninitialized task for core %s\n",
+ core->core_name);
+ else {
+ if (freq_changed)
+ *freq_changed = 1;
+ core->change_freq_activated = 1;
+ wake_up_process(core->task);
+ }
+ } else {
+ if (freq_changed)
+ *freq_changed = 0;
}
-
- request_freq_change(core, new_freq);
-
-out:
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+freq_done:
+ spin_unlock_irqrestore(&core->cpu_lock, flags);
return ret;
}
@@ -513,17 +300,19 @@ out:
static enum hrtimer_restart msm_dcvs_core_slack_timer(struct hrtimer *timer)
{
int ret = 0;
- struct dcvs_core *core = container_of(timer,
- struct dcvs_core, slack_timer);
+ struct dcvs_core *core = container_of(timer, struct dcvs_core, timer);
uint32_t ret1;
+ uint32_t ret2;
+
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
+ __info("Slack timer fired for core %s\n", core->core_name);
- trace_printk("dcvs: Slack timer fired for core=%s\n", core->core_name);
/**
* Timer expired, notify TZ
* Dont care about the third arg.
*/
ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_QOS_TIMER_EXPIRED, 0,
- &ret1);
+ &ret1, &ret2);
if (ret)
__err("Timer expired for core %s but failed to notify.\n",
core->core_name);
@@ -544,28 +333,6 @@ static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj, \
return snprintf(buf, PAGE_SIZE, "%d\n", v); \
}
-#define DCVS_PARAM_STORE(_name) \
-static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
- struct kobj_attribute *attr, char *buf) \
-{ \
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
- return snprintf(buf, PAGE_SIZE, "%d\n", core->info->_name); \
-} \
-static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, const char *buf, size_t count) \
-{ \
- int ret = 0; \
- uint32_t val = 0; \
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
- ret = kstrtouint(buf, 10, &val); \
- if (ret) { \
- __err("Invalid input %s for %s\n", buf, __stringify(_name));\
- } else { \
- core->info->_name = val; \
- } \
- return count; \
-}
-
#define DCVS_ALGO_PARAM(_name) \
static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
struct kobj_attribute *attr, char *buf) \
@@ -579,13 +346,14 @@ static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
int ret = 0; \
uint32_t val = 0; \
struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+ mutex_lock(&core->lock); \
ret = kstrtouint(buf, 10, &val); \
if (ret) { \
__err("Invalid input %s for %s\n", buf, __stringify(_name));\
} else { \
uint32_t old_val = core->algo_param._name; \
core->algo_param._name = val; \
- ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id, \
+ ret = msm_dcvs_scm_set_algo_params(core->handle, \
&core->algo_param); \
if (ret) { \
core->algo_param._name = old_val; \
@@ -593,37 +361,7 @@ static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
ret, val, __stringify(_name)); \
} \
} \
- return count; \
-}
-
-#define DCVS_ENERGY_PARAM(_name) \
-static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
- struct kobj_attribute *attr, char *buf) \
-{ \
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
- return snprintf(buf, PAGE_SIZE, "%d\n", core->coeffs._name); \
-} \
-static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, const char *buf, size_t count) \
-{ \
- int ret = 0; \
- int32_t val = 0; \
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
- ret = kstrtoint(buf, 10, &val); \
- if (ret) { \
- __err("Invalid input %s for %s\n", buf, __stringify(_name));\
- } else { \
- int32_t old_val = core->coeffs._name; \
- core->coeffs._name = val; \
- ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, \
- &core->info->power_param, &core->info->freq_tbl[0], \
- &core->coeffs); \
- if (ret) { \
- core->coeffs._name = old_val; \
- __err("Error(%d) in setting %d for coeffs param %s\n",\
- ret, val, __stringify(_name)); \
- } \
- } \
+ mutex_unlock(&core->lock); \
return count; \
}
@@ -645,110 +383,27 @@ static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
* Function declarations for different attributes.
* Gets used when setting the attribute show and store parameters.
*/
+DCVS_PARAM_SHOW(idle_enabled, (core->idle_driver != NULL))
+DCVS_PARAM_SHOW(freq_change_enabled, (core->freq_driver != NULL))
+DCVS_PARAM_SHOW(actual_freq, (core->actual_freq))
DCVS_PARAM_SHOW(freq_change_us, (core->freq_change_us))
+DCVS_PARAM_SHOW(max_time_us, (core->max_time_us))
+DCVS_ALGO_PARAM(slack_time_us)
+DCVS_ALGO_PARAM(scale_slack_time)
+DCVS_ALGO_PARAM(scale_slack_time_pct)
DCVS_ALGO_PARAM(disable_pc_threshold)
-DCVS_ALGO_PARAM(em_win_size_min_us)
-DCVS_ALGO_PARAM(em_win_size_max_us)
+DCVS_ALGO_PARAM(em_window_size)
DCVS_ALGO_PARAM(em_max_util_pct)
-DCVS_ALGO_PARAM(group_id)
-DCVS_ALGO_PARAM(max_freq_chg_time_us)
-DCVS_ALGO_PARAM(slack_mode_dynamic)
-DCVS_ALGO_PARAM(slack_time_min_us)
-DCVS_ALGO_PARAM(slack_time_max_us)
-DCVS_ALGO_PARAM(slack_weight_thresh_pct)
-DCVS_ALGO_PARAM(ss_no_corr_below_freq)
-DCVS_ALGO_PARAM(ss_win_size_min_us)
-DCVS_ALGO_PARAM(ss_win_size_max_us)
+DCVS_ALGO_PARAM(ss_window_size)
DCVS_ALGO_PARAM(ss_util_pct)
-
-DCVS_ENERGY_PARAM(active_coeff_a)
-DCVS_ENERGY_PARAM(active_coeff_b)
-DCVS_ENERGY_PARAM(active_coeff_c)
-DCVS_ENERGY_PARAM(leakage_coeff_a)
-DCVS_ENERGY_PARAM(leakage_coeff_b)
-DCVS_ENERGY_PARAM(leakage_coeff_c)
-DCVS_ENERGY_PARAM(leakage_coeff_d)
-
-DCVS_PARAM_STORE(thermal_poll_ms)
-
-static ssize_t msm_dcvs_attr_freq_tbl_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- struct msm_dcvs_freq_entry *freq_tbl;
- char *buf_idx = buf;
- int i, len;
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl);
-
- freq_tbl = core->info->freq_tbl;
- *buf_idx = '\0';
-
- /* limit the number of frequencies we will print into
- * the PAGE_SIZE sysfs show buffer. */
- if (core->info->power_param.num_freq > 64)
- return 0;
-
- for (i = 0; i < core->info->power_param.num_freq; i++) {
- if (freq_tbl[i].is_trans_level) {
- len = snprintf(buf_idx, 10, "%7d ", freq_tbl[i].freq);
- /* buf_idx always points at terminating null */
- buf_idx += len;
- }
- }
- /* overwrite final trailing space with newline */
- if (buf_idx > buf)
- *(buf_idx - 1) = '\n';
-
- return buf_idx - buf;
-}
-
-static ssize_t msm_dcvs_attr_freq_tbl_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct msm_dcvs_freq_entry *freq_tbl;
- uint32_t freq;
- int i, ret;
- struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl);
-
- freq_tbl = core->info->freq_tbl;
-
- ret = kstrtouint(buf, 10, &freq);
- if (ret) {
- __err("Invalid input %s for freq_tbl\n", buf);
- return count;
- }
-
- for (i = 0; i < core->info->power_param.num_freq; i++)
- if (freq_tbl[i].freq == freq) {
- freq_tbl[i].is_trans_level ^= 1;
- break;
- }
-
- if (i >= core->info->power_param.num_freq) {
- __err("Invalid frequency for freq_tbl: %d\n", freq);
- return count;
- }
-
- ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
- &core->info->power_param,
- &core->info->freq_tbl[0],
- &core->coeffs);
- if (ret) {
- freq_tbl[i].is_trans_level ^= 1;
- __err("Error %d in toggling freq %d (orig enable val %d)\n",
- ret, freq_tbl[i].freq, freq_tbl[i].is_trans_level);
- }
- return count;
-}
+DCVS_ALGO_PARAM(ss_iobusy_conv)
static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core)
{
int ret = 0;
struct kobject *core_kobj = NULL;
- const int attr_count = 25;
+ const int attr_count = 15;
BUG_ON(!cores_kobj);
@@ -760,35 +415,23 @@ static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core)
goto done;
}
- DCVS_RO_ATTRIB(0, freq_change_us);
-
- DCVS_RW_ATTRIB(1, disable_pc_threshold);
- DCVS_RW_ATTRIB(2, em_win_size_min_us);
- DCVS_RW_ATTRIB(3, em_win_size_max_us);
- DCVS_RW_ATTRIB(4, em_max_util_pct);
- DCVS_RW_ATTRIB(5, group_id);
- DCVS_RW_ATTRIB(6, max_freq_chg_time_us);
- DCVS_RW_ATTRIB(7, slack_mode_dynamic);
- DCVS_RW_ATTRIB(8, slack_weight_thresh_pct);
- DCVS_RW_ATTRIB(9, slack_time_min_us);
- DCVS_RW_ATTRIB(10, slack_time_max_us);
- DCVS_RW_ATTRIB(11, ss_no_corr_below_freq);
- DCVS_RW_ATTRIB(12, ss_win_size_min_us);
- DCVS_RW_ATTRIB(13, ss_win_size_max_us);
- DCVS_RW_ATTRIB(14, ss_util_pct);
-
- DCVS_RW_ATTRIB(15, active_coeff_a);
- DCVS_RW_ATTRIB(16, active_coeff_b);
- DCVS_RW_ATTRIB(17, active_coeff_c);
- DCVS_RW_ATTRIB(18, leakage_coeff_a);
- DCVS_RW_ATTRIB(19, leakage_coeff_b);
- DCVS_RW_ATTRIB(20, leakage_coeff_c);
- DCVS_RW_ATTRIB(21, leakage_coeff_d);
- DCVS_RW_ATTRIB(22, thermal_poll_ms);
-
- DCVS_RW_ATTRIB(23, freq_tbl);
-
- core->attrib.attrib_group.attrs[24] = NULL;
+ DCVS_RO_ATTRIB(0, idle_enabled);
+ DCVS_RO_ATTRIB(1, freq_change_enabled);
+ DCVS_RO_ATTRIB(2, actual_freq);
+ DCVS_RO_ATTRIB(3, freq_change_us);
+ DCVS_RO_ATTRIB(4, max_time_us);
+
+ DCVS_RW_ATTRIB(5, slack_time_us);
+ DCVS_RW_ATTRIB(6, scale_slack_time);
+ DCVS_RW_ATTRIB(7, scale_slack_time_pct);
+ DCVS_RW_ATTRIB(8, disable_pc_threshold);
+ DCVS_RW_ATTRIB(9, em_window_size);
+ DCVS_RW_ATTRIB(10, em_max_util_pct);
+ DCVS_RW_ATTRIB(11, ss_window_size);
+ DCVS_RW_ATTRIB(12, ss_util_pct);
+ DCVS_RW_ATTRIB(13, ss_iobusy_conv);
+
+ core->attrib.attrib_group.attrs[14] = NULL;
core_kobj = kobject_create_and_add(core->core_name, cores_kobj);
if (!core_kobj) {
@@ -799,6 +442,8 @@ static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core)
ret = sysfs_create_group(core_kobj, &core->attrib.attrib_group);
if (ret)
__err("Cannot create core %s attr group\n", core->core_name);
+ else if (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER)
+ __info("Setting up attributes for core %s\n", core->core_name);
done:
if (ret) {
@@ -809,337 +454,273 @@ done:
return ret;
}
-static int get_core_offset(enum msm_dcvs_core_type type, int num)
-{
- int offset = -EINVAL;
-
- switch (type) {
- case MSM_DCVS_CORE_TYPE_CPU:
- offset = CPU_OFFSET + num;
- BUG_ON(offset >= GPU_OFFSET);
- break;
- case MSM_DCVS_CORE_TYPE_GPU:
- offset = GPU_OFFSET + num;
- BUG_ON(offset >= CORES_MAX);
- break;
- default:
- BUG();
- }
-
- return offset;
-}
-
-/* Return the core and initialize non platform data specific numbers in it */
-static struct dcvs_core *msm_dcvs_add_core(enum msm_dcvs_core_type type,
- int num)
+/* Return the core if found or add to list if @add_to_list is true */
+static struct dcvs_core *msm_dcvs_get_core(const char *name, int add_to_list)
{
struct dcvs_core *core = NULL;
int i;
- char name[CORE_NAME_MAX];
-
- i = get_core_offset(type, num);
- if (i < 0)
- return NULL;
-
- if (type == MSM_DCVS_CORE_TYPE_CPU)
- snprintf(name, CORE_NAME_MAX, "cpu%d", num);
- else
- snprintf(name, CORE_NAME_MAX, "gpu%d", num);
-
- core = &core_list[i];
- core->dcvs_core_id = i;
- strlcpy(core->core_name, name, CORE_NAME_MAX);
- spin_lock_init(&core->pending_freq_lock);
- spin_lock_init(&core->idle_state_change_lock);
- hrtimer_init(&core->slack_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- core->slack_timer.function = msm_dcvs_core_slack_timer;
- return core;
-}
-
-/* Return the core if found or add to list if @add_to_list is true */
-static struct dcvs_core *msm_dcvs_get_core(int offset)
-{
- /* if the handle is still not set bug */
- BUG_ON(core_list[offset].dcvs_core_id == -1);
- return &core_list[offset];
-}
+ int empty = -1;
-void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
-{
- BUG_ON(freq == 0 || voltage == 0 ||
- num_cpu_freqs == DCVS_MAX_NUM_FREQS);
-
- cpu_freq_tbl[num_cpu_freqs].freq = freq;
- cpu_freq_tbl[num_cpu_freqs].voltage = voltage;
+ if (!name[0] ||
+ (strnlen(name, CORE_NAME_MAX - 1) == CORE_NAME_MAX - 1))
+ return core;
- num_cpu_freqs++;
-}
-
-static void update_cpu_dcvs_params(struct msm_dcvs_core_info *info)
-{
- int i;
-
- BUG_ON(num_cpu_freqs == 0);
+ mutex_lock(&core_list_lock);
+ for (i = 0; i < CORES_MAX; i++) {
+ core = &core_list[i];
+ if ((empty < 0) && !core->core_name[0]) {
+ empty = i;
+ continue;
+ }
+ if (!strncmp(name, core->core_name, CORE_NAME_MAX))
+ break;
+ }
- info->freq_tbl = cpu_freq_tbl;
- info->power_param.num_freq = num_cpu_freqs;
+ /* Check for core_list full */
+ if ((i == CORES_MAX) && (empty < 0)) {
+ mutex_unlock(&core_list_lock);
+ return NULL;
+ }
- if (!dcvs_pdata || dcvs_pdata->num_sync_rules == 0)
- return;
+ if (i == CORES_MAX && add_to_list) {
+ core = &core_list[empty];
+ strlcpy(core->core_name, name, CORE_NAME_MAX);
+ mutex_init(&core->lock);
+ spin_lock_init(&core->cpu_lock);
+ core->handle = empty + CORE_HANDLE_OFFSET;
+ hrtimer_init(&core->timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ core->timer.function = msm_dcvs_core_slack_timer;
+ }
+ mutex_unlock(&core_list_lock);
- /* the first sync rule shows what the turbo frequencies are -
- * these frequencies need energy offsets set */
- for (i = 0; i < DCVS_MAX_NUM_FREQS && cpu_freq_tbl[i].freq != 0; i++)
- if (cpu_freq_tbl[i].freq > dcvs_pdata->sync_rules[0].cpu_khz) {
- cpu_freq_tbl[i].active_energy_offset = 100;
- cpu_freq_tbl[i].leakage_energy_offset = 100;
- }
+ return core;
}
-int msm_dcvs_register_core(
- enum msm_dcvs_core_type type,
- int type_core_num,
- struct msm_dcvs_core_info *info,
- int (*set_frequency)(int type_core_num, unsigned int freq),
- unsigned int (*get_frequency)(int type_core_num),
- int (*idle_enable)(int type_core_num,
- enum msm_core_control_event event),
- int (*set_floor_frequency)(int type_core_num, unsigned int freq),
- int sensor)
+int msm_dcvs_register_core(const char *core_name, uint32_t group_id,
+ struct msm_dcvs_core_info *info)
{
int ret = -EINVAL;
- int offset;
struct dcvs_core *core = NULL;
- uint32_t ret1;
- uint32_t ret2;
- offset = get_core_offset(type, type_core_num);
- if (offset < 0)
+ if (!core_name || !core_name[0])
return ret;
- if (core_list[offset].dcvs_core_id != -1)
- return core_list[offset].dcvs_core_id;
- core = msm_dcvs_add_core(type, type_core_num);
+ core = msm_dcvs_get_core(core_name, true);
if (!core)
return ret;
- core->type = type;
- core->type_core_num = type_core_num;
- core->set_frequency = set_frequency;
- core->get_frequency = get_frequency;
- core->idle_enable = idle_enable;
- core->set_floor_frequency = set_floor_frequency;
- core->pending_freq = STOP_FREQ_CHANGE;
-
- core->info = info;
- if (type == MSM_DCVS_CORE_TYPE_CPU)
- update_cpu_dcvs_params(info);
+ mutex_lock(&core->lock);
+ if (group_id) {
+ /**
+ * Create a group for cores, if this core is part of a group
+ * if the group_id is 0, the core is not part of a group.
+ * If the group_id already exits, it will through an error
+ * which we will ignore.
+ */
+ ret = msm_dcvs_scm_create_group(group_id);
+ if (ret == -ENOMEM)
+ goto bail;
+ }
+ core->group_id = group_id;
+ core->max_time_us = info->core_param.max_time_us;
memcpy(&core->algo_param, &info->algo_param,
sizeof(struct msm_dcvs_algo_param));
- memcpy(&core->coeffs, &info->energy_coeffs,
- sizeof(struct msm_dcvs_energy_curve_coeffs));
-
- /*
- * The tz expects cpu0 to represent bit 0 in the mask, however the
- * dcvs_core_id needs to start from 1, dcvs_core_id = 0 is used to
- * indicate that this request is not associated with any core.
- * mpdecision
- */
- info->core_param.core_bitmask_id
- = 1 << (core->dcvs_core_id - CPU_OFFSET);
- core->sensor = sensor;
-
- ret = msm_dcvs_scm_register_core(core->dcvs_core_id, &info->core_param);
- if (ret) {
- __err("%s: scm register core fail handle = %d ret = %d\n",
- __func__, core->dcvs_core_id, ret);
- goto bail;
- }
-
- ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id,
- &info->algo_param);
- if (ret) {
- __err("%s: scm algo params failed ret = %d\n", __func__, ret);
- goto bail;
- }
-
- ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
- &info->power_param,
- &info->freq_tbl[0], &core->coeffs);
- if (ret) {
- __err("%s: scm power params failed ret = %d\n", __func__, ret);
+ ret = msm_dcvs_scm_register_core(core->handle, group_id,
+ &info->core_param, info->freq_tbl);
+ if (ret)
goto bail;
- }
- ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE,
- core->actual_freq, 0, &ret1, &ret2);
+ ret = msm_dcvs_scm_set_algo_params(core->handle, &info->algo_param);
if (ret)
goto bail;
ret = msm_dcvs_setup_core_sysfs(core);
if (ret) {
__err("Unable to setup core %s sysfs\n", core->core_name);
+ core_handles[core->handle - CORE_HANDLE_OFFSET] = NULL;
goto bail;
}
- core->idle_entered = -1;
- init_waitqueue_head(&core->wait_q);
- core->task = kthread_run(msm_dcvs_do_freq, (void *)core,
- "msm_dcvs/%d", core->dcvs_core_id);
- ret = core->dcvs_core_id;
-
- INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work);
- schedule_delayed_work(&core->temperature_work,
- msecs_to_jiffies(info->thermal_poll_ms));
- return ret;
+
bail:
- core->dcvs_core_id = -1;
- return -EINVAL;
+ mutex_unlock(&core->lock);
+ return ret;
}
EXPORT_SYMBOL(msm_dcvs_register_core);
-void msm_dcvs_update_limits(int dcvs_core_id)
+int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv)
{
- struct dcvs_core *core;
+ int ret = -EINVAL;
+ struct dcvs_core *core = NULL;
+ uint32_t ret1;
+ uint32_t ret2;
+
+ if (!drv || !drv->core_name)
+ return ret;
- if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) {
- __err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
- __func__, dcvs_core_id);
- return;
+ core = msm_dcvs_get_core(drv->core_name, true);
+ if (!core)
+ return ret;
+
+ mutex_lock(&core->lock);
+ if (core->freq_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
+ __info("Frequency notifier for %s being replaced\n",
+ core->core_name);
+ core->freq_driver = drv;
+ core->task = kthread_create(msm_dcvs_do_freq, (void *)core,
+ "msm_dcvs/%d", core->handle);
+ if (IS_ERR(core->task)) {
+ mutex_unlock(&core->lock);
+ return -EFAULT;
+ }
+
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Enabling idle pulse for %s\n", core->core_name);
+
+ if (core->idle_driver) {
+ core->actual_freq = core->freq_driver->get_frequency(drv);
+ /* Notify TZ to start receiving idle info for the core */
+ ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_ENABLE_CORE, 1,
+ &ret1, &ret2);
+ core->idle_driver->enable(core->idle_driver,
+ MSM_DCVS_ENABLE_IDLE_PULSE);
}
- core = msm_dcvs_get_core(dcvs_core_id);
- core->actual_freq = core->get_frequency(core->type_core_num);
+ mutex_unlock(&core->lock);
+
+ return core->handle;
}
+EXPORT_SYMBOL(msm_dcvs_freq_sink_register);
-int msm_dcvs_freq_sink_start(int dcvs_core_id)
+int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv)
{
int ret = -EINVAL;
struct dcvs_core *core = NULL;
uint32_t ret1;
- unsigned long flags;
- int new_freq;
- int timer_interval_us;
-
- if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) {
- __err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
- __func__, dcvs_core_id);
- return -EINVAL;
- }
+ uint32_t ret2;
- core = msm_dcvs_get_core(dcvs_core_id);
- if (!core)
+ if (!drv || !drv->core_name)
return ret;
- core->actual_freq = core->get_frequency(core->type_core_num);
+ core = msm_dcvs_get_core(drv->core_name, false);
+ if (!core)
+ return ret;
- spin_lock_irqsave(&core->pending_freq_lock, flags);
- /* mark that we are ready to accept new frequencies */
- request_freq_change(core, NO_OUTSTANDING_FREQ_CHANGE);
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+ mutex_lock(&core->lock);
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Disabling idle pulse for %s\n", core->core_name);
+ if (core->idle_driver) {
+ core->idle_driver->enable(core->idle_driver,
+ MSM_DCVS_DISABLE_IDLE_PULSE);
+ /* Notify TZ to stop receiving idle info for the core */
+ ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_ENABLE_CORE, 0,
+ &ret1, &ret2);
+ hrtimer_cancel(&core->timer);
+ core->idle_driver->enable(core->idle_driver,
+ MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Enabling LPM for %s\n", core->core_name);
+ }
+ core->freq_pending = 0;
+ core->freq_driver = NULL;
+ mutex_unlock(&core->lock);
+ kthread_stop(core->task);
- spin_lock_irqsave(&core->idle_state_change_lock, flags);
- core->idle_entered = -1;
- spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(msm_dcvs_freq_sink_unregister);
- /* Notify TZ to start receiving idle info for the core */
- ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 1, &ret1);
+int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv)
+{
+ int ret = -EINVAL;
+ struct dcvs_core *core = NULL;
- ret = msm_dcvs_scm_event(
- core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE, core->actual_freq,
- 0, &new_freq, &timer_interval_us);
- if (ret)
- __err("Error (%d) DCVS sending online for %s\n",
- ret, core->core_name);
+ if (!drv || !drv->core_name)
+ return ret;
- if (new_freq != 0) {
- spin_lock_irqsave(&core->pending_freq_lock, flags);
- request_freq_change(core, new_freq);
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
- }
- force_start_slack_timer(core, timer_interval_us);
+ core = msm_dcvs_get_core(drv->core_name, true);
+ if (!core)
+ return ret;
+ mutex_lock(&core->lock);
+ if (core->idle_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
+ __info("Idle notifier for %s being replaced\n",
+ core->core_name);
+ core->idle_driver = drv;
+ mutex_unlock(&core->lock);
- core->idle_enable(core->type_core_num, MSM_DCVS_ENABLE_IDLE_PULSE);
- return 0;
+ return core->handle;
}
-EXPORT_SYMBOL(msm_dcvs_freq_sink_start);
+EXPORT_SYMBOL(msm_dcvs_idle_source_register);
-int msm_dcvs_freq_sink_stop(int dcvs_core_id)
+int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv)
{
int ret = -EINVAL;
struct dcvs_core *core = NULL;
- uint32_t ret1;
- uint32_t freq;
- unsigned long flags;
- if (dcvs_core_id < 0 || dcvs_core_id > CORES_MAX) {
- pr_err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
- __func__, dcvs_core_id);
- return -EINVAL;
- }
+ if (!drv || !drv->core_name)
+ return ret;
- core = msm_dcvs_get_core(dcvs_core_id);
- if (!core) {
- __err("couldn't find core for coreid = %d\n", dcvs_core_id);
+ core = msm_dcvs_get_core(drv->core_name, false);
+ if (!core)
return ret;
- }
- core->idle_enable(core->type_core_num, MSM_DCVS_DISABLE_IDLE_PULSE);
- /* Notify TZ to stop receiving idle info for the core */
- ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_DCVS_ENABLE,
- 0, core->actual_freq, &freq, &ret1);
- core->idle_enable(core->type_core_num,
- MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
- spin_lock_irqsave(&core->pending_freq_lock, flags);
- /* flush out all the pending freq changes */
- request_freq_change(core, STOP_FREQ_CHANGE);
- spin_unlock_irqrestore(&core->pending_freq_lock, flags);
- force_stop_slack_timer(core);
+ mutex_lock(&core->lock);
+ core->idle_driver = NULL;
+ mutex_unlock(&core->lock);
return 0;
}
-EXPORT_SYMBOL(msm_dcvs_freq_sink_stop);
+EXPORT_SYMBOL(msm_dcvs_idle_source_unregister);
-int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
- uint32_t iowaited)
+int msm_dcvs_idle(int handle, enum msm_core_idle_state state, uint32_t iowaited)
{
int ret = 0;
struct dcvs_core *core = NULL;
uint32_t timer_interval_us = 0;
uint32_t r0, r1;
+ uint32_t freq_changed = 0;
- if (dcvs_core_id < CPU_OFFSET || dcvs_core_id > CORES_MAX) {
- pr_err("invalid dcvs_core_id = %d ret -EINVAL\n", dcvs_core_id);
- return -EINVAL;
- }
+ if (handle >= CORE_HANDLE_OFFSET &&
+ (handle - CORE_HANDLE_OFFSET) < CORES_MAX)
+ core = &core_list[handle - CORE_HANDLE_OFFSET];
+
+ BUG_ON(!core);
- core = msm_dcvs_get_core(dcvs_core_id);
+ if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
+ __info("Core %s idle state %d\n", core->core_name, state);
switch (state) {
case MSM_DCVS_IDLE_ENTER:
- stop_slack_timer(core);
- ret = msm_dcvs_scm_event(core->dcvs_core_id,
+ hrtimer_cancel(&core->timer);
+ ret = msm_dcvs_scm_event(core->handle,
MSM_DCVS_SCM_IDLE_ENTER, 0, 0, &r0, &r1);
- if (ret < 0 && ret != -13)
+ if (ret)
__err("Error (%d) sending idle enter for %s\n",
ret, core->core_name);
- trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 1);
break;
case MSM_DCVS_IDLE_EXIT:
+ hrtimer_cancel(&core->timer);
ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_IDLE_EXIT,
- iowaited, &timer_interval_us);
+ iowaited, &timer_interval_us, &freq_changed);
if (ret)
__err("Error (%d) sending idle exit for %s\n",
ret, core->core_name);
- start_slack_timer(core, timer_interval_us);
- trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 0);
- trace_msm_dcvs_iowait("iowait", core->core_name, iowaited);
- trace_msm_dcvs_slack_time("slack_timer_dcvs", core->core_name,
- timer_interval_us);
+ /* only start slack timer if change_freq won't */
+ if (freq_changed || core->change_freq_activated)
+ break;
+ if (timer_interval_us && !core->timer_disabled) {
+ ret = hrtimer_start(&core->timer,
+ ktime_set(0, timer_interval_us * 1000),
+ HRTIMER_MODE_REL_PINNED);
+
+ if (ret)
+ __err("Failed to register timer for core %s\n",
+ core->core_name);
+ }
break;
}
@@ -1174,6 +755,13 @@ static int __init msm_dcvs_late_init(void)
goto err;
}
+ if (!debugfs_create_u32("debug_mask", S_IRUGO | S_IWUSR,
+ debugfs_base, &msm_dcvs_debug)) {
+ __err("Cannot create debugfs entry %s\n", "debug_mask");
+ ret = -ENOMEM;
+ goto err;
+ }
+
err:
if (ret) {
kobject_del(cores_kobj);
@@ -1185,45 +773,19 @@ err:
}
late_initcall(msm_dcvs_late_init);
-static int __devinit dcvs_probe(struct platform_device *pdev)
-{
- if (pdev->dev.platform_data)
- dcvs_pdata = pdev->dev.platform_data;
-
- return 0;
-}
-
-static struct platform_driver dcvs_driver = {
- .probe = dcvs_probe,
- .driver = {
- .name = "dcvs",
- .owner = THIS_MODULE,
- },
-};
-
static int __init msm_dcvs_early_init(void)
{
int ret = 0;
- int i;
-
- platform_driver_register(&dcvs_driver);
if (!msm_dcvs_enabled) {
__info("Not enabled (%d)\n", msm_dcvs_enabled);
return 0;
}
-
- /* Only need about 32kBytes for normal operation */
- ret = msm_dcvs_scm_init(SZ_32K);
- if (ret) {
+ ret = msm_dcvs_scm_init(10 * 1024);
+ if (ret)
__err("Unable to initialize DCVS err=%d\n", ret);
- goto done;
- }
- for (i = 0; i < CORES_MAX; i++)
- core_list[i].dcvs_core_id = -1;
-done:
return ret;
}
postcore_initcall(msm_dcvs_early_init);
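
The msm_dcvs_do_freq() rework above drops the wait-queue in favour of the
bare kthread handshake: mark the task TASK_UNINTERRUPTIBLE before the work
window, sleep via schedule(), and have the producer call wake_up_process().
A minimal sketch of that pattern; pending_work() is an assumed helper:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Consumer side. Setting the state *before* the work window means
     * a wake_up_process() that races with the work just flips us back
     * to TASK_RUNNING, so the schedule() below returns immediately
     * instead of sleeping through the wakeup.
     */
    static int worker_fn(void *data)
    {
    	set_current_state(TASK_UNINTERRUPTIBLE);
    	while (!kthread_should_stop()) {
    		pending_work(data);	/* assumed helper */
    		schedule();		/* sleep until woken */
    		if (kthread_should_stop())
    			break;
    		set_current_state(TASK_UNINTERRUPTIBLE);
    	}
    	__set_current_state(TASK_RUNNING);
    	return 0;
    }

The producer side simply calls wake_up_process(task) after queuing its
request, as msm_dcvs_update_freq() does above.
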
diff --git a/arch/arm/mach-msm/msm_dcvs_idle.c b/arch/arm/mach-msm/msm_dcvs_idle.c
new file mode 100644
index 0000000..179e170
--- /dev/null
+++ b/arch/arm/mach-msm/msm_dcvs_idle.c
@@ -0,0 +1,170 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <mach/msm_dcvs.h>
+
+struct cpu_idle_info {
+ int cpu;
+ int enabled;
+ int handle;
+ struct msm_dcvs_idle dcvs_notifier;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
+static char core_name[NR_CPUS][10];
+static struct pm_qos_request qos_req;
+static uint32_t latency;
+
+static int msm_dcvs_idle_notifier(struct msm_dcvs_idle *self,
+ enum msm_core_control_event event)
+{
+ struct cpu_idle_info *info = container_of(self,
+ struct cpu_idle_info, dcvs_notifier);
+
+ switch (event) {
+ case MSM_DCVS_ENABLE_IDLE_PULSE:
+ info->enabled = true;
+ break;
+
+ case MSM_DCVS_DISABLE_IDLE_PULSE:
+ info->enabled = false;
+ break;
+
+ case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
+ pm_qos_update_request(&qos_req, PM_QOS_DEFAULT_VALUE);
+ break;
+
+ case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
+ pm_qos_update_request(&qos_req, latency);
+ break;
+ }
+
+ return 0;
+}
+
+static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ struct cpu_idle_info *info =
+ &per_cpu(cpu_idle_info, smp_processor_id());
+ u64 io_wait_us = 0;
+ u64 prev_io_wait_us = 0;
+ u64 last_update_time = 0;
+ u64 val = 0;
+ uint32_t iowaited = 0;
+
+ if (!info->enabled)
+ return NOTIFY_OK;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ val = get_cpu_iowait_time_us(smp_processor_id(),
+ &last_update_time);
+ /* val could be -1 when NOHZ is not enabled */
+ if (val == (u64)-1)
+ val = 0;
+ per_cpu(iowait_on_cpu, smp_processor_id()) = val;
+ msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_ENTER, 0);
+ break;
+
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
+ val = get_cpu_iowait_time_us(smp_processor_id(),
+ &last_update_time);
+ if (val == (u64)-1)
+ val = 0;
+ io_wait_us = val;
+ iowaited = (io_wait_us - prev_io_wait_us);
+ msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_EXIT, iowaited);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block idle_nb = {
+ .notifier_call = msm_cpuidle_notifier,
+};
+
+static int msm_dcvs_idle_probe(struct platform_device *pdev)
+{
+ int cpu;
+ struct cpu_idle_info *info = NULL;
+ struct msm_dcvs_idle *inotify = NULL;
+
+ for_each_possible_cpu(cpu) {
+ info = &per_cpu(cpu_idle_info, cpu);
+ info->cpu = cpu;
+ inotify = &info->dcvs_notifier;
+ snprintf(core_name[cpu], 10, "cpu%d", cpu);
+ inotify->core_name = core_name[cpu];
+ inotify->enable = msm_dcvs_idle_notifier;
+ info->handle = msm_dcvs_idle_source_register(inotify);
+ BUG_ON(info->handle < 0);
+ }
+
+ latency = *((uint32_t *)pdev->dev.platform_data);
+ pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ return cpu_pm_register_notifier(&idle_nb);
+}
+
+static int msm_dcvs_idle_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ int rc = 0;
+ int cpu = 0;
+ struct msm_dcvs_idle *inotify = NULL;
+ struct cpu_idle_info *info = NULL;
+
+ rc = cpu_pm_unregister_notifier(&idle_nb);
+
+ for_each_possible_cpu(cpu) {
+ info = &per_cpu(cpu_idle_info, cpu);
+ inotify = &info->dcvs_notifier;
+ ret = msm_dcvs_idle_source_unregister(inotify);
+ if (ret) {
+ rc = -EFAULT;
+ pr_err("Error de-registering core %d idle notifier.\n",
+ cpu);
+ }
+ }
+
+ return rc;
+}
+
+static struct platform_driver idle_pdrv = {
+ .probe = msm_dcvs_idle_probe,
+ .remove = __devexit_p(msm_dcvs_idle_remove),
+ .driver = {
+ .name = "msm_cpu_idle",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int msm_dcvs_idle_init(void)
+{
+ return platform_driver_register(&idle_pdrv);
+}
+late_initcall(msm_dcvs_idle_init);
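
The new driver above reads its allowed wakeup latency straight from
pdev->dev.platform_data as a uint32_t, so it relies on the board file
registering a matching platform device. A hedged sketch of that wiring; the
device name is fixed by the driver, the latency value is a placeholder:

    #include <linux/platform_device.h>

    static uint32_t msm_cpu_idle_latency = 1000;	/* usecs, assumed */

    static struct platform_device msm_cpu_idle_device = {
    	.name	= "msm_cpu_idle",
    	.id	= -1,
    	.dev	= {
    		.platform_data = &msm_cpu_idle_latency,
    	},
    };

    /* board init: platform_device_register(&msm_cpu_idle_device); */
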
diff --git a/arch/arm/mach-msm/msm_dcvs_scm.c b/arch/arm/mach-msm/msm_dcvs_scm.c
index df6c44f..6095e08 100644
--- a/arch/arm/mach-msm/msm_dcvs_scm.c
+++ b/arch/arm/mach-msm/msm_dcvs_scm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,15 +21,17 @@
#include <mach/scm.h>
#include <mach/msm_dcvs_scm.h>
+#define DCVS_CMD_CREATE_GROUP 1
#define DCVS_CMD_REGISTER_CORE 2
#define DCVS_CMD_SET_ALGO_PARAM 3
#define DCVS_CMD_EVENT 4
#define DCVS_CMD_INIT 5
-#define DCVS_CMD_SET_POWER_PARAM 6
struct scm_register_core {
uint32_t core_id;
+ uint32_t group_id;
phys_addr_t core_param_phy;
+ phys_addr_t freq_phy;
};
struct scm_algo {
@@ -42,21 +44,6 @@ struct scm_init {
uint32_t size;
};
-struct scm_pwr_param {
- uint32_t core_id;
- phys_addr_t pwr_param_phy;
- phys_addr_t freq_phy;
- phys_addr_t coeffs_phy;
-};
-
-struct msm_algo_param {
- enum msm_dcvs_algo_param_type type;
- union {
- struct msm_dcvs_algo_param dcvs_param;
- struct msm_mpd_algo_param mpd_param;
- } u;
-};
-
int msm_dcvs_scm_init(size_t size)
{
int ret = 0;
@@ -82,25 +69,49 @@ int msm_dcvs_scm_init(size_t size)
}
EXPORT_SYMBOL(msm_dcvs_scm_init);
-int msm_dcvs_scm_register_core(uint32_t core_id,
- struct msm_dcvs_core_param *param)
+int msm_dcvs_scm_create_group(uint32_t id)
+{
+ int ret = 0;
+
+ ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_CREATE_GROUP,
+ &id, sizeof(uint32_t), NULL, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_dcvs_scm_create_group);
+
+int msm_dcvs_scm_register_core(uint32_t core_id, uint32_t group_id,
+ struct msm_dcvs_core_param *param,
+ struct msm_dcvs_freq_entry *freq)
{
int ret = 0;
struct scm_register_core reg_data;
struct msm_dcvs_core_param *p = NULL;
+ struct msm_dcvs_freq_entry *f = NULL;
p = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_core_param)), GFP_KERNEL);
if (!p)
return -ENOMEM;
+ f = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_freq_entry) *
+ param->num_freq), GFP_KERNEL);
+ if (!f) {
+ kfree(p);
+ return -ENOMEM;
+ }
+
memcpy(p, param, sizeof(struct msm_dcvs_core_param));
+ memcpy(f, freq, sizeof(struct msm_dcvs_freq_entry) * param->num_freq);
reg_data.core_id = core_id;
+ reg_data.group_id = group_id;
reg_data.core_param_phy = virt_to_phys(p);
+ reg_data.freq_phy = virt_to_phys(f);
ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_REGISTER_CORE,
&reg_data, sizeof(reg_data), NULL, 0);
+ kfree(f);
kfree(p);
return ret;
@@ -112,14 +123,13 @@ int msm_dcvs_scm_set_algo_params(uint32_t core_id,
{
int ret = 0;
struct scm_algo algo;
- struct msm_algo_param *p = NULL;
+ struct msm_dcvs_algo_param *p = NULL;
- p = kzalloc(PAGE_ALIGN(sizeof(struct msm_algo_param)), GFP_KERNEL);
+ p = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_algo_param)), GFP_KERNEL);
if (!p)
return -ENOMEM;
- p->type = MSM_DCVS_ALGO_DCVS_PARAM;
- memcpy(&p->u.dcvs_param, param, sizeof(struct msm_dcvs_algo_param));
+ memcpy(p, param, sizeof(struct msm_dcvs_algo_param));
algo.core_id = core_id;
algo.algo_phy = virt_to_phys(p);
@@ -133,85 +143,6 @@ int msm_dcvs_scm_set_algo_params(uint32_t core_id,
}
EXPORT_SYMBOL(msm_dcvs_scm_set_algo_params);
-int msm_mpd_scm_set_algo_params(struct msm_mpd_algo_param *param)
-{
- int ret = 0;
- struct scm_algo algo;
- struct msm_algo_param *p = NULL;
-
- p = kzalloc(PAGE_ALIGN(sizeof(struct msm_algo_param)), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->type = MSM_DCVS_ALGO_MPD_PARAM;
- memcpy(&p->u.mpd_param, param, sizeof(struct msm_mpd_algo_param));
-
- algo.core_id = 0;
- algo.algo_phy = virt_to_phys(p);
-
- ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_SET_ALGO_PARAM,
- &algo, sizeof(algo), NULL, 0);
-
- kfree(p);
-
- return ret;
-}
-EXPORT_SYMBOL(msm_mpd_scm_set_algo_params);
-
-int msm_dcvs_scm_set_power_params(uint32_t core_id,
- struct msm_dcvs_power_params *pwr_param,
- struct msm_dcvs_freq_entry *freq_entry,
- struct msm_dcvs_energy_curve_coeffs *coeffs)
-{
- int ret = 0;
- struct scm_pwr_param pwr;
- struct msm_dcvs_power_params *pwrt = NULL;
- struct msm_dcvs_freq_entry *freqt = NULL;
- struct msm_dcvs_energy_curve_coeffs *coefft = NULL;
-
- pwrt = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_power_params)),
- GFP_KERNEL);
- if (!pwrt)
- return -ENOMEM;
-
- freqt = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_freq_entry)
- * pwr_param->num_freq),
- GFP_KERNEL);
- if (!freqt) {
- kfree(pwrt);
- return -ENOMEM;
- }
-
- coefft = kzalloc(PAGE_ALIGN(
- sizeof(struct msm_dcvs_energy_curve_coeffs)),
- GFP_KERNEL);
- if (!coefft) {
- kfree(pwrt);
- kfree(freqt);
- return -ENOMEM;
- }
-
- memcpy(pwrt, pwr_param, sizeof(struct msm_dcvs_power_params));
- memcpy(freqt, freq_entry,
- sizeof(struct msm_dcvs_freq_entry)*pwr_param->num_freq);
- memcpy(coefft, coeffs, sizeof(struct msm_dcvs_energy_curve_coeffs));
-
- pwr.core_id = core_id;
- pwr.pwr_param_phy = virt_to_phys(pwrt);
- pwr.freq_phy = virt_to_phys(freqt);
- pwr.coeffs_phy = virt_to_phys(coefft);
-
- ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_SET_POWER_PARAM,
- &pwr, sizeof(pwr), NULL, 0);
-
- kfree(pwrt);
- kfree(freqt);
- kfree(coefft);
-
- return ret;
-}
-EXPORT_SYMBOL(msm_dcvs_scm_set_power_params);
-
int msm_dcvs_scm_event(uint32_t core_id,
enum msm_dcvs_scm_event event_id,
uint32_t param0, uint32_t param1,
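
Every call in this file follows the same marshaling idiom: copy the argument
into a page-aligned kernel buffer, hand its physical address to the secure
side through scm_call(), then free the buffer. A minimal sketch; the command
ID and payload layout are hypothetical:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/io.h>		/* virt_to_phys() */
    #include <asm/page.h>
    #include <mach/scm.h>

    struct scm_demo_arg {
    	uint32_t core_id;
    	phys_addr_t payload_phy;
    };

    static int scm_demo_send(uint32_t core_id, void *payload, size_t len)
    {
    	struct scm_demo_arg arg;
    	void *buf;
    	int ret;

    	buf = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;
    	memcpy(buf, payload, len);

    	arg.core_id = core_id;
    	arg.payload_phy = virt_to_phys(buf);	/* TZ reads via phys */

    	ret = scm_call(SCM_SVC_DCVS, 0x42 /* hypothetical cmd */,
    		       &arg, sizeof(arg), NULL, 0);
    	kfree(buf);
    	return ret;
    }
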
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 48df2d0..2932934 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -28,8 +28,8 @@
#include <mach/msm_iomap.h>
#include "pm.h"
-#define CORE_RESET_BASE 0xA8600590
-#define MSM_CORE_STATUS_MSK 0x02800000
+#define MSM_CORE1_RESET 0xA8600590
+#define MSM_CORE1_STATUS_MSK 0x02800000
/*
* control for which core is the next to come out of the secondary
@@ -138,8 +138,8 @@ static int __cpuinit msm8625_release_secondary(void)
timeout = jiffies + usecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
value = __raw_readl(MSM_CFG_CTL_BASE + 0x3c);
- if ((value & MSM_CORE_STATUS_MSK) ==
- MSM_CORE_STATUS_MSK)
+ if ((value & MSM_CORE1_STATUS_MSK) ==
+ MSM_CORE1_STATUS_MSK)
break;
udelay(1);
}
@@ -149,7 +149,7 @@ static int __cpuinit msm8625_release_secondary(void)
return -ENODEV;
}
- base_ptr = ioremap_nocache(CORE_RESET_BASE, SZ_4);
+ base_ptr = ioremap_nocache(MSM_CORE1_RESET, SZ_4);
if (!base_ptr)
return -ENODEV;
/* Reset core 1 out of reset */
@@ -276,10 +276,10 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* Write the address of secondary startup into the
* boot remapper register. The secondary CPU branches to this address.
*/
- __raw_writel(MSM8625_CPU_PHYS, (MSM_CFG_CTL_BASE + 0x34));
+ __raw_writel(MSM8625_SECONDARY_PHYS, (MSM_CFG_CTL_BASE + 0x34));
mb();
- second_ptr = ioremap_nocache(MSM8625_CPU_PHYS, SZ_8);
+ second_ptr = ioremap_nocache(MSM8625_SECONDARY_PHYS, SZ_8);
if (!second_ptr) {
pr_err("failed to ioremap for secondary core\n");
return;
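
The hunk above only renames the reset-register constants; the underlying
pattern is the usual one-off register poke, mapping a single word of device
memory uncached, writing it, and unmapping. A sketch with a placeholder
address and value, not the real MSM8625 reset programming:

    #include <linux/io.h>

    static int poke_reg(phys_addr_t phys, u32 val)
    {
    	void __iomem *p = ioremap_nocache(phys, 4);

    	if (!p)
    		return -ENODEV;
    	__raw_writel(val, p);
    	mb();		/* ensure the write is issued before unmap */
    	iounmap(p);
    	return 0;
    }
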
diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c
index 462485d..4629007 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.c
+++ b/arch/arm/mach-msm/qdsp5/adsp.c
@@ -3,7 +3,7 @@
* Register/Interrupt access for userspace aDSP library.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -1501,9 +1501,7 @@ static int __init adsp_init(void)
if (msm_adsp_probe_work_queue == NULL)
return -ENOMEM;
msm_adsp_driver.driver.name = msm_adsp_driver_name;
- preempt_disable();
rc = platform_driver_register(&msm_adsp_driver);
- preempt_enable();
MM_INFO("%s -- %d\n", msm_adsp_driver_name, rc);
return rc;
}
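
The hunk above drops a preempt_disable()/preempt_enable() pair around
platform_driver_register(). That call can sleep (driver-core mutexes,
GFP_KERNEL allocations, possibly a synchronous probe), so it must not run
with preemption disabled; the plain call is the correct form:

    #include <linux/platform_device.h>

    static int __init demo_init(void)
    {
    	/* may sleep: ordinary process context only */
    	return platform_driver_register(&msm_adsp_driver);
    }
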
diff --git a/arch/arm/mach-msm/qdsp5/adsp.h b/arch/arm/mach-msm/qdsp5/adsp.h
index 0922d88..ac2f0be 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.h
+++ b/arch/arm/mach-msm/qdsp5/adsp.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/qdsp5/adsp.h
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2010, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2010, 2012 Code Aurora Forum. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -20,7 +20,7 @@
#include <linux/types.h>
#include <linux/msm_adsp.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_rpcrouter.h>
#include <mach/msm_adsp.h>
diff --git a/arch/arm/mach-msm/qdsp5/adsp_debug.c b/arch/arm/mach-msm/qdsp5/adsp_debug.c
index ccddd43..03deab9 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_debug.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_driver.c b/arch/arm/mach-msm/qdsp5/adsp_driver.c
index eb9c388..4fe1a53 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_driver.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_driver.c
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/qdsp5/adsp_driver.c
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2009, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, 2012 Code Aurora Forum. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -138,7 +138,7 @@ static int get_ion_region_info(int fd, struct adsp_ion_region *region)
pr_err("%s: could not get flags for the handle\n", __func__);
goto flag_error;
}
- temp_ptr = ion_map_kernel(region->client, region->handle);
+ temp_ptr = ion_map_kernel(region->client, region->handle, ionflag);
if (IS_ERR_OR_NULL(temp_ptr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
@@ -266,7 +266,7 @@ int adsp_ion_do_cache_op(struct msm_adsp_module *module,
module->name, vaddr, len);
return ret;
}
- if ((region->ion_flag == ION_FLAG_CACHED) && region->handle) {
+ if ((region->ion_flag == CACHED) && region->handle) {
len = ((((len) + 31) & (~31)) + 32);
ret = msm_ion_do_cache_op(region->client, region->handle,
(void *)paddr, len, cmd);
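
adsp_driver.c shows the ION pattern this patch applies across every qdsp5 driver below: <linux/msm_ion.h> becomes <linux/ion.h>, ion_alloc() loses its trailing flags argument, ion_map_kernel() regains an explicit flags parameter fed from ion_handle_get_flags(), and ION_FLAG_CACHED reverts to the older CACHED constant. A sketch of the resulting allocate/map sequence, with illustrative error handling that is not part of the diff:

	unsigned long ionflag;
	struct ion_handle *handle;
	void *vaddr;

	handle = ion_alloc(client, mem_sz, SZ_4K,
			ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle))
		return -ENOMEM;
	if (ion_handle_get_flags(client, handle, &ionflag)) {
		ion_free(client, handle);
		return -ENOMEM;
	}
	vaddr = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR_OR_NULL(vaddr)) {
		ion_free(client, handle);
		return -ENOMEM;
	}
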
diff --git a/arch/arm/mach-msm/qdsp5/adsp_info.c b/arch/arm/mach-msm/qdsp5/adsp_info.c
index 69a2d18..dea52bb 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_info.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_info.c
@@ -1,6 +1,6 @@
/* arch/arm/mach-msm/adsp_info.c
*
- * Copyright (c) 2008-2009, 2011-2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2011-2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c b/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c
index 768ac31..8fb2e06 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c
@@ -3,7 +3,7 @@
* Verification code for aDSP JPEG events.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c
index a5dd4ad..87d5dc3 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c
@@ -3,7 +3,7 @@
* Verification code for aDSP JPEG packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c
index 6424975..06b70de 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c
@@ -3,7 +3,7 @@
* Verification code for aDSP LPM packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_rm.c b/arch/arm/mach-msm/qdsp5/adsp_rm.c
index 95489f8..f67946c 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_rm.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_rm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c b/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c
index c89a37d..68ae380 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c
@@ -3,7 +3,7 @@
* Verification code for aDSP VFE packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c
index dba012e..dcd3d96 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c
@@ -3,7 +3,7 @@
* Verification code for aDSP VFE packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
index 62d6d58..27dd9bc 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c
@@ -3,7 +3,7 @@
* Verification code for aDSP VDEC packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2010, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2010, 2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
index 1b16628..290a14c 100644
--- a/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
+++ b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c
@@ -3,7 +3,7 @@
* Verification code for aDSP VENC packets from userspace.
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/audio_aac.c b/arch/arm/mach-msm/qdsp5/audio_aac.c
index 4509162..2a3f2cd 100644
--- a/arch/arm/mach-msm/qdsp5/audio_aac.c
+++ b/arch/arm/mach-msm/qdsp5/audio_aac.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2011-2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -34,7 +34,7 @@
#include <linux/slab.h>
#include <linux/msm_audio_aac.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -1369,10 +1369,7 @@ static int audaac_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1424,10 +1421,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1463,10 +1456,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
MM_DBG("append reserved byte %x\n",
audio->rsv_byte);
*cpy_ptr = audio->rsv_byte;
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
xfer = (count > ((frame->size - mfield_size) - 1)) ?
(frame->size - mfield_size) - 1 : count;
cpy_ptr++;
@@ -1733,7 +1722,7 @@ static int audio_open(struct inode *inode, struct file *file)
MM_DBG("allocating mem sz = %d\n", mem_sz);
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1759,7 +1748,7 @@ static int audio_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers,freeing instance 0x%08x\n",
(int)audio);
@@ -1775,7 +1764,7 @@ static int audio_open(struct inode *inode, struct file *file)
mem_sz = (PCM_BUFSZ_MIN * PCM_BUF_MAX_COUNT);
MM_DBG("allocating mem sz = %d\n", mem_sz);
handle = ion_alloc(client, mem_sz,
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate I/P buffers\n");
rc = -ENOMEM;
@@ -1802,7 +1791,8 @@ static int audio_open(struct inode *inode, struct file *file)
goto input_buff_get_flags_error;
}
- audio->map_v_read = ion_map_kernel(client, handle);
+ audio->map_v_read = ion_map_kernel(client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read buffers, freeing instance \
0x%08x\n", (int)audio);
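
Note what the audio_write()/audaac_process_eos() hunks above remove, here and in the decoders that follow: the mfield_size checks against audio->out[0].size and frame->size that bounded the meta-field copy. Without them, copy_from_user() is driven by a size derived from userspace data. The dropped guard, kept here for reference (a sketch; the destination layout follows these drivers):

	if (mfield_size > frame->size)
		return -EINVAL;	/* meta field must fit the DMA frame */
	if (copy_from_user(frame->data, buf, mfield_size))
		return -EFAULT;
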
diff --git a/arch/arm/mach-msm/qdsp5/audio_aac_in.c b/arch/arm/mach-msm/qdsp5/audio_aac_in.c
index 4c13a3c..4a35939 100644
--- a/arch/arm/mach-msm/qdsp5/audio_aac_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_aac_in.c
@@ -2,7 +2,7 @@
*
* aac audio input device
*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This code is based in part on arch/arm/mach-msm/qdsp5v2/audio_aac_in.c,
* Copyright (C) 2008 Google, Inc.
@@ -33,7 +33,7 @@
#include <linux/delay.h>
#include <linux/msm_audio_aac.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include "audmgr.h"
@@ -770,7 +770,6 @@ static long audaac_in_ioctl(struct file *file,
MM_DBG("\n");
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = atomic_read(&audio->in_bytes);
stats.sample_count = atomic_read(&audio->in_samples);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
@@ -1371,7 +1370,7 @@ static int audaac_in_open(struct inode *inode, struct file *file)
MM_DBG("allocating mem sz = %d\n", dma_size);
handle = ion_alloc(client, dma_size, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1399,7 +1398,7 @@ static int audaac_in_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_read = ion_map_kernel(client, handle);
+ audio->map_v_read = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
(int)audio);
@@ -1415,7 +1414,7 @@ static int audaac_in_open(struct inode *inode, struct file *file)
MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE);
handle = ion_alloc(client, BUFFER_SIZE,
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate I/P buffers\n");
rc = -ENOMEM;
@@ -1445,7 +1444,8 @@ static int audaac_in_open(struct inode *inode, struct file *file)
goto input_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
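
The AUDIO_GET_STATS hunk above, repeated in most drivers later in this patch, drops the memset() that zeroed the stack-allocated struct before copy_to_user(). Only byte_count and sample_count are assigned, so any remaining fields and padding now reach userspace uninitialized. The zeroing being removed looks like this (sketch):

	struct msm_audio_stats stats;

	memset(&stats, 0, sizeof(stats));	/* no kernel-stack leak */
	stats.byte_count = atomic_read(&audio->in_bytes);
	stats.sample_count = atomic_read(&audio->in_samples);
	if (copy_to_user((void __user *)arg, &stats, sizeof(stats)))
		return -EFAULT;
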
diff --git a/arch/arm/mach-msm/qdsp5/audio_ac3.c b/arch/arm/mach-msm/qdsp5/audio_ac3.c
index 199b322..7caa275 100644
--- a/arch/arm/mach-msm/qdsp5/audio_ac3.c
+++ b/arch/arm/mach-msm/qdsp5/audio_ac3.c
@@ -37,7 +37,7 @@
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
#include <linux/msm_audio_ac3.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -1036,7 +1036,7 @@ static long audac3_ioctl(struct file *file, unsigned int cmd,
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1074,7 +1074,8 @@ static long audac3_ioctl(struct file *file, unsigned int cmd,
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("map of read buf failed\n");
ion_free(audio->client, handle);
@@ -1594,7 +1595,7 @@ static int audac3_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, DMASZ, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1620,7 +1621,7 @@ static int audac3_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers,freeing instance 0x%08x\n",
(int)audio);
diff --git a/arch/arm/mach-msm/qdsp5/audio_acdb.c b/arch/arm/mach-msm/qdsp5/audio_acdb.c
index 922bb74..608f544 100644
--- a/arch/arm/mach-msm/qdsp5/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_acdb.c
@@ -34,7 +34,7 @@
#include <mach/qdsp5/qdsp5audpreproccmdi.h>
#include <mach/qdsp5/qdsp5audpreprocmsg.h>
#include <mach/qdsp5/qdsp5audppmsg.h>
-#include <mach/qdsp5/audio_acdbi2.h>
+#include <mach/qdsp5/audio_acdbi.h>
#include <mach/qdsp5/acdb_commands.h>
#include <mach/qdsp5/audio_acdb_def.h>
#include <mach/debug_mm.h>
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb.c b/arch/arm/mach-msm/qdsp5/audio_amrnb.c
index 4faffd2..ad32dc8 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrnb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrnb.c
@@ -42,7 +42,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -986,7 +986,7 @@ static long audamrnb_ioctl(struct file *file, unsigned int cmd,
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1023,7 +1023,8 @@ static long audamrnb_ioctl(struct file *file, unsigned int cmd,
break;
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read buf\n");
ion_free(audio->client, handle);
@@ -1205,10 +1206,7 @@ static int audamrnb_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1267,10 +1265,6 @@ static ssize_t audamrnb_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1299,10 +1293,7 @@ static ssize_t audamrnb_write(struct file *file, const char __user *buf,
}
frame->mfield_sz = mfield_size;
}
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
+
xfer = (count > (frame->size - mfield_size)) ?
(frame->size - mfield_size) : count;
if (copy_from_user(cpy_ptr, buf, xfer)) {
@@ -1550,7 +1541,7 @@ static int audamrnb_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1576,7 +1567,7 @@ static int audamrnb_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
index fbee2f9..9da9666 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
@@ -37,7 +37,7 @@
#include <linux/delay.h>
#include <linux/msm_audio_amrnb.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include "audmgr.h"
@@ -897,7 +897,6 @@ static ssize_t audamrnb_in_read(struct file *file,
struct amrnb_encoded_meta_out meta_field;
struct audio_frame_nt *nt_frame;
MM_DBG("count = %d\n", count);
- memset(&meta_field, 0, sizeof(meta_field));
mutex_lock(&audio->read_lock);
while (count > 0) {
rc = wait_event_interruptible(
@@ -1338,7 +1337,7 @@ static int audamrnb_in_open(struct inode *inode, struct file *file)
if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE);
handle = ion_alloc(client, BUFFER_SIZE,
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate write buffers\n");
rc = -ENOMEM;
@@ -1368,7 +1367,8 @@ static int audamrnb_in_open(struct inode *inode, struct file *file)
goto input_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrwb.c b/arch/arm/mach-msm/qdsp5/audio_amrwb.c
index 2b69e9f..f3f63ee 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrwb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrwb.c
@@ -6,7 +6,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009, 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, 2011-2012 Code Aurora Forum. All rights reserved.
*
* All source code in this file is licensed under the following license except
* where indicated.
@@ -41,7 +41,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -982,7 +982,7 @@ static long audamrwb_ioctl(struct file *file, unsigned int cmd,
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1019,7 +1019,8 @@ static long audamrwb_ioctl(struct file *file, unsigned int cmd,
break;
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map mem for read buf\n");
ion_free(audio->client, handle);
@@ -1253,10 +1254,7 @@ static int audamrwb_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1314,10 +1312,6 @@ static ssize_t audamrwb_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1351,10 +1345,6 @@ static ssize_t audamrwb_write(struct file *file, const char __user *buf,
if (audio->reserved) {
MM_DBG("append reserved byte %x\n", audio->rsv_byte);
*cpy_ptr = audio->rsv_byte;
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
xfer = (count > ((frame->size - mfield_size) - 1)) ?
((frame->size - mfield_size) - 1) : count;
cpy_ptr++;
@@ -1615,7 +1605,7 @@ static int audamrwb_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
goto output_buff_alloc_error;
@@ -1640,7 +1630,7 @@ static int audamrwb_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc.c b/arch/arm/mach-msm/qdsp5/audio_evrc.c
index bda307e..ff7db0b 100644
--- a/arch/arm/mach-msm/qdsp5/audio_evrc.c
+++ b/arch/arm/mach-msm/qdsp5/audio_evrc.c
@@ -1,6 +1,6 @@
/* arch/arm/mach-msm/audio_evrc.c
*
- * Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2011-2012 Code Aurora Forum. All rights reserved.
*
* This code also borrows from audio_aac.c, which is
* Copyright (C) 2008 Google, Inc.
@@ -36,7 +36,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -971,7 +971,7 @@ static long audevrc_ioctl(struct file *file, unsigned int cmd,
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1008,7 +1008,8 @@ static long audevrc_ioctl(struct file *file, unsigned int cmd,
break;
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map mem"
" for read buf\n");
@@ -1198,10 +1199,7 @@ static int audevrc_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1258,10 +1256,6 @@ static ssize_t audevrc_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf,
mfield_size)) {
@@ -1292,10 +1286,7 @@ static ssize_t audevrc_write(struct file *file, const char __user *buf,
}
frame->mfield_sz = mfield_size;
}
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
+
xfer = (count > (frame->size - mfield_size)) ?
(frame->size - mfield_size) : count;
if (copy_from_user(cpy_ptr, buf, xfer)) {
@@ -1541,7 +1532,7 @@ static int audevrc_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1567,7 +1558,7 @@ static int audevrc_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
index 18ea0ea..3310743 100644
--- a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
@@ -2,7 +2,7 @@
*
* evrc audio input device
*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This code is based in part on arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c,
* Copyright (C) 2008 Google, Inc.
@@ -33,7 +33,7 @@
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
@@ -732,7 +732,6 @@ static long audevrc_in_ioctl(struct file *file,
MM_DBG("\n");
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = atomic_read(&audio->in_bytes);
stats.sample_count = atomic_read(&audio->in_samples);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
@@ -885,7 +884,6 @@ static ssize_t audevrc_in_read(struct file *file,
int rc = 0;
struct evrc_encoded_meta_out meta_field;
struct audio_frame_nt *nt_frame;
- memset(&meta_field, 0, sizeof(meta_field));
MM_DBG("count = %d\n", count);
mutex_lock(&audio->read_lock);
while (count > 0) {
@@ -1075,10 +1073,6 @@ int audrec_evrc_process_eos(struct audio_evrc_in *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1144,10 +1138,6 @@ static ssize_t audevrc_in_write(struct file *file,
rc = -EINVAL;
goto error;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto error;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1177,7 +1167,6 @@ static ssize_t audevrc_in_write(struct file *file,
}
frame->mfield_sz = mfield_size;
}
- count = count > frame->size ? frame->size : count;
MM_DBG("copying the stream count = %d\n", count);
if (copy_from_user(cpy_ptr, buf, count)) {
rc = -EFAULT;
@@ -1321,7 +1310,7 @@ static int audevrc_in_open(struct inode *inode, struct file *file)
MM_DBG("allocating mem sz = %d\n", dma_size);
handle = ion_alloc(client, dma_size, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1349,7 +1338,7 @@ static int audevrc_in_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_read = ion_map_kernel(client, handle);
+ audio->map_v_read = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
(int)audio);
@@ -1364,7 +1353,7 @@ static int audevrc_in_open(struct inode *inode, struct file *file)
if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE);
handle = ion_alloc(client, BUFFER_SIZE,
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate I/P buffers\n");
rc = -ENOMEM;
@@ -1394,7 +1383,8 @@ static int audevrc_in_open(struct inode *inode, struct file *file)
goto input_buff_alloc_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_fm.c b/arch/arm/mach-msm/qdsp5/audio_fm.c
index 957a407..2ab7cad 100644
--- a/arch/arm/mach-msm/qdsp5/audio_fm.c
+++ b/arch/arm/mach-msm/qdsp5/audio_fm.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/audio_in.c b/arch/arm/mach-msm/qdsp5/audio_in.c
index 3a60ec2..6fc5d6b 100644
--- a/arch/arm/mach-msm/qdsp5/audio_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_in.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009,2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -571,7 +571,6 @@ static long audio_in_ioctl(struct file *file,
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = atomic_read(&audio->in_bytes);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
diff --git a/arch/arm/mach-msm/qdsp5/audio_lpa.c b/arch/arm/mach-msm/qdsp5/audio_lpa.c
index 707a9eb..de114c4 100644
--- a/arch/arm/mach-msm/qdsp5/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp5/audio_lpa.c
@@ -1,7 +1,7 @@
/* audio_lpa.c - low power audio driver
*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* Based on the PCM decoder driver in arch/arm/mach-msm/qdsp5/audio_pcm.c
*
@@ -37,7 +37,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/earlysuspend.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
@@ -744,7 +744,7 @@ static int audlpa_ion_add(struct audio *audio,
pr_err("%s: could not get flags for the handle\n", __func__);
goto flag_error;
}
- kvaddr = (unsigned long)ion_map_kernel(audio->client, handle);
+ kvaddr = (unsigned long)ion_map_kernel(audio->client, handle, ionflag);
if (IS_ERR_OR_NULL((void *)kvaddr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
@@ -929,7 +929,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
@@ -1041,7 +1040,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case AUDIO_GET_CONFIG: {
struct msm_audio_config config;
- memset(&config, 0, sizeof(config));
config.buffer_count = audio->buffer_count;
config.buffer_size = audio->buffer_size;
config.sample_rate = audio->out_sample_rate;
diff --git a/arch/arm/mach-msm/qdsp5/audio_mp3.c b/arch/arm/mach-msm/qdsp5/audio_mp3.c
index 3950a2a..8fa7cf3 100644
--- a/arch/arm/mach-msm/qdsp5/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp5/audio_mp3.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -34,7 +34,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -1100,7 +1100,7 @@ static int audmp3_ion_add(struct audio *audio,
goto flag_error;
}
- temp_ptr = ion_map_kernel(audio->client, handle);
+ temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
if (IS_ERR_OR_NULL(temp_ptr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
@@ -1326,7 +1326,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
@@ -1531,7 +1530,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
rc = -ENOMEM;
@@ -1564,7 +1563,8 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("map of read buf failed\n");
@@ -1879,10 +1879,7 @@ static int audmp3_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1939,10 +1936,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1976,10 +1969,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
if (audio->reserved) {
MM_DBG("append reserved byte %x\n", audio->rsv_byte);
*cpy_ptr = audio->rsv_byte;
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
xfer = (count > ((frame->size - mfield_size) - 1)) ?
(frame->size - mfield_size) - 1 : count;
cpy_ptr++;
@@ -2270,7 +2259,7 @@ static int audio_open(struct inode *inode, struct file *file)
MM_DBG("memsz = %d\n", mem_sz);
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -2296,7 +2285,7 @@ static int audio_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_mvs.c b/arch/arm/mach-msm/qdsp5/audio_mvs.c
index 60a737d..360366a 100644
--- a/arch/arm/mach-msm/qdsp5/audio_mvs.c
+++ b/arch/arm/mach-msm/qdsp5/audio_mvs.c
@@ -928,7 +928,9 @@ static void audio_mvs_process_rpc_request(uint32_t procedure,
MM_DBG("UL AMR frame_type %d\n",
be32_to_cpu(*args));
- } else if (frame_mode == MVS_FRAME_MODE_PCM_UL) {
+ /* NOTE: merge QC case 01094468, modify 2 lines */
+ } else if ((frame_mode == MVS_FRAME_MODE_PCM_UL) ||
+ (frame_mode == MVS_FRAME_MODE_PCM_WB_UL)) {
/* PCM doesn't have frame_type */
buf_node->frame.frame_type = 0;
} else if (frame_mode == MVS_FRAME_MODE_VOC_TX) {
@@ -1055,7 +1057,9 @@ static void audio_mvs_process_rpc_request(uint32_t procedure,
cpu_to_be32(0x00000001);
dl_reply.cdc_param.gnr_arg.pkt_status =
cpu_to_be32(AUDIO_MVS_PKT_NORMAL);
- } else if (frame_mode == MVS_FRAME_MODE_PCM_DL) {
+ /* NOTE: merge QC case 01094468, modify 2 lines */
+ } else if ((frame_mode == MVS_FRAME_MODE_PCM_DL) ||
+ (frame_mode == MVS_FRAME_MODE_PCM_WB_DL)) {
dl_reply.cdc_param.gnr_arg.param1 = 0;
dl_reply.cdc_param.gnr_arg.param2 = 0;
dl_reply.cdc_param.\
@@ -1516,7 +1520,7 @@ static long audio_mvs_ioctl(struct file *file,
switch (cmd) {
case AUDIO_GET_MVS_CONFIG: {
struct msm_audio_mvs_config config;
- memset(&config, 0, sizeof(config));
+
MM_DBG("GET_MVS_CONFIG mvs_mode %d rate_type %d\n",
config.mvs_mode, config.rate_type);
@@ -1669,7 +1673,7 @@ static int audio_mvs_open(struct inode *inode, struct file *file)
mutex_lock(&audio_mvs_info.lock);
if (audio_mvs_info.task != NULL ||
- audio_mvs_info.rpc_endpt != NULL) {
+ audio_mvs_info.rpc_endpt != NULL) {
rc = audio_mvs_alloc_buf(&audio_mvs_info);
if (rc == 0) {
@@ -1681,7 +1685,16 @@ static int audio_mvs_open(struct inode *inode, struct file *file)
rc = -ENODEV;
}
+//Note: disable the state check against AUDIO_MVS_CLOSED
+// according to QC SR 01103475.
+#if 0
+ } else {
+ MM_ERR("MVS driver exists, state %d\n",
+ audio_mvs_info.state);
+ rc = -EBUSY;
+ }
+#endif
mutex_unlock(&audio_mvs_info.lock);
done:
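
Two separate changes land in audio_mvs.c: the wideband PCM frame modes are folded into the narrowband PCM branches (PCM carries no frame type in either direction), and the AUDIO_MVS_CLOSED state check in audio_mvs_open() is compiled out with #if 0 per the quoted SR. One side effect worth flagging: with the memset() gone from AUDIO_GET_MVS_CONFIG, the MM_DBG() there prints config.mvs_mode and config.rate_type before either field is assigned. The merged uplink branch, as applied:

	} else if ((frame_mode == MVS_FRAME_MODE_PCM_UL) ||
		   (frame_mode == MVS_FRAME_MODE_PCM_WB_UL)) {
		/* PCM (narrow or wideband) has no frame_type */
		buf_node->frame.frame_type = 0;
	}
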
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index 3daaa7f..07f9f4c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -693,7 +693,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case AUDIO_GET_CONFIG: {
struct msm_audio_config config;
- memset(&config, 0, sizeof(config));
config.buffer_size = BUFSZ;
config.buffer_count = 2;
config.sample_rate = audio->out_sample_rate;
diff --git a/arch/arm/mach-msm/qdsp5/audio_pcm.c b/arch/arm/mach-msm/qdsp5/audio_pcm.c
index 4ffc2be..fa6721b 100644
--- a/arch/arm/mach-msm/qdsp5/audio_pcm.c
+++ b/arch/arm/mach-msm/qdsp5/audio_pcm.c
@@ -1,7 +1,7 @@
/* audio_pcm.c - pcm audio decoder driver
*
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* Based on the mp3 decoder driver in arch/arm/mach-msm/qdsp5/audio_mp3.c
*
@@ -38,7 +38,7 @@
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/list.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
@@ -826,7 +826,7 @@ static int audpcm_ion_add(struct audio *audio,
pr_err("%s: could not get flags for the handle\n", __func__);
goto flag_error;
}
- kvaddr = (unsigned long)ion_map_kernel(audio->client, handle);
+ kvaddr = (unsigned long)ion_map_kernel(audio->client, handle, ionflag);
if (IS_ERR_OR_NULL((void *)kvaddr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
@@ -1579,7 +1579,7 @@ static int audio_open(struct inode *inode, struct file *file)
MM_DBG("memsz = %d\n", mem_sz);
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1605,7 +1605,7 @@ static int audio_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
index c5787fd..6e17420 100644
--- a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
@@ -26,7 +26,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/delay.h>
@@ -851,7 +851,7 @@ static int audpcm_in_open(struct inode *inode, struct file *file)
MM_DBG("allocating mem sz = %d\n", DMASZ);
handle = ion_alloc(client, DMASZ, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -879,7 +879,7 @@ static int audpcm_in_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->data = ion_map_kernel(client, handle);
+ audio->data = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->data)) {
MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
(int)audio);
diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp.c b/arch/arm/mach-msm/qdsp5/audio_qcelp.c
index bb4ffef..785d0bf 100644
--- a/arch/arm/mach-msm/qdsp5/audio_qcelp.c
+++ b/arch/arm/mach-msm/qdsp5/audio_qcelp.c
@@ -2,7 +2,7 @@
*
* qcelp 13k audio decoder device
*
- * Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2011-2012 Code Aurora Forum. All rights reserved.
*
* This code is based in part on audio_mp3.c, which is
* Copyright (C) 2008 Google, Inc.
@@ -37,7 +37,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -785,7 +785,6 @@ static long audqcelp_ioctl(struct file *file, unsigned int cmd,
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
@@ -973,7 +972,7 @@ static long audqcelp_ioctl(struct file *file, unsigned int cmd,
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1010,7 +1009,8 @@ static long audqcelp_ioctl(struct file *file, unsigned int cmd,
break;
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read buf\n");
ion_free(audio->client, handle);
@@ -1200,10 +1200,7 @@ static int audqcelp_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1261,10 +1258,6 @@ static ssize_t audqcelp_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
@@ -1293,10 +1286,7 @@ static ssize_t audqcelp_write(struct file *file, const char __user *buf,
}
frame->mfield_sz = mfield_size;
}
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
+
xfer = (count > (frame->size - mfield_size)) ?
(frame->size - mfield_size) : count;
if (copy_from_user(cpy_ptr, buf, xfer)) {
@@ -1540,7 +1530,7 @@ static int audqcelp_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1566,7 +1556,7 @@ static int audqcelp_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
index e38cd9b..1db2080 100644
--- a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
@@ -33,7 +33,7 @@
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
@@ -885,7 +885,6 @@ static ssize_t audqcelp_in_read(struct file *file,
int rc = 0;
struct qcelp_encoded_meta_out meta_field;
struct audio_frame_nt *nt_frame;
- memset(&meta_field, 0, sizeof(meta_field));
MM_DBG("count = %d\n", count);
mutex_lock(&audio->read_lock);
while (count > 0) {
@@ -1314,7 +1313,7 @@ static int audqcelp_in_open(struct inode *inode, struct file *file)
MM_DBG("allocating mem sz = %d\n", dma_size);
handle = ion_alloc(client, dma_size, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1342,7 +1341,7 @@ static int audqcelp_in_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_read = ion_map_kernel(client, handle);
+ audio->map_v_read = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
(int)audio);
@@ -1357,7 +1356,7 @@ static int audqcelp_in_open(struct inode *inode, struct file *file)
if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE);
handle = ion_alloc(client, BUFFER_SIZE,
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate I/P buffers\n");
rc = -ENOMEM;
@@ -1387,7 +1386,8 @@ static int audqcelp_in_open(struct inode *inode, struct file *file)
goto input_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_voice_lb.c b/arch/arm/mach-msm/qdsp5/audio_voice_lb.c
index a73defd..08fa487 100644
--- a/arch/arm/mach-msm/qdsp5/audio_voice_lb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_voice_lb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/arch/arm/mach-msm/qdsp5/audio_voicememo.c b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
index f7295b7..88cda1e 100644
--- a/arch/arm/mach-msm/qdsp5/audio_voicememo.c
+++ b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
@@ -454,6 +454,9 @@ static void process_rpc_request(uint32_t proc, uint32_t xid,
be32_to_cpu(datacb_data->rec_status));
/* Data recorded */
+ /* change the condition from MAX_FRAME_SIZE to MAX_REC_BUF_SIZE
+ * to fix voice record failure issue
+ */
if ((rec_status == RPC_VOC_REC_STAT_DATA) ||
(rec_status == RPC_VOC_REC_STAT_DONE)) {
if (datacb_data->pkt.fw_data.fw_ptr_status &&
diff --git a/arch/arm/mach-msm/qdsp5/audio_wma.c b/arch/arm/mach-msm/qdsp5/audio_wma.c
index 42458c2..81416ac 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wma.c
@@ -1,6 +1,6 @@
/* audio_wma.c - wma audio decoder driver
*
- * Copyright (c) 2009, 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, 2011-2012, Code Aurora Forum. All rights reserved.
*
* Based on the mp3 native driver in arch/arm/mach-msm/qdsp5/audio_mp3.c
*
@@ -41,7 +41,7 @@
#include <linux/msm_audio.h>
#include <linux/msm_audio_wma.h>
#include <linux/memory_alloc.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
@@ -833,7 +833,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
@@ -1051,7 +1050,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1089,7 +1088,8 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("map of read buf failed\n");
ion_free(audio->client, handle);
@@ -1329,10 +1329,7 @@ static int audwma_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1386,10 +1383,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("audio_write: mf offset_val %x\n",
mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
@@ -1690,7 +1683,7 @@ static int audio_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1716,7 +1709,7 @@ static int audio_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audio_wmapro.c b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
index fa51f64..8090be5 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* All source code in this file is licensed under the following license except
* where indicated.
@@ -40,7 +40,7 @@
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
#include <linux/msm_audio_wmapro.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <mach/msm_adsp.h>
#include <mach/qdsp5/qdsp5audppcmdi.h>
@@ -821,7 +821,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
@@ -1048,7 +1047,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
handle = ion_alloc(audio->client,
(config.buffer_size *
config.buffer_count),
- SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to alloc I/P buffs\n");
audio->input_buff_handle = NULL;
@@ -1086,7 +1085,8 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
audio->map_v_read = ion_map_kernel(
- audio->client, handle);
+ audio->client,
+ handle, ionflag);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("map of read buf failed\n");
ion_free(audio->client, handle);
@@ -1324,10 +1324,7 @@ static int audwmapro_process_eos(struct audio *audio,
rc = -EBUSY;
goto done;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- goto done;
- }
+
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
@@ -1381,10 +1378,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
rc = -EINVAL;
break;
}
- if (mfield_size > audio->out[0].size) {
- rc = -EINVAL;
- break;
- }
MM_DBG("audio_write: mf offset_val %x\n",
mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
@@ -1419,10 +1412,6 @@ static ssize_t audio_write(struct file *file, const char __user *buf,
if (audio->reserved) {
MM_DBG("append reserved byte %x\n", audio->rsv_byte);
*cpy_ptr = audio->rsv_byte;
- if (mfield_size > frame->size) {
- rc = -EINVAL;
- break;
- }
xfer = (count > ((frame->size - mfield_size) - 1)) ?
(frame->size - mfield_size) - 1 : count;
cpy_ptr++;
@@ -1689,7 +1678,7 @@ static int audio_open(struct inode *inode, struct file *file)
audio->client = client;
handle = ion_alloc(client, mem_sz, SZ_4K,
- ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ ION_HEAP(ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL(handle)) {
MM_ERR("Unable to create allocate O/P buffers\n");
rc = -ENOMEM;
@@ -1715,7 +1704,7 @@ static int audio_open(struct inode *inode, struct file *file)
goto output_buff_get_flags_error;
}
- audio->map_v_write = ion_map_kernel(client, handle);
+ audio->map_v_write = ion_map_kernel(client, handle, ionflag);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5/audmgr.c b/arch/arm/mach-msm/qdsp5/audmgr.c
index cc40585..fb51240 100644
--- a/arch/arm/mach-msm/qdsp5/audmgr.c
+++ b/arch/arm/mach-msm/qdsp5/audmgr.c
@@ -3,7 +3,7 @@
* interface to "audmgr" service on the baseband cpu
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2009, 2012, 2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, 2012, 2013 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/audmgr.h b/arch/arm/mach-msm/qdsp5/audmgr.h
index 01a0890..15dd954 100644
--- a/arch/arm/mach-msm/qdsp5/audmgr.h
+++ b/arch/arm/mach-msm/qdsp5/audmgr.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/qdsp5/audmgr.h
*
* Copyright (C) 2008 Google, Inc.
- * Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2009, 2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/audmgr_new.h b/arch/arm/mach-msm/qdsp5/audmgr_new.h
index d775a93..20e27f1 100644
--- a/arch/arm/mach-msm/qdsp5/audmgr_new.h
+++ b/arch/arm/mach-msm/qdsp5/audmgr_new.h
@@ -1,6 +1,6 @@
/* arch/arm/mach-msm/qdsp5/audmgr.h
*
- * Copyright 2008,2012 (c) The Linux Foundation. All rights reserved.
+ * Copyright 2008,2012 (c) Code Aurora Forum. All rights reserved.
* Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
diff --git a/arch/arm/mach-msm/qdsp5/audpp.c b/arch/arm/mach-msm/qdsp5/audpp.c
index 8b57fb5..bcc00a4 100644
--- a/arch/arm/mach-msm/qdsp5/audpp.c
+++ b/arch/arm/mach-msm/qdsp5/audpp.c
@@ -35,7 +35,7 @@
#include <mach/qdsp5/qdsp5audppcmdi.h>
#include <mach/qdsp5/qdsp5audppmsg.h>
#include <mach/qdsp5/qdsp5audpp.h>
-#include <mach/qdsp5/audio_acdbi.h>
+#include <mach/qdsp5v2/audio_acdbi.h>
#include <mach/debug_mm.h>
#include "evlog.h"
diff --git a/arch/arm/mach-msm/qdsp5/audpreproc.c b/arch/arm/mach-msm/qdsp5/audpreproc.c
index dc5307d..92e54f8 100644
--- a/arch/arm/mach-msm/qdsp5/audpreproc.c
+++ b/arch/arm/mach-msm/qdsp5/audpreproc.c
@@ -1,7 +1,7 @@
/*
* Common code to deal with the AUDPREPROC dsp task (audio preprocessing)
*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* Based on the audpp layer in arch/arm/mach-msm/qdsp5/audpp.c
*
@@ -23,7 +23,7 @@
#include <mach/debug_mm.h>
#include <mach/qdsp5/qdsp5audpreproc.h>
#include <mach/qdsp5/qdsp5audreccmdi.h>
-#include <mach/qdsp5/audio_acdbi.h>
+#include <mach/qdsp5v2/audio_acdbi.h>
static DEFINE_MUTEX(audpreproc_lock);
diff --git a/arch/arm/mach-msm/qdsp5/audrec.c b/arch/arm/mach-msm/qdsp5/audrec.c
index 0f34518..e238e32 100644
--- a/arch/arm/mach-msm/qdsp5/audrec.c
+++ b/arch/arm/mach-msm/qdsp5/audrec.c
@@ -2,7 +2,7 @@
*
* common code to deal with the AUDREC dsp task (audio recording)
*
- * Copyright (c) 2009,2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009,2012 Code Aurora Forum. All rights reserved.
*
* Based on the audpp layer in arch/arm/mach-msm/qdsp5/audpp.c
*
diff --git a/arch/arm/mach-msm/qdsp5/dsp_debug.c b/arch/arm/mach-msm/qdsp5/dsp_debug.c
index 6e73a60..331ba00 100644
--- a/arch/arm/mach-msm/qdsp5/dsp_debug.c
+++ b/arch/arm/mach-msm/qdsp5/dsp_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/dsp_debug.h b/arch/arm/mach-msm/qdsp5/dsp_debug.h
index 15c14ca..bd40682 100644
--- a/arch/arm/mach-msm/qdsp5/dsp_debug.h
+++ b/arch/arm/mach-msm/qdsp5/dsp_debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/snd.c b/arch/arm/mach-msm/qdsp5/snd.c
index 3f379dc..f1db012 100644
--- a/arch/arm/mach-msm/qdsp5/snd.c
+++ b/arch/arm/mach-msm/qdsp5/snd.c
@@ -3,7 +3,7 @@
* interface to "snd" service on the baseband cpu
*
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/snd_adie.c b/arch/arm/mach-msm/qdsp5/snd_adie.c
index 5128062..ba7efc3 100644
--- a/arch/arm/mach-msm/qdsp5/snd_adie.c
+++ b/arch/arm/mach-msm/qdsp5/snd_adie.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -376,17 +376,8 @@ static ssize_t snd_adie_debug_write(struct file *file, const char __user *buf,
{
int rc = 0, op = 0;
int id = 0, adie_block = 0, config = 1;
- char l_buf[100];
-
- count = count > (sizeof(l_buf) - 1) ?
- (sizeof(l_buf) - 1) : count;
- l_buf[count] = '\0';
- if (copy_from_user(l_buf, buf, count)) {
- pr_info("Unable to copy data from user space\n");
- return -EFAULT;
- }
- if (sscanf(l_buf, "%d %d %d %d", &op, &id, &adie_block, &config) != 4)
- return -EINVAL;
+
+ sscanf(buf, "%d %d %d %d", &op, &id, &adie_block, &config);
MM_INFO("\nUser input: op %d id %d block %d config %d\n", op, id,
adie_block, config);
switch (op) {
diff --git a/arch/arm/mach-msm/qdsp5/snd_cad.c b/arch/arm/mach-msm/qdsp5/snd_cad.c
index 0b92cef..1b85b92 100644
--- a/arch/arm/mach-msm/qdsp5/snd_cad.c
+++ b/arch/arm/mach-msm/qdsp5/snd_cad.c
@@ -3,7 +3,7 @@
* interface to "snd" service on the baseband cpu
* This code also borrows from snd.c, which is
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009, 2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
diff --git a/arch/arm/mach-msm/qdsp5/snd_pcm_client.c b/arch/arm/mach-msm/qdsp5/snd_pcm_client.c
index b109a11..b58d3a2 100644
--- a/arch/arm/mach-msm/qdsp5/snd_pcm_client.c
+++ b/arch/arm/mach-msm/qdsp5/snd_pcm_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -311,7 +311,6 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
- memset(&stats, 0, sizeof(stats));
stats.byte_count = atomic_read(&audio->out_bytes);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index 7272f97..5c1e7ce 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -15,7 +15,7 @@
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/mm.h>
#include <mach/qdsp6v2/audio_acdb.h>
@@ -662,7 +662,7 @@ static int register_memory(void)
}
kvptr = ion_map_kernel(acdb_data.ion_client,
- acdb_data.ion_handle);
+ acdb_data.ion_handle, 0);
if (IS_ERR_OR_NULL(kvptr)) {
pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
result = PTR_ERR(kvptr);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
index edf8f77..7fa5c5a 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
@@ -28,7 +28,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/earlysuspend.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>
@@ -486,7 +486,7 @@ static int audlpa_ion_add(struct audio *audio,
goto flag_error;
}
- temp_ptr = ion_map_kernel(audio->client, handle);
+ temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
if (IS_ERR_OR_NULL(temp_ptr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index b077bd4..2a22088 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -704,7 +704,7 @@ static int audio_aio_ion_add(struct q6audio_aio *audio,
goto flag_error;
}
- temp_ptr = ion_map_kernel(audio->client, handle);
+ temp_ptr = ion_map_kernel(audio->client, handle, ionflag);
if (IS_ERR_OR_NULL(temp_ptr)) {
pr_err("%s: could not get virtual address\n", __func__);
goto map_error;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index b2829c3..811baf0 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -24,7 +24,7 @@
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <asm/ioctls.h>
#include <asm/atomic.h>
#include "q6audio_common.h"
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 776b73f..6052918 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -31,9 +31,6 @@
static DEFINE_MUTEX(scm_lock);
-#define SCM_BUF_LEN(__cmd_size, __resp_size) \
- (sizeof(struct scm_command) + sizeof(struct scm_response) + \
- __cmd_size + __resp_size)
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
@@ -79,6 +76,42 @@ struct scm_response {
};
/**
+ * alloc_scm_command() - Allocate an SCM command
+ * @cmd_size: size of the command buffer
+ * @resp_size: size of the response buffer
+ *
+ * Allocate an SCM command, including enough room for the command
+ * and response headers as well as the command and response buffers.
+ *
+ * Returns a valid &scm_command on success or %NULL if the allocation fails.
+ */
+static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
+{
+ struct scm_command *cmd;
+ size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
+ resp_size;
+
+ cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+ if (cmd) {
+ cmd->len = len;
+ cmd->buf_offset = offsetof(struct scm_command, buf);
+ cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
+ }
+ return cmd;
+}
+
+/**
+ * free_scm_command() - Free an SCM command
+ * @cmd: command to free
+ *
+ * Free an SCM command.
+ */
+static inline void free_scm_command(struct scm_command *cmd)
+{
+ kfree(cmd);
+}
+
+/**
* scm_command_to_response() - Get a pointer to a scm_response
* @cmd: command
*
@@ -186,47 +219,39 @@ static void scm_inv_range(unsigned long start, unsigned long end)
}
/**
- * scm_call_common() - Send an SCM command
+ * scm_call() - Send an SCM command
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
* @cmd_len: length of the command buffer
* @resp_buf: response buffer
* @resp_len: length of the response buffer
- * @scm_buf: internal scm structure used for passing data
- * @scm_buf_len: length of the internal scm structure
- *
- * Core function to scm call. Initializes the given cmd structure with
- * appropriate values and makes the actual scm call. Validation of cmd
- * pointer and length must occur in the calling function.
*
- * Returns the appropriate error code from the scm call
+ * Sends a command to the SCM and waits for the command to finish processing.
*/
-
-static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- struct scm_command *scm_buf,
- size_t scm_buf_length)
+int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+ void *resp_buf, size_t resp_len)
{
int ret;
+ struct scm_command *cmd;
struct scm_response *rsp;
unsigned long start, end;
- scm_buf->len = scm_buf_length;
- scm_buf->buf_offset = offsetof(struct scm_command, buf);
- scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
- scm_buf->id = (svc_id << 10) | cmd_id;
+ cmd = alloc_scm_command(cmd_len, resp_len);
+ if (!cmd)
+ return -ENOMEM;
+ cmd->id = (svc_id << 10) | cmd_id;
if (cmd_buf)
- memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);
+ memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
mutex_lock(&scm_lock);
- ret = __scm_call(scm_buf);
+ ret = __scm_call(cmd);
mutex_unlock(&scm_lock);
if (ret)
- return ret;
+ goto out;
- rsp = scm_command_to_response(scm_buf);
+ rsp = scm_command_to_response(cmd);
start = (unsigned long)rsp;
do {
@@ -238,74 +263,8 @@ static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
if (resp_buf)
memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
-
- return ret;
-}
-
-/**
- * scm_call_noalloc - Send an SCM command
- *
- * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
- * scm internal structures. The buffer should be allocated with
- * DEFINE_SCM_BUFFER to account for the proper alignment and size.
- */
-int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- void *scm_buf, size_t scm_buf_len)
-{
- int ret;
- size_t len = SCM_BUF_LEN(cmd_len, resp_len);
-
- if (cmd_len > scm_buf_len || resp_len > scm_buf_len ||
- len > scm_buf_len)
- return -EINVAL;
-
- if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
- return -EINVAL;
-
- memset(scm_buf, 0, scm_buf_len);
-
- ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, scm_buf, len);
- return ret;
-
-}
-
-/**
- * scm_call() - Send an SCM command
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking scm_call and invalidated in the cache
- * immediately after scm_call returns. Cache maintenance on the command and
- * response buffers is taken care of by scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
- void *resp_buf, size_t resp_len)
-{
- struct scm_command *cmd;
- int ret;
- size_t len = SCM_BUF_LEN(cmd_len, resp_len);
-
- if (cmd_len > len || resp_len > len)
- return -EINVAL;
-
- cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, cmd, len);
- kfree(cmd);
+out:
+ free_scm_command(cmd);
return ret;
}
EXPORT_SYMBOL(scm_call);
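
The reverted scm_call() keeps the same external contract: marshal a command buffer, trap to the secure world under scm_lock, then copy the response back out. A minimal sketch of a caller, assuming a hypothetical service and command ID (real IDs and payload layouts are defined per secure service):

#define EX_SVC_ID	0x1	/* hypothetical service identifier */
#define EX_CMD_ID	0x2	/* hypothetical command identifier */

static int example_scm_query(u32 arg, u32 *result)
{
	u32 cmd = arg;
	u32 resp = 0;
	int ret;

	/* scm_call() allocates the shared buffer, copies cmd in,
	 * issues the call, and copies the response back out. */
	ret = scm_call(EX_SVC_ID, EX_CMD_ID, &cmd, sizeof(cmd),
		       &resp, sizeof(resp));
	if (ret)
		return ret;

	*result = resp;
	return 0;
}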
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 5d777a5..8074199 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -17,6 +17,12 @@
*/
.align 5
ENTRY(v6_early_abort)
+#ifdef CONFIG_CPU_V6
+ sub r1, sp, #4 @ Get unused stack location
+ strex r0, r1, [r1] @ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+ clrex
+#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad0..7033752 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -13,6 +13,12 @@
*/
.align 5
ENTRY(v7_early_abort)
+ /*
+ * The effect of data aborts on the exclusive access monitor is
+ * UNPREDICTABLE. Do a CLREX to clear the state.
+ */
+ clrex
+
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
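
The CLREX added above matters because ARM atomics are built from LDREX/STREX pairs: an abort taken between the two leaves the exclusive monitor in an unpredictable state, and a stale monitor could let a STREX in the interrupted code succeed against a value it never loaded. A sketch of the pattern being protected (essentially the classic ARM atomic add-return loop):

static inline int example_atomic_add_return(int i, int *v)
{
	int result, tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"		/* mark [v] exclusive, load it */
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"		/* store only if still exclusive */
"	teq	%1, #0\n"
"	bne	1b"			/* monitor lost: retry */
	: "=&r" (result), "=&r" (tmp), "+Qo" (*v)
	: "r" (v), "Ir" (i)
	: "cc");

	return result;
}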
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 577de64..8a4a612 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -34,105 +34,6 @@
#include "mm.h"
-/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- */
-static void __dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-static void __dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
-/**
- * arm_dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- if (!arch_is_coherent())
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-/**
- * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- if (!arch_is_coherent())
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-
-static void arm_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned int offset = handle & (PAGE_SIZE - 1);
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- if (!arch_is_coherent())
- __dma_page_dev_to_cpu(page, offset, size, dir);
-}
-
-static void arm_dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned int offset = handle & (PAGE_SIZE - 1);
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- if (!arch_is_coherent())
- __dma_page_cpu_to_dev(page, offset, size, dir);
-}
-
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
-struct dma_map_ops arm_dma_ops = {
- .alloc = arm_dma_alloc,
- .free = arm_dma_free,
- .mmap = arm_dma_mmap,
- .map_page = arm_dma_map_page,
- .unmap_page = arm_dma_unmap_page,
- .map_sg = arm_dma_map_sg,
- .unmap_sg = arm_dma_unmap_sg,
- .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
- .sync_single_for_device = arm_dma_sync_single_for_device,
- .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
- .sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = arm_dma_set_mask,
-};
-EXPORT_SYMBOL(arm_dma_ops);
-
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)arm_dma_limit;
@@ -280,14 +181,14 @@ static int __init consistent_init(void)
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
- pr_err("%s: no pud tables\n", __func__);
+ printk(KERN_ERR "%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
+ printk(KERN_ERR "%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -295,7 +196,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
- pr_err("%s: no pte tables\n", __func__);
+ printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -410,7 +311,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
int bit;
if (!consistent_pte) {
- pr_err("%s: not initialised\n", __func__);
+ printk(KERN_ERR "%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
@@ -469,14 +370,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
- pr_err("%s: trying to free invalid coherent area: %p\n",
+ printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
- pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
+ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
@@ -498,8 +399,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
}
if (pte_none(pte) || !pte_present(pte))
- pr_crit("%s: bad page in kernel page table\n",
- __func__);
+ printk(KERN_CRIT "%s: bad page in kernel page table\n",
+ __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -623,14 +524,6 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
-{
- prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
- pgprot_writecombine(prot) :
- pgprot_dmacoherent(prot);
- return prot;
-}
-
#define nommu() 0
#else /* !CONFIG_MMU */
@@ -643,7 +536,6 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __free_from_pool(cpu_addr, size) 0
#define __free_from_contiguous(dev, page, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
-#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#endif /* CONFIG_MMU */
@@ -714,34 +606,39 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs)
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp, prot,
+ return __dma_alloc(dev, size, handle, gfp,
+ pgprot_dmacoherent(pgprot_kernel),
__builtin_return_address(0));
}
+EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Create userspace mapping for the DMA-coherent memory.
+ * Allocate a writecombining region, in much the same way as
+ * dma_alloc_coherent above.
*/
-int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+void *
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+{
+ return __dma_alloc(dev, size, handle, gfp,
+ pgprot_writecombine(pgprot_kernel),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(dma_alloc_writecombine);
+
+static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
unsigned long pfn = dma_to_pfn(dev, dma_addr);
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
ret = remap_pfn_range(vma, vma->vm_start,
pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
@@ -751,11 +648,27 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+ return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_coherent);
+
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_writecombine);
+
+
/*
* Free a buffer as defined by the above mapping.
*/
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
@@ -779,6 +692,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
__free_from_contiguous(dev, page, size);
}
}
+EXPORT_SYMBOL(dma_free_coherent);
/*
* Make an area consistent for devices.
@@ -873,13 +787,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -894,8 +802,9 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
@@ -913,9 +822,10 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
- * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
@@ -930,29 +840,32 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
-int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
+ BUG_ON(!valid_dma_direction(dir));
+
for_each_sg(sg, s, nents, i) {
- s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
- s->length, dir, attrs);
+ s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
+ debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
return 0;
}
+EXPORT_SYMBOL(dma_map_sg);
/**
- * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -961,55 +874,70 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
-
int i;
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+
for_each_sg(sg, s, nents, i)
- ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
+EXPORT_SYMBOL(dma_unmap_sg);
/**
- * arm_dma_sync_sg_for_cpu
+ * dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i)
- ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
- dir);
+ for_each_sg(sg, s, nents, i) {
+ if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+
+ debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
- * arm_dma_sync_sg_for_device
+ * dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i)
- ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
- dir);
+ for_each_sg(sg, s, nents, i) {
+ if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ __dma_page_cpu_to_dev(sg_page(s), s->offset,
+ s->length, dir);
+ }
+
+ debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -1025,15 +953,18 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
+#ifndef CONFIG_DMABOUNCE
*dev->dma_mask = dma_mask;
+#endif
return 0;
}
+EXPORT_SYMBOL(dma_set_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
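
This hunk reverts ARM from dma_map_ops-based dispatch back to direct exports of dma_alloc_coherent(), dma_mmap_writecombine(), dma_map_sg() and friends. A minimal sketch of driver usage against the reverted API, for a hypothetical device that keeps a one-page descriptor ring:

static int example_ring_setup(struct device *dev,
			      void **cpu, dma_addr_t *dma)
{
	/* Coherent memory: no dma_sync_* calls are needed, the CPU
	 * and device views of this buffer stay consistent. */
	*cpu = dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
	if (!*cpu)
		return -ENOMEM;
	return 0;
}

static void example_ring_teardown(struct device *dev,
				  void *cpu, dma_addr_t dma)
{
	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
}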
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b6fb650..d3a9f01 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -265,18 +265,6 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
}
#endif
-static char __read_mostly octeon_system_type[80];
-
-static int __init init_octeon_system_type(void)
-{
- snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
- cvmx_board_type_to_string(octeon_bootinfo->board_type),
- octeon_model_get_string(read_c0_prid()));
-
- return 0;
-}
-early_initcall(init_octeon_system_type);
-
/**
* Return a string representing the system type
*
@@ -284,7 +272,11 @@ early_initcall(init_octeon_system_type);
*/
const char *octeon_board_type_string(void)
{
- return octeon_system_type;
+ static char name[80];
+ sprintf(name, "%s (%s)",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type),
+ octeon_model_get_string(read_c0_prid()));
+ return name;
}
const char *get_system_type(void)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 52f60e5..811084f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -162,6 +162,11 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
return counters >> vpe_shift();
}
+static unsigned int counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id() 0
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fdd6042..bda8eb2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -12,7 +12,6 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
-#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -599,7 +598,6 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
- preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -620,7 +618,6 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(addr, addr + size);
}
- preempt_enable();
bc_wback_inv(addr, size);
__sync();
@@ -631,7 +628,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
- preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -667,7 +663,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
blast_inv_dcache_range(addr, addr + size);
}
- preempt_enable();
bc_inv(addr, size);
__sync();
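
For reference, the preempt_disable()/preempt_enable() pairs removed here were pinning the task to one CPU: these cache ops act only on the local cache, so a migration between the size check and the blast would leave the original CPU's lines untouched. The pattern the revert drops, in sketch form:

static void example_local_wback_inv(unsigned long addr, unsigned long size)
{
	preempt_disable();	/* no migration while we operate on the
				 * local D-cache */
	blast_dcache_range(addr, addr + size);
	preempt_enable();
}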
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 2745196..1088b5f 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -19,7 +19,6 @@
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 1f3e9ea..eeb8054 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -6,7 +6,6 @@ config UNICORE32
select HAVE_DMA_ATTRS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
- select GENERIC_ATOMIC64
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select GENERIC_FIND_FIRST_BIT
diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h
index 93a56f3..b1ff8ca 100644
--- a/arch/unicore32/include/asm/bug.h
+++ b/arch/unicore32/include/asm/bug.h
@@ -19,4 +19,9 @@ extern void die(const char *msg, struct pt_regs *regs, int err);
extern void uc32_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap);
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+extern void __show_regs(struct pt_regs *);
+
#endif /* __UNICORE_BUG_H__ */
diff --git a/arch/unicore32/include/asm/cmpxchg.h b/arch/unicore32/include/asm/cmpxchg.h
index 8e797ad..df4d5ac 100644
--- a/arch/unicore32/include/asm/cmpxchg.h
+++ b/arch/unicore32/include/asm/cmpxchg.h
@@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
: "memory", "cc");
break;
default:
- __xchg_bad_pointer();
+ ret = __xchg_bad_pointer();
}
return ret;
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
index 30f749d..f239550 100644
--- a/arch/unicore32/kernel/setup.h
+++ b/arch/unicore32/kernel/setup.h
@@ -30,10 +30,4 @@ extern char __vectors_start[], __vectors_end[];
extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
-
-extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-extern void __show_regs(struct pt_regs *);
-
#endif
diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/asm/ioctls.h
index 96341aa..fd1d136 100644
--- a/arch/xtensa/include/asm/ioctls.h
+++ b/arch/xtensa/include/asm/ioctls.h
@@ -28,17 +28,17 @@
#define TCSETSW 0x5403
#define TCSETSF 0x5404
-#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
-#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
-#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
-#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
-#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
-#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
@@ -88,6 +88,7 @@
#define TIOCSETD _IOW('T', 35, int)
#define TIOCGETD _IOR('T', 36, int)
#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
+#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
@@ -110,10 +111,8 @@
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
- /* _IOR('T', 90, struct serial_multiport_struct) */
-#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
- /* _IOW('T', 91, struct serial_multiport_struct) */
+#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
+#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
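
The hex constants reintroduced above are just the pre-expanded _IOR()/_IOW() values, hard-coded so this header need not pull in the encoding macros. The encoding packs direction (2 bits), size (14 bits), type (8 bits) and number (8 bits); a small userspace sketch that reproduces 0x80127417 for TCGETA:

#include <stdio.h>

#define EX_NRSHIFT	0
#define EX_TYPESHIFT	8
#define EX_SIZESHIFT	16
#define EX_DIRSHIFT	30
#define EX_READ		2U	/* _IOC_READ */

struct termio {
	unsigned short c_iflag, c_oflag, c_cflag, c_lflag;
	unsigned char c_line, c_cc[8];	/* sizeof == 18 with padding */
};

int main(void)
{
	unsigned int tcgeta = (EX_READ << EX_DIRSHIFT) |
		((unsigned int)sizeof(struct termio) << EX_SIZESHIFT) |
		('t' << EX_TYPESHIFT) | (23 << EX_NRSHIFT);

	printf("TCGETA = 0x%08X\n", tcgeta);	/* prints 0x80127417 */
	return 0;
}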
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 7eeaf22..b03c043 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -68,12 +68,7 @@
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
-#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
-#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
-#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
-#else
-#define TLBTEMP_SIZE ICACHE_WAY_SIZE
-#endif
+#define TLBTEMP_BASE_2 0xC7FF8000
/*
* Xtensa Linux config PTE layout (when present):
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index e01cffc..6223f33 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1053,8 +1053,9 @@ ENTRY(fast_syscall_xtensa)
movi a7, 4 # sizeof(unsigned int)
access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
- _bgeui a6, SYS_XTENSA_COUNT, .Lill
- _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
+ addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
+ _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
/* Fall through for ATOMIC_CMP_SWP. */
@@ -1066,26 +1067,27 @@ TRY s32i a5, a3, 0 # different, modify value
l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 1 # and return 1
+ addi a6, a6, 1 # restore a6 (really necessary?)
rfe
1: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 0 # return 0 (note that we cannot set
+ addi a6, a6, 1 # restore a6 (really necessary?)
rfe
.Lnswp: /* Atomic set, add, and exg_add. */
TRY l32i a7, a3, 0 # orig
- addi a6, a6, -SYS_XTENSA_ATOMIC_SET
add a0, a4, a7 # + arg
moveqz a0, a4, a6 # set
- addi a6, a6, SYS_XTENSA_ATOMIC_SET
TRY s32i a0, a3, 0 # write new value
mov a0, a2
mov a2, a7
l32i a7, a0, PT_AREG7 # restore a7
l32i a0, a0, PT_AREG0 # restore a0
+ addi a6, a6, 1 # restore a6 (really necessary?)
rfe
CATCH
@@ -1094,7 +1096,7 @@ CATCH
movi a2, -EFAULT
rfe
-.Lill: l32i a7, a2, PT_AREG7 # restore a7
+.Lill: l32i a7, a2, PT_AREG0 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, -EINVAL
rfe
@@ -1627,7 +1629,7 @@ ENTRY(fast_second_level_miss)
rsr a0, EXCVADDR
bltu a0, a3, 2f
- addi a1, a0, -TLBTEMP_SIZE
+ addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
bgeu a1, a3, 2f
/* Check if we have to restore an ITLB mapping. */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index c055c91..2783fda 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -48,8 +48,9 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
/* We currently don't support coherent memory outside KSEG */
- BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
- ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ if (ret < XCHAL_KSEG_CACHED_VADDR
+ || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
+ BUG();
if (ret != 0) {
@@ -65,11 +66,10 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- unsigned long addr = (unsigned long)vaddr +
- XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
- BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
- addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
+ BUG();
free_pages(addr, get_order(size));
}
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 19759d3..e265f83 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
* Note: Original code is ata_bus_softreset().
*/
-static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -600,7 +600,9 @@ static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);
- return scc_wait_after_reset(&ap->link, devmask, deadline);
+ scc_wait_after_reset(&ap->link, devmask, deadline);
+
+ return 0;
}
/**
@@ -617,8 +619,7 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
{
struct ata_port *ap = link->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0;
- int rc;
+ unsigned int devmask = 0, err_mask;
u8 err;
DPRINTK("ENTER\n");
@@ -634,9 +635,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
- rc = scc_bus_softreset(ap, devmask, deadline);
- if (rc) {
- ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
+ err_mask = scc_bus_softreset(ap, devmask, deadline);
+ if (err_mask) {
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
return -EIO;
}
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1b85949..bb0025c 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,7 +10,6 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
- phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -45,7 +44,6 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
- dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -178,43 +176,3 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
-
-/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
- * @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
- *
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
- */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
-{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
- if (mem && vaddr >= mem->virt_base && vaddr + size <=
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- unsigned long off = vma->vm_pgoff;
- int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- int count = size >> PAGE_SHIFT;
-
- *ret = -ENXIO;
- if (off < count && user_count <= count - off) {
- unsigned pfn = mem->pfn_base + start + off;
- *ret = remap_pfn_range(vma, vma->vm_start, pfn,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
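
dma_alloc_from_coherent()/dma_release_from_coherent() in this file serve allocations from a per-device pool declared earlier. A sketch of declaring such a pool, assuming a hypothetical on-chip SRAM at a made-up bus address (in this era dma_declare_coherent_memory() returns nonzero on success, 0 on failure):

#define EX_SRAM_BUS	0x40000000	/* hypothetical bus address */
#define EX_SRAM_SIZE	SZ_64K

static int example_declare_pool(struct device *dev)
{
	if (!dma_declare_coherent_memory(dev, EX_SRAM_BUS, EX_SRAM_BUS,
					 EX_SRAM_SIZE,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENODEV;

	/* dma_alloc_coherent() on this dev is now served from the pool
	 * via dma_alloc_from_coherent() before any generic allocator. */
	return 0;
}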
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index 1149dec..ff326fd 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,7 +116,6 @@ static const struct file_operations genlock_fops = {
struct genlock *genlock_create_lock(struct genlock_handle *handle)
{
struct genlock *lock;
- void *ret;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
@@ -146,13 +145,8 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle)
* other processes
*/
- ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
- if (IS_ERR_OR_NULL(ret)) {
- GENLOCK_LOG_ERR("Unable to create lock inode\n");
- kfree(lock);
- return ret;
- }
- lock->file = ret;
+ lock->file = anon_inode_getfile("genlock", &genlock_fops,
+ lock, O_RDWR);
/* Attach the new lock to the handle */
handle->lock = lock;
@@ -666,19 +660,12 @@ static struct genlock_handle *_genlock_get_handle(void)
struct genlock_handle *genlock_get_handle(void)
{
- void *ret;
struct genlock_handle *handle = _genlock_get_handle();
if (IS_ERR(handle))
return handle;
- ret = anon_inode_getfile("genlock-handle",
+ handle->file = anon_inode_getfile("genlock-handle",
&genlock_handle_fops, handle, O_RDWR);
- if (IS_ERR_OR_NULL(ret)) {
- GENLOCK_LOG_ERR("Unable to create handle inode\n");
- kfree(handle);
- return ret;
- }
- handle->file = ret;
return handle;
}
@@ -712,50 +699,6 @@ struct genlock_handle *genlock_get_handle_fd(int fd)
}
EXPORT_SYMBOL(genlock_get_handle_fd);
-/*
- * Get a file descriptor reference to a lock suitable for sharing with
- * other processes
- */
-
-int genlock_get_fd_handle(struct genlock_handle *handle)
-{
- int ret;
- struct genlock *lock;
-
- if (IS_ERR_OR_NULL(handle))
- return -EINVAL;
-
- lock = handle->lock;
-
- if (IS_ERR(lock))
- return PTR_ERR(lock);
-
- if (!lock->file) {
- GENLOCK_LOG_ERR("No file attached to the lock\n");
- return -EINVAL;
- }
-
- ret = get_unused_fd_flags(0);
-
- if (ret < 0)
- return ret;
-
- fd_install(ret, lock->file);
-
- /*
- * Take a reference on the lock file. This is required
- * because two file descriptors now point to the same file:
- * if one FD is closed, the lock file would be closed too.
- * Taking this reference makes sure the file doesn't get
- * closed; the reference is dropped when the client calls
- * close on this FD.
- */
- fget(ret);
-
- return ret;
-}
-EXPORT_SYMBOL(genlock_get_fd_handle);
-
#ifdef CONFIG_GENLOCK_MISCDEVICE
static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
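
The two genlock hunks above drop the IS_ERR_OR_NULL() checking of anon_inode_getfile() and the matching kfree() on failure; the defensive shape being removed looks like this sketch:

static struct file *example_anon_file(const struct file_operations *fops,
				      void *priv)
{
	struct file *filp = anon_inode_getfile("example", fops, priv, O_RDWR);

	if (IS_ERR_OR_NULL(filp)) {
		pr_err("example: unable to create inode\n");
		return filp;	/* caller frees priv and propagates */
	}
	return filp;
}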
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
index abcd58b..9bc0da5 100644
--- a/drivers/base/sync.c
+++ b/drivers/base/sync.c
@@ -28,14 +28,8 @@
#include <linux/anon_inodes.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/sync.h>
-#include <asm/current.h>
-
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
-static void sync_fence_free(struct kref *kref);
-static void sync_dump(struct sync_fence *fence);
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
@@ -56,7 +50,6 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
if (obj == NULL)
return NULL;
- kref_init(&obj->kref);
obj->ops = ops;
strlcpy(obj->name, name, sizeof(obj->name));
@@ -74,33 +67,34 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
}
EXPORT_SYMBOL(sync_timeline_create);
-static void sync_timeline_free(struct kref *kref)
+static void sync_timeline_free(struct sync_timeline *obj)
{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
unsigned long flags;
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
- obj->destroyed = true;
- smp_wmb();
+ unsigned long flags;
+ bool needs_freeing;
- /*
- * signal any children that their parent is going away.
- */
- sync_timeline_signal(obj);
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ obj->destroyed = true;
+ needs_freeing = list_empty(&obj->child_list_head);
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
- kref_put(&obj->kref, sync_timeline_free);
+ if (needs_freeing)
+ sync_timeline_free(obj);
+ else
+ sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
@@ -119,6 +113,7 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
{
struct sync_timeline *obj = pt->parent;
unsigned long flags;
+ bool needs_freeing;
spin_lock_irqsave(&obj->active_list_lock, flags);
if (!list_empty(&pt->active_list))
@@ -126,10 +121,12 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
spin_unlock_irqrestore(&obj->active_list_lock, flags);
spin_lock_irqsave(&obj->child_list_lock, flags);
- if (!list_empty(&pt->child_list)) {
- list_del_init(&pt->child_list);
- }
+ list_del(&pt->child_list);
+ needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
+
+ if (needs_freeing)
+ sync_timeline_free(obj);
}
void sync_timeline_signal(struct sync_timeline *obj)
@@ -138,30 +135,24 @@ void sync_timeline_signal(struct sync_timeline *obj)
LIST_HEAD(signaled_pts);
struct list_head *pos, *n;
- trace_sync_timeline(obj);
-
spin_lock_irqsave(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &obj->active_list_head) {
struct sync_pt *pt =
container_of(pos, struct sync_pt, active_list);
- if (_sync_pt_has_signaled(pt)) {
- list_del_init(pos);
- list_add(&pt->signaled_list, &signaled_pts);
- kref_get(&pt->fence->kref);
- }
+ if (_sync_pt_has_signaled(pt))
+ list_move(pos, &signaled_pts);
}
spin_unlock_irqrestore(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &signaled_pts) {
struct sync_pt *pt =
- container_of(pos, struct sync_pt, signaled_list);
+ container_of(pos, struct sync_pt, active_list);
list_del_init(pos);
sync_fence_signal_pt(pt);
- kref_put(&pt->fence->kref, sync_fence_free);
}
}
EXPORT_SYMBOL(sync_timeline_signal);
@@ -178,7 +169,6 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
return NULL;
INIT_LIST_HEAD(&pt->active_list);
- kref_get(&parent->kref);
sync_timeline_add_pt(parent, pt);
return pt;
@@ -192,8 +182,6 @@ void sync_pt_free(struct sync_pt *pt)
sync_timeline_remove_pt(pt);
- kref_put(&pt->parent->kref, sync_timeline_free);
-
kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
@@ -265,13 +253,12 @@ static struct sync_fence *sync_fence_alloc(const char *name)
if (fence->file == NULL)
goto err;
- kref_init(&fence->kref);
strlcpy(fence->name, name, sizeof(fence->name));
INIT_LIST_HEAD(&fence->pt_list_head);
INIT_LIST_HEAD(&fence->waiter_list_head);
spin_lock_init(&fence->waiter_list_lock);
- trace_sync_alloc(fence,current->pid);
+
init_waitqueue_head(&fence->wq);
spin_lock_irqsave(&sync_fence_list_lock, flags);
@@ -301,12 +288,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
list_add(&pt->pt_list, &fence->pt_list_head);
sync_pt_activate(pt);
- /*
- * signal the fence in case pt was activated before
- * sync_pt_activate(pt) was called
- */
- sync_fence_signal_pt(pt);
-
return fence;
}
EXPORT_SYMBOL(sync_fence_create);
@@ -325,68 +306,12 @@ static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
new_pt->fence = dst;
list_add(&new_pt->pt_list, &dst->pt_list_head);
+ sync_pt_activate(new_pt);
}
return 0;
}
-static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
-{
- struct list_head *src_pos, *dst_pos, *n;
-
- list_for_each(src_pos, &src->pt_list_head) {
- struct sync_pt *src_pt =
- container_of(src_pos, struct sync_pt, pt_list);
- bool collapsed = false;
-
- list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
- struct sync_pt *dst_pt =
- container_of(dst_pos, struct sync_pt, pt_list);
- /* collapse two sync_pts on the same timeline
- * to a single sync_pt that will signal at
- * the later of the two
- */
- if (dst_pt->parent == src_pt->parent) {
- if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
- struct sync_pt *new_pt =
- sync_pt_dup(src_pt);
- if (new_pt == NULL)
- return -ENOMEM;
-
- new_pt->fence = dst;
- list_replace(&dst_pt->pt_list,
- &new_pt->pt_list);
- sync_pt_free(dst_pt);
- }
- collapsed = true;
- break;
- }
- }
-
- if (!collapsed) {
- struct sync_pt *new_pt = sync_pt_dup(src_pt);
-
- if (new_pt == NULL)
- return -ENOMEM;
-
- new_pt->fence = dst;
- list_add(&new_pt->pt_list, &dst->pt_list_head);
- }
- }
-
- return 0;
-}
-
-static void sync_fence_detach_pts(struct sync_fence *fence)
-{
- struct list_head *pos, *n;
-
- list_for_each_safe(pos, n, &fence->pt_list_head) {
- struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
- sync_timeline_remove_pt(pt);
- }
-}
-
static void sync_fence_free_pts(struct sync_fence *fence)
{
struct list_head *pos, *n;
@@ -451,7 +376,6 @@ struct sync_fence *sync_fence_merge(const char *name,
struct sync_fence *a, struct sync_fence *b)
{
struct sync_fence *fence;
- struct list_head *pos;
int err;
fence = sync_fence_alloc(name);
@@ -462,23 +386,11 @@ struct sync_fence *sync_fence_merge(const char *name,
if (err < 0)
goto err;
- err = sync_fence_merge_pts(fence, b);
+ err = sync_fence_copy_pts(fence, b);
if (err < 0)
goto err;
- list_for_each(pos, &fence->pt_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, pt_list);
- sync_pt_activate(pt);
- }
-
- /*
- * signal the fence in case one of it's pts were activated before
- * they were activated
- */
- sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
- struct sync_pt,
- pt_list));
+ fence->status = sync_fence_get_status(fence);
return fence;
err:
@@ -577,91 +489,44 @@ int sync_fence_cancel_async(struct sync_fence *fence,
}
EXPORT_SYMBOL(sync_fence_cancel_async);
-static bool sync_fence_check(struct sync_fence *fence)
-{
- /*
- * Make sure that reads to fence->status are ordered with the
- * wait queue event triggering
- */
- smp_rmb();
- return fence->status != 0;
-}
-
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
- int err = 0;
- struct sync_pt *pt;
-
- trace_sync_wait(fence, 1);
- list_for_each_entry(pt, &fence->pt_list_head, pt_list)
- trace_sync_pt(pt);
+ int err;
- if (timeout > 0) {
+ if (timeout) {
timeout = msecs_to_jiffies(timeout);
err = wait_event_interruptible_timeout(fence->wq,
- sync_fence_check(fence),
+ fence->status != 0,
timeout);
- } else if (timeout < 0) {
- err = wait_event_interruptible(fence->wq,
- sync_fence_check(fence));
+ } else {
+ err = wait_event_interruptible(fence->wq, fence->status != 0);
}
- trace_sync_wait(fence, 0);
if (err < 0)
return err;
- if (fence->status < 0) {
- pr_info("fence error %d on [%p]\n", fence->status, fence);
- sync_dump(fence);
+ if (fence->status < 0)
return fence->status;
- }
- if (fence->status == 0) {
- if (timeout > 0) {
- pr_info("fence timeout on [%p] after %dms\n", fence,
- jiffies_to_msecs(timeout));
- sync_dump(fence);
- }
+ if (fence->status == 0)
return -ETIME;
- }
return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
-static void sync_fence_free(struct kref *kref)
-{
- struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
-
- trace_sync_free(fence, current->pid);
-
- sync_fence_free_pts(fence);
-
- kfree(fence);
-}
-
static int sync_fence_release(struct inode *inode, struct file *file)
{
struct sync_fence *fence = file->private_data;
unsigned long flags;
- /*
- * We need to remove all ways to access this fence before droping
- * our ref.
- *
- * start with its membership in the global fence list
- */
+ sync_fence_free_pts(fence);
+
spin_lock_irqsave(&sync_fence_list_lock, flags);
list_del(&fence->sync_fence_list);
spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- /*
- * remove its pts from their parents so that sync_timeline_signal()
- * can't reference the fence.
- */
- sync_fence_detach_pts(fence);
-
- kref_put(&fence->kref, sync_fence_free);
+ kfree(fence);
return 0;
}
@@ -672,12 +537,6 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
poll_wait(file, &fence->wq, wait);
- /*
- * Make sure that reads to fence->status are ordered with the
- * wait queue event triggering
- */
- smp_rmb();
-
if (fence->status == 1)
return POLLIN;
else if (fence->status < 0)
@@ -688,7 +547,7 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
- __s32 value;
+ __u32 value;
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
@@ -703,13 +562,8 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
struct sync_fence *fence2, *fence3;
struct sync_merge_data data;
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
fence2 = sync_fence_fdget(data.fd2);
if (fence2 == NULL) {
@@ -866,17 +720,7 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
}
- if (pt->parent->ops->timeline_value_str &&
- pt->parent->ops->pt_value_str) {
- char value[64];
- pt->parent->ops->pt_value_str(pt, value, sizeof(value));
- seq_printf(s, ": %s", value);
- if (fence) {
- pt->parent->ops->timeline_value_str(pt->parent, value,
- sizeof(value));
- seq_printf(s, " / %s", value);
- }
- } else if (pt->parent->ops->print_pt) {
+ if (pt->parent->ops->print_pt) {
seq_printf(s, ": ");
pt->parent->ops->print_pt(s, pt);
}
@@ -891,11 +735,7 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
- if (obj->ops->timeline_value_str) {
- char value[64];
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- } else if (obj->ops->print_obj) {
+ if (obj->ops->print_obj) {
seq_printf(s, ": ");
obj->ops->print_obj(s, obj);
}
@@ -916,8 +756,7 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
struct list_head *pos;
unsigned long flags;
- seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
- sync_status_str(fence->status));
+ seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
list_for_each(pos, &fence->pt_list_head) {
struct sync_pt *pt =
@@ -985,36 +824,7 @@ static __init int sync_debugfs_init(void)
debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
return 0;
}
-late_initcall(sync_debugfs_init);
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-static void sync_dump(struct sync_fence *fence)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- seq_printf(&s, "fence:\n--------------\n");
- sync_print_fence(&s, fence);
- seq_printf(&s, "\n");
+late_initcall(sync_debugfs_init);
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
-#else
-static void sync_dump(struct sync_fence *fence)
-{
-}
#endif
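/*
 * For context on the sync.c hunks above: the timeline_value_str/pt_value_str
 * callbacks removed by this change are the debugfs pretty-printers a timeline
 * driver could supply before this revert. A minimal sketch of providing them
 * (my_timeline/my_pt and their value fields are hypothetical driver types,
 * not something defined in this patch):
 *
 *	static void my_timeline_value_str(struct sync_timeline *tl,
 *					  char *str, int size)
 *	{
 *		struct my_timeline *mtl =
 *			container_of(tl, struct my_timeline, obj);
 *		snprintf(str, size, "%u", mtl->value);
 *	}
 *
 *	static void my_pt_value_str(struct sync_pt *pt, char *str, int size)
 *	{
 *		struct my_pt *mpt = container_of(pt, struct my_pt, pt);
 *		snprintf(str, size, "%u", mpt->value);
 *	}
 *
 *	static struct sync_timeline_ops my_timeline_ops = {
 *		.driver_name		= "my_timeline",
 *		.timeline_value_str	= my_timeline_value_str,
 *		.pt_value_str		= my_pt_value_str,
 *	};
 */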
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 40d33b4..4897837 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -642,7 +642,7 @@ config TILE_SROM
config MSM_ROTATOR
tristate "MSM Offline Image Rotator Driver"
- depends on (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
+ depends on (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960) && ANDROID_PMEM
default y
help
This driver provides support for the image rotator HW block in the
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index d2aca70..93fab78 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
+#include <linux/android_pmem.h>
#include <linux/msm_rotator.h>
#include <linux/io.h>
#include <mach/msm_rotator_imem.h>
@@ -27,10 +28,7 @@
#include <linux/file.h>
#include <linux/major.h>
#include <linux/regulator/consumer.h>
-#include <linux/msm_ion.h>
-#include <linux/sync.h>
-#include <linux/sw_sync.h>
-
+#include <linux/ion.h>
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
@@ -93,17 +91,10 @@
#define VERSION_KEY_MASK 0xFFFFFF00
#define MAX_DOWNSCALE_RATIO 3
-#define MAX_TIMELINE_NAME_LEN 16
-#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC
-#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
-
#define ROTATOR_REVISION_V0 0
#define ROTATOR_REVISION_V1 1
#define ROTATOR_REVISION_V2 2
#define ROTATOR_REVISION_NONE 0xffffffff
-#define BASE_ADDR(height, y_stride) (((height) % 64) * (y_stride))
-#define HW_BASE_ADDR(height, y_stride) ((((y_stride) >> 5) << 11) - \
- (((height) & 0x3f) * (y_stride)))
uint32_t rotator_hw_revision;
static char rot_iommu_split_domain;
@@ -137,29 +128,12 @@ struct msm_rotator_fd_info {
struct list_head list;
};
-struct rot_sync_info {
- u32 initialized;
- struct sync_fence *acq_fen;
- int cur_rel_fen_fd;
- struct sync_pt *cur_rel_sync_pt;
- struct sync_fence *cur_rel_fence;
- struct sync_fence *last_rel_fence;
- struct sw_sync_timeline *timeline;
- int timeline_value;
- struct mutex sync_mutex;
-};
-
-struct msm_rotator_session {
- struct msm_rotator_img_info img_info;
- struct msm_rotator_fd_info fd_info;
- int fast_yuv_enable;
-};
-
struct msm_rotator_dev {
void __iomem *io_base;
int irq;
+ struct msm_rotator_img_info *img_info[MAX_SESSIONS];
struct clk *core_clk;
- struct msm_rotator_session *rot_session[MAX_SESSIONS];
+ struct msm_rotator_fd_info *fd_info[MAX_SESSIONS];
struct list_head fd_list;
struct clk *pclk;
int rot_clk_state;
@@ -183,7 +157,6 @@ struct msm_rotator_dev {
#ifdef CONFIG_MSM_BUS_SCALING
uint32_t bus_client_handle;
#endif
- struct rot_sync_info sync_info[MAX_SESSIONS];
};
#define COMPONENT_5BITS 1
@@ -212,7 +185,8 @@ int msm_rotator_iommu_map_buf(int mem_id, int domain,
pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl, mem_id);
+ pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
+ ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
if (rot_iommu_split_domain) {
if (secure) {
@@ -357,164 +331,6 @@ static irqreturn_t msm_rotator_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void msm_rotator_signal_timeline(u32 session_index)
-{
- struct rot_sync_info *sync_info;
- sync_info = &msm_rotator_dev->sync_info[session_index];
-
- if ((!sync_info->timeline) || (!sync_info->initialized))
- return;
-
- mutex_lock(&sync_info->sync_mutex);
- sw_sync_timeline_inc(sync_info->timeline, 1);
- sync_info->timeline_value++;
- sync_info->last_rel_fence = sync_info->cur_rel_fence;
- sync_info->cur_rel_fence = 0;
- mutex_unlock(&sync_info->sync_mutex);
-}
-
-static void msm_rotator_release_acq_fence(u32 session_index)
-{
- struct rot_sync_info *sync_info;
- sync_info = &msm_rotator_dev->sync_info[session_index];
-
- if ((!sync_info->timeline) || (!sync_info->initialized))
- return;
- mutex_lock(&sync_info->sync_mutex);
- sync_info->acq_fen = NULL;
- mutex_unlock(&sync_info->sync_mutex);
-}
-
-static void msm_rotator_release_all_timeline(void)
-{
- int i;
- struct rot_sync_info *sync_info;
- for (i = 0; i < MAX_SESSIONS; i++) {
- sync_info = &msm_rotator_dev->sync_info[i];
- if (sync_info->initialized) {
- msm_rotator_signal_timeline(i);
- msm_rotator_release_acq_fence(i);
- }
- }
-}
-
-static void msm_rotator_wait_for_fence_sub(u32 session_index)
-{
- struct rot_sync_info *sync_info;
- int ret;
- sync_info = &msm_rotator_dev->sync_info[session_index];
- if (sync_info->acq_fen) {
- ret = sync_fence_wait(sync_info->acq_fen,
- WAIT_FENCE_FIRST_TIMEOUT);
- if (ret == -ETIME) {
- pr_warn("%s: timeout, wait %ld more ms\n",
- __func__, WAIT_FENCE_FINAL_TIMEOUT);
- ret = sync_fence_wait(sync_info->acq_fen,
- WAIT_FENCE_FINAL_TIMEOUT);
- }
- if (ret < 0) {
- pr_err("%s: sync_fence_wait failed! ret = %x\n",
- __func__, ret);
- }
- sync_fence_put(sync_info->acq_fen);
- sync_info->acq_fen = NULL;
- }
-}
-
-static void msm_rotator_wait_for_fence(u32 session_index)
-{
- struct rot_sync_info *sync_info;
- sync_info = &msm_rotator_dev->sync_info[session_index];
- if ((!sync_info->timeline) || (!sync_info->initialized))
- return;
-
- mutex_lock(&sync_info->sync_mutex);
- msm_rotator_wait_for_fence_sub(session_index);
- mutex_unlock(&sync_info->sync_mutex);
-}
-
-static int msm_rotator_buf_sync(unsigned long arg)
-{
- struct msm_rotator_buf_sync buf_sync;
- int ret = 0;
- struct sync_fence *fence = NULL;
- struct rot_sync_info *sync_info;
- u32 s;
-
- if (copy_from_user(&buf_sync, (void __user *)arg, sizeof(buf_sync)))
- return -EFAULT;
-
- for (s = 0; s < MAX_SESSIONS; s++)
- if ((msm_rotator_dev->rot_session[s] != NULL) &&
- (buf_sync.session_id ==
- (unsigned int)msm_rotator_dev->rot_session[s]
- ))
- break;
-
- if (s == MAX_SESSIONS) {
- pr_err("%s invalid session id %d", __func__,
- buf_sync.session_id);
- return -EINVAL;
- }
-
- sync_info = &msm_rotator_dev->sync_info[s];
-
- if ((sync_info->timeline == NULL) ||
- (sync_info->initialized == false))
- return -EINVAL;
-
- mutex_lock(&sync_info->sync_mutex);
- if (buf_sync.acq_fen_fd >= 0)
- fence = sync_fence_fdget(buf_sync.acq_fen_fd);
-
- sync_info->acq_fen = fence;
-
- if (sync_info->acq_fen &&
- (buf_sync.flags & MDP_BUF_SYNC_FLAG_WAIT))
- msm_rotator_wait_for_fence_sub(s);
-
- sync_info->cur_rel_sync_pt = sw_sync_pt_create(sync_info->timeline,
- sync_info->timeline_value + 1);
- if (sync_info->cur_rel_sync_pt == NULL) {
- pr_err("%s: cannot create sync point", __func__);
- ret = -ENOMEM;
- goto buf_sync_err_1;
- }
- /* create fence */
- sync_info->cur_rel_fence = sync_fence_create("msm_rotator-fence",
- sync_info->cur_rel_sync_pt);
- if (sync_info->cur_rel_fence == NULL) {
- sync_pt_free(sync_info->cur_rel_sync_pt);
- sync_info->cur_rel_sync_pt = NULL;
- pr_err("%s: cannot create fence", __func__);
- ret = -ENOMEM;
- goto buf_sync_err_1;
- }
- /* create fd */
- sync_info->cur_rel_fen_fd = get_unused_fd_flags(0);
- if (sync_info->cur_rel_fen_fd < 0) {
- pr_err("%s: get_unused_fd_flags failed", __func__);
- ret = -EIO;
- goto buf_sync_err_2;
- }
- sync_fence_install(sync_info->cur_rel_fence, sync_info->cur_rel_fen_fd);
- buf_sync.rel_fen_fd = sync_info->cur_rel_fen_fd;
-
- ret = copy_to_user((void __user *)arg, &buf_sync, sizeof(buf_sync));
- mutex_unlock(&sync_info->sync_mutex);
- return ret;
-buf_sync_err_2:
- sync_fence_put(sync_info->cur_rel_fence);
- sync_info->cur_rel_fence = NULL;
- sync_info->cur_rel_fen_fd = 0;
-buf_sync_err_1:
- if (sync_info->acq_fen)
- sync_fence_put(sync_info->acq_fen);
- sync_info->acq_fen = NULL;
- mutex_unlock(&sync_info->sync_mutex);
- return ret;
-}
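/*
 * The function removed above follows the stock Android sw_sync pattern for
 * handing a release fence back to userspace; condensed from its own body
 * (error paths trimmed):
 *
 *	pt = sw_sync_pt_create(sync_info->timeline,
 *			       sync_info->timeline_value + 1);
 *	fence = sync_fence_create("msm_rotator-fence", pt);
 *	fd = get_unused_fd_flags(0);
 *	sync_fence_install(fence, fd);
 *	buf_sync.rel_fen_fd = fd;
 *
 * The fence later signals when msm_rotator_signal_timeline() calls
 * sw_sync_timeline_inc(timeline, 1), catching timeline_value up to the point.
 */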
-
static unsigned int tile_size(unsigned int src_width,
unsigned int src_height,
const struct tile_parm *tp)
@@ -556,7 +372,6 @@ static int get_bpp(int format)
case MDP_YCRCB_H1V1:
return 3;
- case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
return 2;/* YCrYCb interleave */
@@ -600,7 +415,6 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h,
case MDP_RGB_888:
case MDP_RGB_565:
case MDP_BGR_565:
- case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
@@ -609,7 +423,6 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h,
break;
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H1V2:
p->num_planes = 2;
p->plane_size[0] = w * h;
p->plane_size[1] = w * h;
@@ -649,110 +462,6 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h,
return 0;
}
-/* Checking invalid destination image size on FAST YUV for YUV420PP(NV12) with
- * HW issue for rotation 90 + U/D flip + with/without L/R flip operation
- * (rotation 90 + U/D + L/R flip is rotation 270 degree option) and pix_rot
- * block issue with tile line size is 4.
- *
- * Rotator structure is:
- * if Fetch input image: W x H,
- * Downscale: W` x H` = W/ScaleHor(2, 4 or 8) x H/ScaleVert(2, 4 or 8)
- * Rotated output : W`` x H`` = (W` x H`) or (H` x W`) depends on "Rotation 90
- * degree option"
- *
- * Pack: W`` x H``
- *
- * Rotator source ROI image width restriction is applied to W x H (case a,
- * image resolution before downscaling)
- *
- * Packer source Image width/ height restriction are applied to W`` x H``
- * (case c, image resolution after rotation)
- *
- * Supertile (64 x 8) and YUV (2 x 2) alignment restriction should be
- * applied to the W x H (case a). Input image should be at least (2 x 2).
- *
- * "Support If packer source image height <= 256, multiple of 8", this
- * restriction should be applied to the rotated image (W`` x H``)
- */
-
-uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode,
- uint32_t src_width,
- uint32_t dst_width,
- uint32_t dst_height,
- uint32_t dstp0_ystride,
- uint32_t is_planar420)
-{
- uint32_t hw_limit;
-
- hw_limit = is_planar420 ? 512 : 256;
-
- /* checking image constraints for missing EOT event from pix_rot block */
- if ((src_width > hw_limit) && ((src_width % (hw_limit / 2)) == 8))
- return -EINVAL;
-
- if (rot_mode & MDP_ROT_90) {
-
- /* if rotation 90 degree on fast yuv
- * rotator image input width has to be multiple of 8
- * rotator image input height has to be multiple of 8
- */
- if (((dst_width % 8) != 0) || ((dst_height % 8) != 0))
- return -EINVAL;
-
- if ((rot_mode & MDP_FLIP_UD) ||
- (rot_mode & (MDP_FLIP_UD | MDP_FLIP_LR))) {
-
- /* image constraint checking for wrong address
- * generation HW issue for Y plane checking
- */
- if (((dst_height % 64) != 0) &&
- ((dst_height / 64) >= 4)) {
-
- /* compare golden logic for second
- * tile base address generation in row
- * with actual HW implementation
- */
- if (BASE_ADDR(dst_height, dstp0_ystride) !=
- HW_BASE_ADDR(dst_height, dstp0_ystride))
- return -EINVAL;
- }
-
- if (is_planar420) {
- dst_width = dst_width / 2;
- dstp0_ystride = dstp0_ystride / 2;
- }
-
- dst_height = dst_height / 2;
-
- /* image constraint checking for wrong
- * address generation HW issue. for
- * U/V (P) or UV (PP) plane checking
- */
- if (((dst_height % 64) != 0) && ((dst_height / 64) >=
- (hw_limit / 128))) {
-
- /* compare golden logic for
- * second tile base address
- * generation in row with
- * actual HW implementation
- */
- if (BASE_ADDR(dst_height, dstp0_ystride) !=
- HW_BASE_ADDR(dst_height, dstp0_ystride))
- return -EINVAL;
- }
- }
- } else {
- /* if NOT applying rotation 90 degree on fast yuv,
- * rotator image input width has to be multiple of 8
- * rotator image input height has to be multiple of 2
- */
- if (((dst_width % 8) != 0) || ((dst_height % 2) != 0))
- return -EINVAL;
- }
-
- return 0;
-}
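/*
 * One concrete walk-through of the geometry described above (sizes chosen
 * for illustration): fetch W x H = 1280 x 720 NV12 with no downscale, so
 * W' x H' = 1280 x 720, and rotation 90 gives W'' x H'' = 720 x 1280. The
 * source ROI width limit applies to 1280 (case a), the packer checks apply
 * to 720 x 1280 (case c), and with MDP_ROT_90 set both 720 and 1280 must be
 * multiples of 8, which they are, so this case keeps fast YUV enabled.
 */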
-
static int msm_rotator_ycxcx_h2v1(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
@@ -833,38 +542,22 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
int new_session,
unsigned int in_chroma_paddr,
unsigned int out_chroma_paddr,
- unsigned int in_chroma2_paddr,
- unsigned int out_chroma2_paddr)
+ unsigned int in_chroma2_paddr)
{
uint32_t dst_format;
int is_tile = 0;
- struct msm_rotator_session *rot_ssn =
- container_of(info, struct msm_rotator_session, img_info);
- int fast_yuv_en = rot_ssn->fast_yuv_enable;
switch (info->src.format) {
case MDP_Y_CRCB_H2V2_TILE:
is_tile = 1;
- dst_format = MDP_Y_CRCB_H2V2;
- break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
- if (fast_yuv_en) {
- dst_format = info->src.format;
- break;
- }
case MDP_Y_CRCB_H2V2:
dst_format = MDP_Y_CRCB_H2V2;
break;
- case MDP_Y_CB_CR_H2V2:
- if (fast_yuv_en) {
- dst_format = info->src.format;
- break;
- }
- dst_format = MDP_Y_CBCR_H2V2;
- break;
case MDP_Y_CBCR_H2V2_TILE:
is_tile = 1;
+ case MDP_Y_CB_CR_H2V2:
case MDP_Y_CBCR_H2V2:
dst_format = MDP_Y_CBCR_H2V2;
break;
@@ -888,12 +581,8 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP0_ADDR);
iowrite32(out_chroma_paddr +
- (((info->dst_y * info->dst.width)/2) + info->dst_x),
+ ((info->dst_y * info->dst.width)/2 + info->dst_x),
MSM_ROTATOR_OUTP1_ADDR);
- if (out_chroma2_paddr)
- iowrite32(out_chroma2_paddr +
- (((info->dst_y * info->dst.width)/2) + info->dst_x),
- MSM_ROTATOR_OUTP2_ADDR);
if (new_session) {
if (in_chroma2_paddr) {
@@ -915,28 +604,11 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
info->src.width << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
}
- if (out_chroma2_paddr) {
- if (info->dst.format == MDP_Y_CR_CB_GH2V2) {
- iowrite32(ALIGN(info->dst.width, 16) |
- ALIGN((info->dst.width / 2), 16) << 16,
- MSM_ROTATOR_OUT_YSTRIDE1);
- iowrite32(ALIGN((info->dst.width / 2), 16),
- MSM_ROTATOR_OUT_YSTRIDE2);
- } else {
- iowrite32(info->dst.width |
- info->dst.width/2 << 16,
- MSM_ROTATOR_OUT_YSTRIDE1);
- iowrite32(info->dst.width/2,
- MSM_ROTATOR_OUT_YSTRIDE2);
- }
- } else {
- iowrite32(info->dst.width |
- info->dst.width << 16,
- MSM_ROTATOR_OUT_YSTRIDE1);
- }
+ iowrite32(info->dst.width |
+ info->dst.width << 16,
+ MSM_ROTATOR_OUT_YSTRIDE1);
- if (dst_format == MDP_Y_CBCR_H2V2 ||
- dst_format == MDP_Y_CB_CR_H2V2) {
+ if (dst_format == MDP_Y_CBCR_H2V2) {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
@@ -947,11 +619,9 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
}
-
iowrite32((3 << 18) | /* chroma sampling 3=4:2:0 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
- fast_yuv_en << 4 | /*fast YUV*/
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
@@ -973,7 +643,7 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
return 0;
}
-static int msm_rotator_ycxycx(struct msm_rotator_img_info *info,
+static int msm_rotator_ycrycb(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
@@ -983,22 +653,10 @@ static int msm_rotator_ycxycx(struct msm_rotator_img_info *info,
int bpp;
uint32_t dst_format;
- switch (info->src.format) {
- case MDP_YCBYCR_H2V1:
- if (info->rotations & MDP_ROT_90)
- dst_format = MDP_Y_CBCR_H1V2;
- else
- dst_format = MDP_Y_CBCR_H2V1;
- break;
- case MDP_YCRYCB_H2V1:
- if (info->rotations & MDP_ROT_90)
- dst_format = MDP_Y_CRCB_H1V2;
- else
- dst_format = MDP_Y_CRCB_H2V1;
- break;
- default:
+ if (info->src.format == MDP_YCRYCB_H2V1)
+ dst_format = MDP_Y_CRCB_H2V1;
+ else
return -EINVAL;
- }
if (info->dst.format != dst_format)
return -EINVAL;
@@ -1027,18 +685,10 @@ static int msm_rotator_ycxycx(struct msm_rotator_img_info *info,
(info->dst.width) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
- if (dst_format == MDP_Y_CBCR_H1V2 ||
- dst_format == MDP_Y_CBCR_H2V1) {
- iowrite32(GET_PACK_PATTERN(0, CLR_CB, 0, CLR_CR, 8),
- MSM_ROTATOR_SRC_UNPACK_PATTERN1);
- iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
- MSM_ROTATOR_OUT_PACK_PATTERN1);
- } else {
- iowrite32(GET_PACK_PATTERN(0, CLR_CR, 0, CLR_CB, 8),
- MSM_ROTATOR_SRC_UNPACK_PATTERN1);
- iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
- MSM_ROTATOR_OUT_PACK_PATTERN1);
- }
+ iowrite32(GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8),
+ MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+ iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+ MSM_ROTATOR_OUT_PACK_PATTERN1);
iowrite32((1 << 18) | /* chroma sampling 1=H2V1 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
@@ -1187,6 +837,10 @@ static int get_img(struct msmfb_data *fbd, int domain,
struct file *file = NULL;
int put_needed, fb_num;
#endif
+#ifdef CONFIG_ANDROID_PMEM
+ unsigned long vstart;
+#endif
+
*p_need = 0;
#ifdef CONFIG_FB
@@ -1217,14 +871,27 @@ static int get_img(struct msmfb_data *fbd, int domain,
}
#endif
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
len, p_ihdl, secure);
+#endif
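+	/* note: with CONFIG_MSM_MULTIMEDIA_USE_ION set, the unconditional
+	 * return above makes the pmem fallback below unreachable, so ion
+	 * effectively takes precedence over pmem in this build */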
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
+ return 0;
+ else
+ return -ENOMEM;
+#endif
}
static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
int domain, unsigned int secure)
{
+#ifdef CONFIG_ANDROID_PMEM
+ if (p_file != NULL)
+ put_pmem_file(p_file);
+#endif
+
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
if (!IS_ERR_OR_NULL(p_ihdl)) {
pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
@@ -1254,7 +921,7 @@ static int msm_rotator_do_rotate(unsigned long arg)
struct ion_handle *srcp1_ihdl = NULL, *dstp1_ihdl = NULL;
int ps0_need = 0, p_need;
unsigned int in_chroma_paddr = 0, out_chroma_paddr = 0;
- unsigned int in_chroma2_paddr = 0, out_chroma2_paddr = 0;
+ unsigned int in_chroma2_paddr = 0;
struct msm_rotator_img_info *img_info;
struct msm_rotator_mem_planes src_planes, dst_planes;
@@ -1263,9 +930,9 @@ static int msm_rotator_do_rotate(unsigned long arg)
mutex_lock(&msm_rotator_dev->rotator_lock);
for (s = 0; s < MAX_SESSIONS; s++)
- if ((msm_rotator_dev->rot_session[s] != NULL) &&
+ if ((msm_rotator_dev->img_info[s] != NULL) &&
(info.session_id ==
- (unsigned int)msm_rotator_dev->rot_session[s]
+ (unsigned int)msm_rotator_dev->img_info[s]
))
break;
@@ -1273,27 +940,25 @@ static int msm_rotator_do_rotate(unsigned long arg)
pr_err("%s() : Attempt to use invalid session_id %d\n",
__func__, s);
rc = -EINVAL;
- mutex_unlock(&msm_rotator_dev->rotator_lock);
- return rc;
+ goto do_rotate_unlock_mutex;
}
- img_info = &(msm_rotator_dev->rot_session[s]->img_info);
- if (img_info->enable == 0) {
+ if (msm_rotator_dev->img_info[s]->enable == 0) {
dev_dbg(msm_rotator_dev->device,
- "%s() : Session_id %d not enabled\n", __func__, s);
+ "%s() : Session_id %d not enabled \n",
+ __func__, s);
rc = -EINVAL;
- mutex_unlock(&msm_rotator_dev->rotator_lock);
- return rc;
+ goto do_rotate_unlock_mutex;
}
+ img_info = msm_rotator_dev->img_info[s];
if (msm_rotator_get_plane_sizes(img_info->src.format,
img_info->src.width,
img_info->src.height,
&src_planes)) {
pr_err("%s: invalid src format\n", __func__);
rc = -EINVAL;
- mutex_unlock(&msm_rotator_dev->rotator_lock);
- return rc;
+ goto do_rotate_unlock_mutex;
}
if (msm_rotator_get_plane_sizes(img_info->dst.format,
img_info->dst.width,
@@ -1301,8 +966,7 @@ static int msm_rotator_do_rotate(unsigned long arg)
&dst_planes)) {
pr_err("%s: invalid dst format\n", __func__);
rc = -EINVAL;
- mutex_unlock(&msm_rotator_dev->rotator_lock);
- return rc;
+ goto do_rotate_unlock_mutex;
}
rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr,
@@ -1323,7 +987,7 @@ static int msm_rotator_do_rotate(unsigned long arg)
goto do_rotate_unlock_mutex;
}
- format = img_info->src.format;
+ format = msm_rotator_dev->img_info[s]->src.format;
if (((info.version_key & VERSION_KEY_MASK) == 0xA5B4C300) &&
((info.version_key & ~VERSION_KEY_MASK) > 0) &&
(src_planes.num_planes == 2)) {
@@ -1412,10 +1076,6 @@ static int msm_rotator_do_rotate(unsigned long arg)
out_chroma_paddr = out_paddr + dst_planes.plane_size[0];
if (src_planes.num_planes >= 3)
in_chroma2_paddr = in_chroma_paddr + src_planes.plane_size[1];
- if (dst_planes.num_planes >= 3)
- out_chroma2_paddr = out_chroma_paddr + dst_planes.plane_size[1];
-
- msm_rotator_wait_for_fence(s);
cancel_delayed_work(&msm_rotator_dev->rot_clk_work);
if (msm_rotator_dev->rot_clk_state != CLK_EN) {
@@ -1437,17 +1097,17 @@ static int msm_rotator_do_rotate(unsigned long arg)
if (use_imem)
iowrite32(0x42, MSM_ROTATOR_MAX_BURST_SIZE);
- iowrite32(((img_info->src_rect.h & 0x1fff)
+ iowrite32(((msm_rotator_dev->img_info[s]->src_rect.h & 0x1fff)
<< 16) |
- (img_info->src_rect.w & 0x1fff),
+ (msm_rotator_dev->img_info[s]->src_rect.w & 0x1fff),
MSM_ROTATOR_SRC_SIZE);
- iowrite32(((img_info->src_rect.y & 0x1fff)
+ iowrite32(((msm_rotator_dev->img_info[s]->src_rect.y & 0x1fff)
<< 16) |
- (img_info->src_rect.x & 0x1fff),
+ (msm_rotator_dev->img_info[s]->src_rect.x & 0x1fff),
MSM_ROTATOR_SRC_XY);
- iowrite32(((img_info->src.height & 0x1fff)
+ iowrite32(((msm_rotator_dev->img_info[s]->src.height & 0x1fff)
<< 16) |
- (img_info->src.width & 0x1fff),
+ (msm_rotator_dev->img_info[s]->src.width & 0x1fff),
MSM_ROTATOR_SRC_IMAGE_SIZE);
#ifdef CONFIG_HUAWEI_KERNEL
@@ -1468,7 +1128,7 @@ static int msm_rotator_do_rotate(unsigned long arg)
case MDP_RGBX_8888:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
- rc = msm_rotator_rgb_types(img_info,
+ rc = msm_rotator_rgb_types(msm_rotator_dev->img_info[s],
in_paddr, out_paddr,
use_imem,
msm_rotator_dev->last_session_idx
@@ -1481,27 +1141,25 @@ static int msm_rotator_do_rotate(unsigned long arg)
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2_TILE:
- rc = msm_rotator_ycxcx_h2v2(img_info,
+ rc = msm_rotator_ycxcx_h2v2(msm_rotator_dev->img_info[s],
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx
!= s,
in_chroma_paddr,
out_chroma_paddr,
- in_chroma2_paddr,
- out_chroma2_paddr);
+ in_chroma2_paddr);
break;
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H2V1:
- rc = msm_rotator_ycxcx_h2v1(img_info,
+ rc = msm_rotator_ycxcx_h2v1(msm_rotator_dev->img_info[s],
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx
!= s,
in_chroma_paddr,
out_chroma_paddr);
break;
- case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
- rc = msm_rotator_ycxycx(img_info,
+ rc = msm_rotator_ycrycb(msm_rotator_dev->img_info[s],
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx != s,
out_chroma_paddr);
@@ -1547,17 +1205,16 @@ do_rotate_exit:
schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
do_rotate_unlock_mutex:
put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
- msm_rotator_dev->rot_session[s]->img_info.secure);
+ msm_rotator_dev->img_info[s]->secure);
put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
- msm_rotator_dev->rot_session[s]->img_info.secure);
+ msm_rotator_dev->img_info[s]->secure);
/* only source may use frame buffer */
if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
fput_light(srcp0_file, ps0_need);
else
put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
- msm_rotator_signal_timeline(s);
mutex_unlock(&msm_rotator_dev->rotator_lock);
dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
__func__, rc);
@@ -1588,14 +1245,10 @@ static int msm_rotator_start(unsigned long arg,
struct msm_rotator_fd_info *fd_info)
{
struct msm_rotator_img_info info;
- struct msm_rotator_session *rot_session = NULL;
int rc = 0;
int s, is_rgb = 0;
- int first_free_idx = INVALID_SESSION;
+ int first_free_index = INVALID_SESSION;
unsigned int dst_w, dst_h;
- unsigned int is_planar420 = 0;
- int fast_yuv_en = 0;
- struct rot_sync_info *sync_info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -1627,30 +1280,6 @@ static int msm_rotator_start(unsigned long arg,
}
switch (info.src.format) {
- case MDP_Y_CB_CR_H2V2:
- case MDP_Y_CR_CB_H2V2:
- case MDP_Y_CR_CB_GH2V2:
- is_planar420 = 1;
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CRCB_H2V2_TILE:
- case MDP_Y_CBCR_H2V2_TILE:
- if (rotator_hw_revision >= ROTATOR_REVISION_V2 &&
- !(info.downscale_ratio &&
- (info.rotations & MDP_ROT_90)))
- fast_yuv_en = !fast_yuv_invalid_size_checker(
- info.rotations,
- info.src.width,
- dst_w,
- dst_h,
- dst_w,
- is_planar420);
- break;
- default:
- fast_yuv_en = 0;
- }
-
- switch (info.src.format) {
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_RGB_888:
@@ -1670,32 +1299,15 @@ static int msm_rotator_start(unsigned long arg,
case MDP_YCRCB_H1V1:
info.dst.format = info.src.format;
break;
- case MDP_YCBYCR_H2V1:
- if (info.rotations & MDP_ROT_90)
- info.dst.format = MDP_Y_CBCR_H1V2;
- else
- info.dst.format = MDP_Y_CBCR_H2V1;
- break;
case MDP_YCRYCB_H2V1:
- if (info.rotations & MDP_ROT_90)
- info.dst.format = MDP_Y_CRCB_H1V2;
- else
- info.dst.format = MDP_Y_CRCB_H2V1;
+ info.dst.format = MDP_Y_CRCB_H2V1;
break;
case MDP_Y_CB_CR_H2V2:
- if (fast_yuv_en) {
- info.dst.format = info.src.format;
- break;
- }
case MDP_Y_CBCR_H2V2_TILE:
info.dst.format = MDP_Y_CBCR_H2V2;
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
- if (fast_yuv_en) {
- info.dst.format = info.src.format;
- break;
- }
case MDP_Y_CRCB_H2V2_TILE:
info.dst.format = MDP_Y_CRCB_H2V2;
break;
@@ -1708,14 +1320,12 @@ static int msm_rotator_start(unsigned long arg,
msm_rotator_set_perf_level((info.src.width*info.src.height), is_rgb);
for (s = 0; s < MAX_SESSIONS; s++) {
- if ((msm_rotator_dev->rot_session[s] != NULL) &&
+ if ((msm_rotator_dev->img_info[s] != NULL) &&
(info.session_id ==
- (unsigned int)msm_rotator_dev->rot_session[s]
+ (unsigned int)msm_rotator_dev->img_info[s]
)) {
- rot_session = msm_rotator_dev->rot_session[s];
- rot_session->img_info = info;
- rot_session->fd_info = *fd_info;
- rot_session->fast_yuv_enable = fast_yuv_en;
+ *(msm_rotator_dev->img_info[s]) = info;
+ msm_rotator_dev->fd_info[s] = fd_info;
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
@@ -1723,30 +1333,27 @@ static int msm_rotator_start(unsigned long arg,
break;
}
- if ((msm_rotator_dev->rot_session[s] == NULL) &&
- (first_free_idx == INVALID_SESSION))
- first_free_idx = s;
+ if ((msm_rotator_dev->img_info[s] == NULL) &&
+ (first_free_index ==
+ INVALID_SESSION))
+ first_free_index = s;
}
- if ((s == MAX_SESSIONS) && (first_free_idx != INVALID_SESSION)) {
+ if ((s == MAX_SESSIONS) && (first_free_index != INVALID_SESSION)) {
/* allocate a session id */
- msm_rotator_dev->rot_session[first_free_idx] =
- kzalloc(sizeof(struct msm_rotator_session),
+ msm_rotator_dev->img_info[first_free_index] =
+ kzalloc(sizeof(struct msm_rotator_img_info),
GFP_KERNEL);
- if (!msm_rotator_dev->rot_session[first_free_idx]) {
+ if (!msm_rotator_dev->img_info[first_free_index]) {
printk(KERN_ERR "%s : unable to alloc mem\n",
__func__);
rc = -ENOMEM;
goto rotator_start_exit;
}
info.session_id = (unsigned int)
- msm_rotator_dev->rot_session[first_free_idx];
- rot_session = msm_rotator_dev->rot_session[first_free_idx];
-
- rot_session->img_info = info;
- rot_session->fd_info = *fd_info;
- rot_session->fast_yuv_enable = fast_yuv_en;
- s = first_free_idx;
+ msm_rotator_dev->img_info[first_free_index];
+ *(msm_rotator_dev->img_info[first_free_index]) = info;
+ msm_rotator_dev->fd_info[first_free_index] = fd_info;
} else if (s == MAX_SESSIONS) {
dev_dbg(msm_rotator_dev->device, "%s: all sessions in use\n",
__func__);
@@ -1773,16 +1380,15 @@ static int msm_rotator_finish(unsigned long arg)
mutex_lock(&msm_rotator_dev->rotator_lock);
for (s = 0; s < MAX_SESSIONS; s++) {
- if ((msm_rotator_dev->rot_session[s] != NULL) &&
+ if ((msm_rotator_dev->img_info[s] != NULL) &&
(session_id ==
- (unsigned int)msm_rotator_dev->rot_session[s])) {
+ (unsigned int)msm_rotator_dev->img_info[s])) {
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
INVALID_SESSION;
- msm_rotator_signal_timeline(s);
- msm_rotator_release_acq_fence(s);
- kfree(msm_rotator_dev->rot_session[s]);
- msm_rotator_dev->rot_session[s] = NULL;
+ kfree(msm_rotator_dev->img_info[s]);
+ msm_rotator_dev->img_info[s] = NULL;
+ msm_rotator_dev->fd_info[s] = NULL;
break;
}
}
@@ -1808,7 +1414,7 @@ msm_rotator_open(struct inode *inode, struct file *filp)
mutex_lock(&msm_rotator_dev->rotator_lock);
for (i = 0; i < MAX_SESSIONS; i++) {
- if (msm_rotator_dev->rot_session[i] == NULL)
+ if (msm_rotator_dev->fd_info[i] == NULL)
break;
}
@@ -1852,20 +1458,20 @@ msm_rotator_close(struct inode *inode, struct file *filp)
fd_info = (struct msm_rotator_fd_info *)filp->private_data;
mutex_lock(&msm_rotator_dev->rotator_lock);
- msm_rotator_release_all_timeline();
if (--fd_info->ref_cnt > 0) {
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
for (s = 0; s < MAX_SESSIONS; s++) {
- if (msm_rotator_dev->rot_session[s] != NULL &&
- &(msm_rotator_dev->rot_session[s]->fd_info) == fd_info) {
+ if (msm_rotator_dev->img_info[s] != NULL &&
+ msm_rotator_dev->fd_info[s] == fd_info) {
pr_debug("%s: freeing rotator session %p (pid %d)\n",
- __func__, msm_rotator_dev->rot_session[s],
+ __func__, msm_rotator_dev->img_info[s],
fd_info->pid);
- kfree(msm_rotator_dev->rot_session[s]);
- msm_rotator_dev->rot_session[s] = NULL;
+ kfree(msm_rotator_dev->img_info[s]);
+ msm_rotator_dev->img_info[s] = NULL;
+ msm_rotator_dev->fd_info[s] = NULL;
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
INVALID_SESSION;
@@ -1898,9 +1504,6 @@ static long msm_rotator_ioctl(struct file *file, unsigned cmd,
return msm_rotator_do_rotate(arg);
case MSM_ROTATOR_IOCTL_FINISH:
return msm_rotator_finish(arg);
- case MSM_ROTATOR_IOCTL_BUFFER_SYNC:
- return msm_rotator_buf_sync(arg);
-
 /* add an ioctl macro to get a flag that decides how many degrees to rotate */
#ifdef CONFIG_HUAWEI_KERNEL
case MSM_ROTATOR_IOCTL_MIRROR_FLIP:
@@ -1940,7 +1543,7 @@ static int __devinit msm_rotator_probe(struct platform_device *pdev)
return -ENOMEM;
}
for (i = 0; i < MAX_SESSIONS; i++)
- msm_rotator_dev->rot_session[i] = NULL;
+ msm_rotator_dev->img_info[i] = NULL;
msm_rotator_dev->last_session_idx = INVALID_SESSION;
pdata = pdev->dev.platform_data;
@@ -2156,11 +1759,7 @@ static int __devexit msm_rotator_remove(struct platform_device *plat_dev)
int i;
#ifdef CONFIG_MSM_BUS_SCALING
- if (msm_rotator_dev->bus_client_handle) {
- msm_bus_scale_unregister_client
- (msm_rotator_dev->bus_client_handle);
- msm_rotator_dev->bus_client_handle = 0;
- }
+ msm_bus_scale_unregister_client(msm_rotator_dev->bus_client_handle);
#endif
free_irq(msm_rotator_dev->irq, NULL);
mutex_destroy(&msm_rotator_dev->rotator_lock);
@@ -2185,8 +1784,8 @@ static int __devexit msm_rotator_remove(struct platform_device *plat_dev)
msm_rotator_dev->pclk = NULL;
mutex_destroy(&msm_rotator_dev->imem_lock);
for (i = 0; i < MAX_SESSIONS; i++)
- if (msm_rotator_dev->rot_session[i] != NULL)
- kfree(msm_rotator_dev->rot_session[i]);
+ if (msm_rotator_dev->img_info[i] != NULL)
+ kfree(msm_rotator_dev->img_info[i]);
kfree(msm_rotator_dev);
return 0;
}
@@ -2206,7 +1805,6 @@ static int msm_rotator_suspend(struct platform_device *dev, pm_message_t state)
disable_rot_clks();
msm_rotator_dev->rot_clk_state = CLK_SUSPEND;
}
- msm_rotator_release_all_timeline();
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_gov_msm.c b/drivers/cpufreq/cpufreq_gov_msm.c
index 8f086aa..9c49f80 100644
--- a/drivers/cpufreq/cpufreq_gov_msm.c
+++ b/drivers/cpufreq/cpufreq_gov_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,114 +18,20 @@
#include <linux/kobject.h>
#include <linux/cpufreq.h>
#include <linux/platform_device.h>
-#include <linux/cpu_pm.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
#include <mach/msm_dcvs.h>
-struct cpu_idle_info {
- int enabled;
- int dcvs_core_id;
- struct pm_qos_request pm_qos_req;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
-static uint32_t latency;
-
-static int msm_dcvs_idle_notifier(int core_num,
- enum msm_core_control_event event)
-{
- struct cpu_idle_info *info = &per_cpu(cpu_idle_info, core_num);
-
- switch (event) {
- case MSM_DCVS_ENABLE_IDLE_PULSE:
- info->enabled = true;
- break;
-
- case MSM_DCVS_DISABLE_IDLE_PULSE:
- info->enabled = false;
- break;
-
- case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
- pm_qos_update_request(&info->pm_qos_req, PM_QOS_DEFAULT_VALUE);
- break;
-
- case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
- pm_qos_update_request(&info->pm_qos_req, latency);
- break;
- }
-
- return 0;
-}
-
-static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
- void *v)
-{
- struct cpu_idle_info *info =
- &per_cpu(cpu_idle_info, smp_processor_id());
- u64 io_wait_us = 0;
- u64 prev_io_wait_us = 0;
- u64 last_update_time = 0;
- u64 val = 0;
- uint32_t iowaited = 0;
-
- if (!info->enabled)
- return NOTIFY_OK;
-
- switch (cmd) {
- case CPU_PM_ENTER:
- val = get_cpu_iowait_time_us(smp_processor_id(),
- &last_update_time);
- /* val could be -1 when NOHZ is not enabled */
- if (val == (u64)-1)
- val = 0;
- per_cpu(iowait_on_cpu, smp_processor_id()) = val;
- msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
- break;
-
- case CPU_PM_EXIT:
- prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
- val = get_cpu_iowait_time_us(smp_processor_id(),
- &last_update_time);
- if (val == (u64)-1)
- val = 0;
- io_wait_us = val;
- iowaited = (io_wait_us - prev_io_wait_us);
- msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_EXIT, iowaited);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block idle_nb = {
- .notifier_call = msm_cpuidle_notifier,
-};
-
-static void msm_gov_idle_source_init(int cpu, int dcvs_core_id)
-{
- struct cpu_idle_info *info = NULL;
-
- info = &per_cpu(cpu_idle_info, cpu);
- info->dcvs_core_id = dcvs_core_id;
-
- pm_qos_add_request(&info->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-}
-
struct msm_gov {
- int cpu;
- unsigned int cur_freq;
- unsigned int min_freq;
- unsigned int max_freq;
- struct cpufreq_policy *policy;
- int dcvs_core_id;
+ int cpu;
+ unsigned int cur_freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+ struct msm_dcvs_freq gov_notifier;
+ struct cpufreq_policy *policy;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct mutex, gov_mutex);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_gov, msm_gov_info);
+static char core_name[NR_CPUS][10];
static void msm_gov_check_limits(struct cpufreq_policy *policy)
{
@@ -134,7 +40,7 @@ static void msm_gov_check_limits(struct cpufreq_policy *policy)
if (policy->max < gov->cur_freq)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > gov->cur_freq)
+ else if (policy->min > gov->min_freq)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
else
@@ -144,14 +50,14 @@ static void msm_gov_check_limits(struct cpufreq_policy *policy)
gov->cur_freq = policy->cur;
gov->min_freq = policy->min;
gov->max_freq = policy->max;
- msm_dcvs_update_limits(gov->dcvs_core_id);
}
-static int msm_dcvs_freq_set(int core_num,
+static int msm_dcvs_freq_set(struct msm_dcvs_freq *self,
unsigned int freq)
{
int ret = -EINVAL;
- struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
+ struct msm_gov *gov =
+ container_of(self, struct msm_gov, gov_notifier);
mutex_lock(&per_cpu(gov_mutex, gov->cpu));
@@ -160,30 +66,23 @@ static int msm_dcvs_freq_set(int core_num,
if (freq > gov->max_freq)
freq = gov->max_freq;
- mutex_unlock(&per_cpu(gov_mutex, gov->cpu));
+ ret = __cpufreq_driver_target(gov->policy, freq, CPUFREQ_RELATION_L);
+ gov->cur_freq = gov->policy->cur;
- ret = cpufreq_driver_target(gov->policy, freq, CPUFREQ_RELATION_L);
+ mutex_unlock(&per_cpu(gov_mutex, gov->cpu));
- if (!ret) {
- gov->cur_freq = cpufreq_quick_get(gov->cpu);
- if (freq != gov->cur_freq)
- pr_err("cpu %d freq %u gov->cur_freq %u didn't match",
- gov->cpu, freq, gov->cur_freq);
- }
- ret = gov->cur_freq;
+ if (!ret)
+ return gov->cur_freq;
return ret;
}
-static unsigned int msm_dcvs_freq_get(int core_num)
+static unsigned int msm_dcvs_freq_get(struct msm_dcvs_freq *self)
{
- struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
- /*
- * the rw_sem in cpufreq is always held when this is called.
- * The policy->cur won't be updated in this case - so it is safe to
- * access policy->cur
- */
- return gov->policy->cur;
+ struct msm_gov *gov =
+ container_of(self, struct msm_gov, gov_notifier);
+
+ return gov->cur_freq;
}
static int cpufreq_governor_msm(struct cpufreq_policy *policy,
@@ -193,6 +92,8 @@ static int cpufreq_governor_msm(struct cpufreq_policy *policy,
int ret = 0;
int handle = 0;
struct msm_gov *gov = &per_cpu(msm_gov_info, policy->cpu);
+ struct msm_dcvs_freq *dcvs_notifier =
+ &(per_cpu(msm_gov_info, cpu).gov_notifier);
switch (event) {
case CPUFREQ_GOV_START:
@@ -202,14 +103,19 @@ static int cpufreq_governor_msm(struct cpufreq_policy *policy,
mutex_lock(&per_cpu(gov_mutex, cpu));
per_cpu(msm_gov_info, cpu).cpu = cpu;
gov->policy = policy;
- handle = msm_dcvs_freq_sink_start(gov->dcvs_core_id);
+ dcvs_notifier->core_name = core_name[cpu];
+ dcvs_notifier->set_frequency = msm_dcvs_freq_set;
+ dcvs_notifier->get_frequency = msm_dcvs_freq_get;
+ handle = msm_dcvs_freq_sink_register(dcvs_notifier);
BUG_ON(handle < 0);
msm_gov_check_limits(policy);
mutex_unlock(&per_cpu(gov_mutex, cpu));
break;
case CPUFREQ_GOV_STOP:
- msm_dcvs_freq_sink_stop(gov->dcvs_core_id);
+ mutex_lock(&per_cpu(gov_mutex, cpu));
+ msm_dcvs_freq_sink_unregister(dcvs_notifier);
+ mutex_unlock(&per_cpu(gov_mutex, cpu));
break;
case CPUFREQ_GOV_LIMITS:
@@ -230,41 +136,21 @@ struct cpufreq_governor cpufreq_gov_msm = {
static int __devinit msm_gov_probe(struct platform_device *pdev)
{
+ int ret = 0;
int cpu;
+ uint32_t group_id = 0x43505530; /* CPU0 */
struct msm_dcvs_core_info *core = NULL;
- struct msm_dcvs_core_info *core_info = NULL;
- struct msm_gov_platform_data *pdata = pdev->dev.platform_data;
- int sensor = 0;
core = pdev->dev.platform_data;
- core_info = pdata->info;
- latency = pdata->latency;
for_each_possible_cpu(cpu) {
- struct msm_gov *gov = &per_cpu(msm_gov_info, cpu);
-
mutex_init(&per_cpu(gov_mutex, cpu));
- if (cpu < core->num_cores)
- sensor = core_info->sensors[cpu];
- gov->dcvs_core_id = msm_dcvs_register_core(
- MSM_DCVS_CORE_TYPE_CPU,
- cpu,
- core_info,
- msm_dcvs_freq_set,
- msm_dcvs_freq_get,
- msm_dcvs_idle_notifier,
- NULL,
- sensor);
- if (gov->dcvs_core_id < 0) {
+ snprintf(core_name[cpu], 10, "cpu%d", cpu);
+ ret = msm_dcvs_register_core(core_name[cpu], group_id, core);
+ if (ret)
pr_err("Unable to register core for %d\n", cpu);
- return -EINVAL;
- }
-
- msm_gov_idle_source_init(cpu, gov->dcvs_core_id);
}
- cpu_pm_register_notifier(&idle_nb);
-
return cpufreq_register_governor(&cpufreq_gov_msm);
}
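/*
 * Net effect of the cpufreq hunks above, condensed: the governor goes back
 * to registering a struct msm_dcvs_freq sink per CPU at CPUFREQ_GOV_START,
 * and unregistering it at CPUFREQ_GOV_STOP under the per-CPU gov_mutex,
 * instead of registering a dcvs core id once at probe time:
 *
 *	dcvs_notifier->core_name     = core_name[cpu];
 *	dcvs_notifier->set_frequency = msm_dcvs_freq_set;
 *	dcvs_notifier->get_frequency = msm_dcvs_freq_get;
 *	handle = msm_dcvs_freq_sink_register(dcvs_notifier);
 *
 * where core_name[cpu] is the "cpu%d" string built in msm_gov_probe().
 */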
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 341de30..55cf651 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -479,6 +479,19 @@ static int count_sg(struct scatterlist *sg, int nbytes)
return i;
}
+static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
+ struct scatterlist *sg)
+{
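+	/*
+	 * Inferred from the qcedev caller further below: each pmem->offset
+	 * has already been rebased to a physical address (pmem region base
+	 * plus offset), so it can be stored directly as the sg DMA address
+	 * with no extra mapping step.
+	 */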
+ int i = 0;
+ for (i = 0; i < entries; i++) {
+
+ sg->dma_address = (dma_addr_t)pmem->offset;
+ sg++;
+ pmem++;
+ }
+ return 0;
+}
+
static int _probe_ce_engine(struct qce_device *pce_dev)
{
unsigned int val;
@@ -1045,6 +1058,52 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev)
return 0;
};
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+ struct ablkcipher_request *areq;
+ uint32_t iv_out[4];
+ unsigned char iv[4 * sizeof(uint32_t)];
+ uint32_t status;
+
+ areq = (struct ablkcipher_request *) pce_dev->areq;
+
+ /* check ce error status */
+ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+ if (status & (1 << CRYPTO_SW_ERR)) {
+ pce_dev->err++;
+ dev_err(pce_dev->pdev,
+ "Qualcomm Crypto Error at 0x%x, status%x\n",
+ pce_dev->phy_iobase, status);
+ _init_ce_engine(pce_dev);
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+ return 0;
+ };
+
+ /* get iv out */
+ if (pce_dev->mode == QCE_MODE_ECB) {
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ } else {
+ iv_out[0] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ iv_out[1] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ iv_out[2] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR2_IV2_REG);
+ iv_out[3] = readl_relaxed(pce_dev->iobase +
+ CRYPTO_CNTR3_IV3_REG);
+
+ _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+ clk_disable(pce_dev->ce_clk);
+ pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+ pce_dev->chan_ce_out_status);
+ }
+
+ return 0;
+};
+
static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
unsigned int plen, unsigned int paddr, int *index)
{
@@ -1470,6 +1529,53 @@ static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
};
+static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_in_status = -1;
+ } else
+ pce_dev->chan_ce_in_status = 0;
+
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_use_pmem_complete(pce_dev);
+ }
+};
+
+static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result, struct msm_dmov_errdata *err)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = (struct qce_device *) cmd_ptr->user;
+ if (result != ADM_STATUS_OK) {
+ dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+ result);
+ pce_dev->chan_ce_out_status = -1;
+ } else {
+ pce_dev->chan_ce_out_status = 0;
+ };
+
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+ if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+ pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+ pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_use_pmem_complete(pce_dev);
+ }
+};
+
static int _setup_cmd_template(struct qce_device *pce_dev)
{
dmov_sg *pcmd;
@@ -2051,9 +2157,13 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
/* cipher input */
pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
- qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ if (c_req->use_pmem != 1)
+ dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
+ else
+ dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
+ areq->src);
if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
rc = -ENOMEM;
@@ -2063,8 +2173,12 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
/* cipher output */
if (areq->src != areq->dst) {
pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
- qce_dma_map_sg(pce_dev->pdev, areq->dst,
+ if (c_req->use_pmem != 1)
+ dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
+ else
+ dma_map_pmem_sg(&c_req->pmem->dst[0],
+ pce_dev->dst_nents, areq->dst);
};
if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
rc = -ENOMEM;
@@ -2101,25 +2215,34 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
/* setup for callback, and issue command to adm */
pce_dev->areq = areq;
pce_dev->qce_cb = c_req->qce_cb;
- pce_dev->chan_ce_in_cmd->complete_func =
+ if (c_req->use_pmem == 1) {
+ pce_dev->chan_ce_in_cmd->complete_func =
+ _ablk_cipher_ce_in_call_back_pmem;
+ pce_dev->chan_ce_out_cmd->complete_func =
+ _ablk_cipher_ce_out_call_back_pmem;
+ } else {
+ pce_dev->chan_ce_in_cmd->complete_func =
_ablk_cipher_ce_in_call_back;
- pce_dev->chan_ce_out_cmd->complete_func =
+ pce_dev->chan_ce_out_cmd->complete_func =
_ablk_cipher_ce_out_call_back;
+ }
rc = _qce_start_dma(pce_dev, true, true);
if (rc == 0)
return 0;
bad:
- if (pce_dev->dst_nents) {
- qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
- pce_dev->dst_nents, DMA_FROM_DEVICE);
- }
- if (pce_dev->src_nents) {
- qce_dma_unmap_sg(pce_dev->pdev, areq->src,
- pce_dev->src_nents,
- (areq->src == areq->dst) ?
- DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ if (c_req->use_pmem != 1) {
+ if (pce_dev->dst_nents) {
+ dma_unmap_sg(pce_dev->pdev, areq->dst,
+ pce_dev->dst_nents, DMA_FROM_DEVICE);
+ }
+ if (pce_dev->src_nents) {
+ dma_unmap_sg(pce_dev->pdev, areq->src,
+ pce_dev->src_nents,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
}
return rc;
}
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 1bb3e59..2a191d5 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*/
#include <linux/mman.h>
-
+#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -454,12 +454,14 @@ static int start_cipher_req(struct qcedev_control *podev)
/* start the command on the podev->active_command */
qcedev_areq = podev->active_command;
+
qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
- if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
- pr_err("%s: Use of PMEM is not supported\n", __func__);
- goto unsupported;
- }
- creq.pmem = NULL;
+ creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
+ if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
+ creq.pmem = &qcedev_areq->cipher_op_req.pmem;
+ else
+ creq.pmem = NULL;
+
switch (qcedev_areq->cipher_op_req.alg) {
case QCEDEV_ALG_DES:
creq.alg = CIPHER_ALG_DES;
@@ -1261,6 +1263,224 @@ static int qcedev_hash_final(struct qcedev_async_req *areq,
return qcedev_hmac_final(areq, handle);
}
+#ifdef CONFIG_ANDROID_PMEM
+static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ int i = 0;
+ int err = 0;
+ struct scatterlist *sg_src = NULL;
+ struct scatterlist *sg_dst = NULL;
+ struct scatterlist *sg_ndex = NULL;
+ struct file *file_src = NULL;
+ struct file *file_dst = NULL;
+ unsigned long paddr;
+ unsigned long kvaddr;
+ unsigned long len;
+
+ sg_src = kmalloc((sizeof(struct scatterlist) *
+ areq->cipher_op_req.entries), GFP_KERNEL);
+ if (sg_src == NULL) {
+ pr_err("%s: Can't Allocate memory:sg_src 0x%x\n",
+ __func__, (uint32_t)sg_src);
+ return -ENOMEM;
+
+ }
+ memset(sg_src, 0, (sizeof(struct scatterlist) *
+ areq->cipher_op_req.entries));
+ sg_ndex = sg_src;
+ areq->cipher_req.creq.src = sg_src;
+
+ /* address src */
+ get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
+ &kvaddr, &len, &file_src);
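+	/* kvaddr maps the whole pmem region into the kernel: the sg entries
+	 * below are built at kvaddr + offset for CPU-side access, and the
+	 * physical base paddr is then folded into each offset for the DMA
+	 * path in qce.c */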
+
+ for (i = 0; i < areq->cipher_op_req.entries; i++) {
+ sg_set_buf(sg_ndex,
+ ((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
+ areq->cipher_op_req.pmem.src[i].len);
+ sg_ndex++;
+ }
+ sg_mark_end(--sg_ndex);
+
+ for (i = 0; i < areq->cipher_op_req.entries; i++)
+ areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;
+
+ /* address dst */
+ /* If not place encryption/decryption */
+ if (areq->cipher_op_req.in_place_op != 1) {
+ sg_dst = kmalloc((sizeof(struct scatterlist) *
+ areq->cipher_op_req.entries), GFP_KERNEL);
+ if (sg_dst == NULL) {
+ pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n",
+ __func__, (uint32_t)sg_dst);
+ return -ENOMEM;
+ }
+ memset(sg_dst, 0, (sizeof(struct scatterlist) *
+ areq->cipher_op_req.entries));
+ areq->cipher_req.creq.dst = sg_dst;
+ sg_ndex = sg_dst;
+
+ get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
+ &kvaddr, &len, &file_dst);
+ for (i = 0; i < areq->cipher_op_req.entries; i++)
+ sg_set_buf(sg_ndex++,
+ ((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
+ + kvaddr), areq->cipher_op_req.pmem.dst[i].len);
+ sg_mark_end(--sg_ndex);
+
+ for (i = 0; i < areq->cipher_op_req.entries; i++)
+ areq->cipher_op_req.pmem.dst[i].offset +=
+ (uint32_t)paddr;
+ } else {
+ areq->cipher_req.creq.dst = sg_src;
+ for (i = 0; i < areq->cipher_op_req.entries; i++) {
+ areq->cipher_op_req.pmem.dst[i].offset =
+ areq->cipher_op_req.pmem.src[i].offset;
+ areq->cipher_op_req.pmem.dst[i].len =
+ areq->cipher_op_req.pmem.src[i].len;
+ }
+ }
+
+ areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+ areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+
+ err = submit_req(areq, handle);
+
+ kfree(sg_src);
+ kfree(sg_dst);
+
+ if (file_dst)
+ put_pmem_file(file_dst);
+ if (file_src)
+ put_pmem_file(file_src);
+
+ return err;
+};
+
+
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ int num_entries = 0;
+ uint32_t total = 0;
+ struct qcedev_cipher_op_req *saved_req;
+ struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;
+
+ saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+ if (saved_req == NULL) {
+ pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
+ __func__, (uint32_t)saved_req);
+ return -ENOMEM;
+ }
+ memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+ struct qcedev_cipher_op_req req;
+
+ /* save the original req structure */
+ memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ i = 0;
+ /* Address 32 KB at a time */
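+		/* e.g. a single 100 KB pmem.src entry goes out as four
+		 * transfers of 32 KB, 32 KB, 32 KB and 4 KB, advancing the
+		 * entry's offset and shrinking its len after each pass */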
+ while ((i < req.entries) && (err == 0)) {
+ if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
+ creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
+ if (i > 0) {
+ creq->pmem.src[0].offset =
+ creq->pmem.src[i].offset;
+ }
+
+ creq->data_len = QCE_MAX_OPER_DATA;
+ creq->entries = 1;
+
+ err =
+ qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+ handle);
+
+ creq->pmem.src[i].len = req.pmem.src[i].len -
+ QCE_MAX_OPER_DATA;
+ creq->pmem.src[i].offset =
+ req.pmem.src[i].offset +
+ QCE_MAX_OPER_DATA;
+ req.pmem.src[i].offset =
+ creq->pmem.src[i].offset;
+ req.pmem.src[i].len = creq->pmem.src[i].len;
+ } else {
+ total = 0;
+ for (j = i; j < req.entries; j++) {
+ num_entries++;
+ if ((total + creq->pmem.src[j].len)
+ >= QCE_MAX_OPER_DATA) {
+ creq->pmem.src[j].len =
+ QCE_MAX_OPER_DATA - total;
+ total = QCE_MAX_OPER_DATA;
+ break;
+ }
+ total += creq->pmem.src[j].len;
+ }
+
+ creq->data_len = total;
+ if (i > 0)
+ for (k = 0; k < num_entries; k++) {
+ creq->pmem.src[k].len =
+ creq->pmem.src[i+k].len;
+ creq->pmem.src[k].offset =
+ creq->pmem.src[i+k].offset;
+ }
+ creq->entries = num_entries;
+
+ i = j;
+ err =
+ qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+ handle);
+ num_entries = 0;
+
+ creq->pmem.src[i].offset =
+ req.pmem.src[i].offset +
+ creq->pmem.src[i].len;
+ creq->pmem.src[i].len =
+ req.pmem.src[i].len -
+ creq->pmem.src[i].len;
+ req.pmem.src[i].offset =
+ creq->pmem.src[i].offset;
+ req.pmem.src[i].len =
+ creq->pmem.src[i].len;
+
+ if (creq->pmem.src[i].len == 0)
+ i++;
+ }
+
+ } /* end of while ((i < req.entries) && (err == 0)) */
+
+ } else
+ err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);
+
+ /* Restore the original req structure */
+ for (i = 0; i < saved_req->entries; i++) {
+ creq->pmem.src[i].len = saved_req->pmem.src[i].len;
+ creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
+ }
+ creq->entries = saved_req->entries;
+ creq->data_len = saved_req->data_len;
+ kfree(saved_req);
+
+ return err;
+
+}
+#else
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ return -EPERM;
+}
+#endif /* CONFIG_ANDROID_PMEM */
+
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
int *di, struct qcedev_handle *handle,
uint8_t *k_align_src)
@@ -1522,10 +1742,6 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
struct qcedev_control *podev)
{
- if (req->use_pmem) {
- pr_err("%s: Use of PMEM is not supported\n", __func__);
- goto error;
- }
if ((req->entries == 0) || (req->data_len == 0))
goto error;
if ((req->alg >= QCEDEV_ALG_LAST) ||
@@ -1565,6 +1781,15 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
if (req->byteoffset) {
if (req->mode != QCEDEV_AES_MODE_CTR)
goto error;
+ else { /* if using CTR mode make sure not using Pmem */
+ if (req->use_pmem)
+ goto error;
+ }
+ }
+ /* if using PMEM with non-zero byteoffset, ensure it is in_place_op */
+ if (req->use_pmem) {
+ if (!req->in_place_op)
+ goto error;
}
 /* Ensure zero ivlen for ECB mode */
if (req->ivlen != 0) {
@@ -1654,7 +1879,10 @@ static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
podev))
return -EINVAL;
- err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+ if (qcedev_areq.cipher_op_req.use_pmem)
+ err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
+ else
+ err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
if (err)
return err;
if (__copy_to_user((void __user *)arg,
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 51349f6..c9e8a94 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,4 +1,3 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
-obj-$(CONFIG_CMA) += ion_cma_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index e69e7ed..31bbb1f 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -33,7 +33,6 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
-#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
@@ -107,6 +106,24 @@ struct ion_handle {
static void ion_iommu_release(struct kref *kref);
+static int ion_validate_buffer_flags(struct ion_buffer *buffer,
+ unsigned long flags)
+{
+ if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
+ buffer->iommu_map_cnt) {
+ if (buffer->flags != flags) {
+ pr_err("%s: buffer was already mapped with flags %lx,"
+ " cannot map with flags %lx\n", __func__,
+ buffer->flags, flags);
+ return 1;
+ }
+
+ } else {
+ buffer->flags = flags;
+ }
+ return 0;
+}
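+/*
+ * Illustrative consequence: the first mapping fixes buffer->flags, so a
+ * buffer already kernel-mapped CACHED makes a later UNCACHED
+ * ion_map_kernel() fail with -EEXIST rather than silently remapping.
+ */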
+
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -214,7 +231,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->dev = dev;
buffer->size = len;
- buffer->flags = flags;
table = buffer->heap->ops->map_dma(buffer->heap, buffer);
if (IS_ERR_OR_NULL(table)) {
@@ -395,8 +411,7 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_mask,
- unsigned int flags)
+ size_t align, unsigned int flags)
{
struct rb_node *n;
struct ion_handle *handle;
@@ -427,11 +442,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
if (!((1 << heap->type) & client->heap_mask))
continue;
/* if the caller didn't specify this heap type */
- if (!((1 << heap->id) & heap_mask))
+ if (!((1 << heap->id) & flags))
continue;
/* Do not allow un-secure heap if secure is specified */
- if (secure_allocation &&
- (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
+ if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR_OR_NULL(buffer))
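With the heap_mask parameter folded away, heap selection rides in the same flags word that carries the cache and security bits; the loop above now tests (1 << heap->id) & flags directly. An allocation under the new signature would look roughly like this (heap ID and sizes are illustrative; ION_HEAP() and ION_SET_CACHE() are this tree's macros):

    /* Ask the SF carveout heap for 1 MiB, cached; the single flags
     * argument now encodes both the target heap and its attributes. */
    struct ion_handle *h = ion_alloc(client, SZ_1M, SZ_4K,
                                     ION_HEAP(ION_SF_HEAP_ID) |
                                     ION_SET_CACHE(CACHED));
    if (IS_ERR_OR_NULL(h))
            return h ? PTR_ERR(h) : -ENOMEM;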
@@ -756,7 +770,8 @@ out:
}
EXPORT_SYMBOL(ion_unmap_iommu);
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+ unsigned long flags)
{
struct ion_buffer *buffer;
void *vaddr;
@@ -778,6 +793,11 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
return ERR_PTR(-ENODEV);
}
+ if (ion_validate_buffer_flags(buffer, flags)) {
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EEXIST);
+ }
+
mutex_lock(&buffer->lock);
vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
@@ -799,6 +819,31 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_unmap_kernel);
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct vm_area_struct *vma;
+ int ret = 1;
+
+ if (end < start)
+ goto out;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ if (vma && vma->vm_start < end) {
+ if (start < vma->vm_start)
+ goto out_up;
+ if (end > vma->vm_end)
+ goto out_up;
+ ret = 0;
+ }
+
+out_up:
+ up_read(&mm->mmap_sem);
+out:
+ return ret;
+}
+
int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
void *uaddr, unsigned long offset, unsigned long len,
unsigned int cmd)
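check_vaddr_bounds(), added above, returns 0 only when [start, end) falls entirely within a single VMA of the caller's address space; an inverted range, a start inside an unmapped hole, or a range running past the VMA's end all return 1. Concretely (addresses are illustrative):

    /* Assume the current task has exactly one VMA covering
     * [0x40000000, 0x40100000):
     *
     *   check_vaddr_bounds(0x40000000, 0x40001000) -> 0  inside the VMA
     *   check_vaddr_bounds(0x3ffff000, 0x40001000) -> 1  starts before it
     *   check_vaddr_bounds(0x400ff000, 0x40101000) -> 1  runs past its end
     *   check_vaddr_bounds(0x40001000, 0x40000000) -> 1  end < start
     */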
@@ -864,7 +909,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
type == ION_HEAP_TYPE_CARVEOUT ||
- type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
+ type == ION_HEAP_TYPE_CP)
seq_printf(s, " : %12lx", handle->buffer->priv_phys);
else
seq_printf(s, " : %12s", "N/A");
@@ -1211,12 +1256,9 @@ static int ion_share_set_flags(struct ion_client *client,
{
struct ion_buffer *buffer;
bool valid_handle;
- unsigned long ion_flags = 0;
+ unsigned long ion_flags = ION_SET_CACHE(CACHED);
if (flags & O_DSYNC)
- ion_flags = ION_SET_UNCACHED(ion_flags);
- else
- ion_flags = ION_SET_CACHED(ion_flags);
-
+ ion_flags = ION_SET_CACHE(UNCACHED);
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
@@ -1228,6 +1270,12 @@ static int ion_share_set_flags(struct ion_client *client,
buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ if (ion_validate_buffer_flags(buffer, ion_flags)) {
+ mutex_unlock(&buffer->lock);
+ return -EEXIST;
+ }
+ mutex_unlock(&buffer->lock);
return 0;
}
@@ -1311,25 +1359,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
data.handle = ion_alloc(client, data.len, data.align,
- data.heap_mask, data.flags);
-
- if (IS_ERR(data.handle))
- return PTR_ERR(data.handle);
-
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- ion_free(client, data.handle);
- return -EFAULT;
- }
- break;
- }
- case ION_IOC_ALLOC_COMPAT:
- {
- struct ion_allocation_data_old data;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- data.handle = ion_alloc(client, data.len, data.align,
- data.flags, data.flags);
+ data.flags);
if (IS_ERR(data.handle))
return PTR_ERR(data.handle);
@@ -1383,10 +1413,8 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
sizeof(struct ion_fd_data)))
return -EFAULT;
data.handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(data.handle)) {
- ret = PTR_ERR(data.handle);
+ if (IS_ERR(data.handle))
data.handle = NULL;
- }
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
@@ -1407,21 +1435,65 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return dev->custom_ioctl(client, data.cmd, data.arg);
}
case ION_IOC_CLEAN_CACHES:
- case ION_IOC_CLEAN_CACHES_COMPAT:
- return client->dev->custom_ioctl(client,
- ION_IOC_CLEAN_CACHES, arg);
case ION_IOC_INV_CACHES:
- case ION_IOC_INV_CACHES_COMPAT:
- return client->dev->custom_ioctl(client,
- ION_IOC_INV_CACHES, arg);
case ION_IOC_CLEAN_INV_CACHES:
- case ION_IOC_CLEAN_INV_CACHES_COMPAT:
- return client->dev->custom_ioctl(client,
- ION_IOC_CLEAN_INV_CACHES, arg);
+ {
+ struct ion_flush_data data;
+ unsigned long start, end;
+ struct ion_handle *handle = NULL;
+ int ret;
+
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_flush_data)))
+ return -EFAULT;
+
+ start = (unsigned long) data.vaddr;
+ end = (unsigned long) data.vaddr + data.length;
+
+ if (check_vaddr_bounds(start, end)) {
+ pr_err("%s: virtual address %p is out of bounds\n",
+ __func__, data.vaddr);
+ return -EINVAL;
+ }
+
+ if (!data.handle) {
+ handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(handle)) {
+ pr_info("%s: Could not import handle: %d\n",
+ __func__, (int)handle);
+ return -EINVAL;
+ }
+ }
+
+ ret = ion_do_cache_op(client,
+ data.handle ? data.handle : handle,
+ data.vaddr, data.offset, data.length,
+ cmd);
+
+ if (!data.handle)
+ ion_free(client, handle);
+
+ if (ret < 0)
+ return ret;
+ break;
+
+ }
case ION_IOC_GET_FLAGS:
- case ION_IOC_GET_FLAGS_COMPAT:
- return client->dev->custom_ioctl(client,
- ION_IOC_GET_FLAGS, arg);
+ {
+ struct ion_flag_data data;
+ int ret;
+ if (copy_from_user(&data, (void __user *)arg,
+ sizeof(struct ion_flag_data)))
+ return -EFAULT;
+
+ ret = ion_handle_get_flags(client, data.handle, &data.flags);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(struct ion_flag_data)))
+ return -EFAULT;
+ break;
+ }
default:
return -ENOTTY;
}
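With the _COMPAT aliases dropped, the cache ioctls are handled inline again: the caller passes an ion_flush_data naming either an existing handle or an fd that is imported just for the duration of the operation, plus a user virtual range that must sit inside one VMA. A hedged userspace sketch of a flush (struct layout as used above; error handling trimmed):

    struct ion_flush_data flush = {
            .handle = NULL,        /* NULL: import from .fd instead    */
            .fd     = buf_fd,
            .vaddr  = mapped_ptr,  /* checked by check_vaddr_bounds()  */
            .offset = 0,
            .length = buf_len,
    };
    if (ioctl(ion_dev_fd, ION_IOC_CLEAN_INV_CACHES, &flush) < 0)
            perror("ION cache maintenance");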
@@ -1709,7 +1781,7 @@ int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+ if (heap->type != ION_HEAP_TYPE_CP)
continue;
if (ION_HEAP(heap->id) != heap_id)
continue;
@@ -1737,7 +1809,7 @@ int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+ if (heap->type != ION_HEAP_TYPE_CP)
continue;
if (ION_HEAP(heap->id) != heap_id)
continue;
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 256de07..a591eb4 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_carveout_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"
@@ -30,7 +31,6 @@
#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
-#include <linux/msm_ion.h>
struct ion_carveout_heap {
struct ion_heap heap;
@@ -240,78 +240,25 @@ int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset, unsigned int length,
unsigned int cmd)
{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
- unsigned int size_to_vmap, total_size;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
- total_size = buffer->size;
-
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
+
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
}
if (carveout_heap->has_outer_cache) {
@@ -410,7 +357,7 @@ int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
goto out1;
}
- sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
+ sglist = vmalloc(sizeof(*sglist));
if (!sglist)
goto out1;
@@ -434,13 +381,13 @@ int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
if (ret)
goto out2;
}
- kfree(sglist);
+ vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- kfree(sglist);
+ vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
deleted file mode 100644
index 722f778..0000000
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * drivers/gpu/ion/ion_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/ion.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_ion.h>
-#include <mach/iommu_domains.h>
-
-#include <asm/cacheflush.h>
-
-/* for ion_heap_ops structure */
-#include "ion_priv.h"
-
-#define ION_CMA_ALLOCATE_FAILED -1
-
-struct ion_cma_buffer_info {
- void *cpu_addr;
- dma_addr_t handle;
- struct sg_table *table;
- bool is_cached;
-};
-
-static int cma_heap_has_outer_cache;
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replace by dma_common_get_sgtable
- * as soon as it will avalaible.
- */
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- struct page *page = virt_to_page(cpu_addr);
- int ret;
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
-}
-
-/* ION CMA heap operations functions */
-static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
- unsigned long flags)
-{
- struct device *dev = heap->priv;
- struct ion_cma_buffer_info *info;
-
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
- info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
- if (!info) {
- dev_err(dev, "Can't allocate buffer info\n");
- return ION_CMA_ALLOCATE_FAILED;
- }
-
- if (!ION_IS_CACHED(flags))
- info->cpu_addr = dma_alloc_writecombine(dev, len,
- &(info->handle), 0);
- else
- info->cpu_addr = dma_alloc_nonconsistent(dev, len,
- &(info->handle), 0);
-
- if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
- goto err;
- }
-
- info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!info->table) {
- dev_err(dev, "Fail to allocate sg table\n");
- goto err;
- }
-
- info->is_cached = ION_IS_CACHED(flags);
-
- ion_cma_get_sgtable(dev,
- info->table, info->cpu_addr, info->handle, len);
-
- /* keep this for memory release */
- buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
- return 0;
-
-err:
- kfree(info);
- return ION_CMA_ALLOCATE_FAILED;
-}
-
-static void ion_cma_free(struct ion_buffer *buffer)
-{
- struct device *dev = buffer->heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Release buffer %p\n", buffer);
- /* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
- sg_free_table(info->table);
- /* release sg table */
- kfree(info->table);
- kfree(info);
-}
-
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct device *dev = heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
- info->handle);
-
- *addr = info->handle;
- *len = buffer->size;
-
- return 0;
-}
-
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct device *dev = buffer->heap->priv;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- if (info->is_cached)
- return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
- else
- return dma_mmap_writecombine(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
-}
-
-static void *ion_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return;
-}
-
-int ion_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
- prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
-int ion_cma_cache_ops(struct ion_heap *heap,
- struct ion_buffer *buffer, void *vaddr,
- unsigned int offset, unsigned int length,
- unsigned int cmd)
-{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t);
-
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
-
- if (cma_heap_has_outer_cache) {
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- outer_cache_op(info->handle, info->handle + length);
- }
-
- return 0;
-}
-
-static struct ion_heap_ops ion_cma_ops = {
- .allocate = ion_cma_allocate,
- .free = ion_cma_free,
- .map_dma = ion_cma_heap_map_dma,
- .unmap_dma = ion_cma_heap_unmap_dma,
- .phys = ion_cma_phys,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
- .unmap_kernel = ion_cma_unmap_kernel,
- .map_iommu = ion_cma_map_iommu,
- .unmap_iommu = ion_cma_unmap_iommu,
- .cache_op = ion_cma_cache_ops,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
-{
- struct ion_heap *heap;
-
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
-
- if (!heap)
- return ERR_PTR(-ENOMEM);
-
- heap->ops = &ion_cma_ops;
- /* set device as private heaps data, later it will be
- * used to make the link with reserved CMA memory */
- heap->priv = data->priv;
- heap->type = ION_HEAP_TYPE_DMA;
- cma_heap_has_outer_cache = data->has_outer_cache;
- return heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
- kfree(heap);
-}
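The deleted ion_cma_get_sgtable() anticipated the generic helper named in its own comment; on a kernel where dma_common_get_sgtable() exists, the open-coded single-entry table can be produced like this (a sketch, assuming that helper is available in this tree):

    #include <linux/dma-mapping.h>

    /* Build a one-entry sg_table for an already allocated
     * coherent/CMA buffer via the generic core helper. */
    static int cma_buf_to_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t handle,
                                  size_t size)
    {
            return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
    }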
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 439d6cf..4f82467 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_cp_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -19,10 +19,11 @@
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
@@ -81,8 +82,8 @@ struct ion_cp_heap {
unsigned int heap_protected;
unsigned long allocated_bytes;
unsigned long total_size;
- int (*heap_request_region)(void *);
- int (*heap_release_region)(void *);
+ int (*request_region)(void *);
+ int (*release_region)(void *);
void *bus_id;
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
@@ -93,7 +94,6 @@ struct ion_cp_heap {
int iommu_2x_map_domain;
unsigned int has_outer_cache;
atomic_t protect_cnt;
- int disallow_non_secure_allocation;
};
enum {
@@ -119,16 +119,6 @@ static unsigned long ion_cp_get_total_kmap_count(
return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}
-static int ion_on_first_alloc(struct ion_heap *heap)
-{
- return 0;
-}
-
-static void ion_on_last_free(struct ion_heap *heap)
-{
-}
-
-
/**
* Protects memory if heap is unsecured heap.
* Must be called with heap->lock locked.
@@ -141,14 +131,6 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
/* Make sure we are in C state when the heap is protected. */
- if (!cp_heap->allocated_bytes) {
- ret_value = ion_on_first_alloc(heap);
- if (ret_value) {
- atomic_dec(&cp_heap->protect_cnt);
- goto out;
- }
- }
-
ret_value = ion_cp_protect_mem(cp_heap->secure_base,
cp_heap->secure_size, cp_heap->permission_type,
version, data);
@@ -156,9 +138,6 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
pr_err("Failed to protect memory for heap %s - "
"error code: %d\n", heap->name, ret_value);
- if (!cp_heap->allocated_bytes)
- ion_on_last_free(heap);
-
atomic_dec(&cp_heap->protect_cnt);
} else {
cp_heap->heap_protected = HEAP_PROTECTED;
@@ -166,7 +145,7 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
heap->name, cp_heap->base);
}
}
-out:
+
pr_debug("%s: protect count is %d\n", __func__,
atomic_read(&cp_heap->protect_cnt));
BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
@@ -193,9 +172,6 @@ static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
(unsigned int) cp_heap->base);
-
- if (!cp_heap->allocated_bytes)
- ion_on_last_free(heap);
}
}
pr_debug("%s: protect count is %d\n", __func__,
@@ -210,7 +186,6 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
{
unsigned long offset;
unsigned long secure_allocation = flags & ION_SECURE;
- unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -223,14 +198,6 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
return ION_CP_ALLOCATE_FAIL;
}
- if (!force_contig && !secure_allocation &&
- cp_heap->disallow_non_secure_allocation) {
- mutex_unlock(&cp_heap->lock);
- pr_debug("%s: non-secure allocation disallowed from this heap\n",
- __func__);
- return ION_CP_ALLOCATE_FAIL;
- }
-
if (secure_allocation &&
(cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
mutex_unlock(&cp_heap->lock);
@@ -241,16 +208,6 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
return ION_CP_ALLOCATE_FAIL;
}
- /*
- * if this is the first reusable allocation, transition
- * the heap
- */
- if (!cp_heap->allocated_bytes)
- if (ion_on_first_alloc(heap)) {
- mutex_unlock(&cp_heap->lock);
- return ION_RESERVED_ALLOCATE_FAIL;
- }
-
cp_heap->allocated_bytes += size;
mutex_unlock(&cp_heap->lock);
@@ -268,9 +225,7 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
__func__, heap->name,
cp_heap->total_size -
cp_heap->allocated_bytes, size);
- if (!cp_heap->allocated_bytes &&
- cp_heap->heap_protected == HEAP_NOT_PROTECTED)
- ion_on_last_free(heap);
+
mutex_unlock(&cp_heap->lock);
return ION_CP_ALLOCATE_FAIL;
@@ -315,10 +270,6 @@ void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
mutex_lock(&cp_heap->lock);
cp_heap->allocated_bytes -= size;
- if (!cp_heap->allocated_bytes &&
- cp_heap->heap_protected == HEAP_NOT_PROTECTED)
- ion_on_last_free(heap);
-
/* Unmap everything if we previously mapped the whole heap at once. */
if (!cp_heap->allocated_bytes) {
unsigned int i;
@@ -412,9 +363,8 @@ static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
int ret_value = 0;
if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
- if (cp_heap->heap_request_region)
- ret_value = cp_heap->heap_request_region(
- cp_heap->bus_id);
+ if (cp_heap->request_region)
+ ret_value = cp_heap->request_region(cp_heap->bus_id);
return ret_value;
}
@@ -425,9 +375,8 @@ static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
int ret_value = 0;
if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
- if (cp_heap->heap_release_region)
- ret_value = cp_heap->heap_release_region(
- cp_heap->bus_id);
+ if (cp_heap->release_region)
+ ret_value = cp_heap->release_region(cp_heap->bus_id);
return ret_value;
}
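Both helpers key off the combined user and kernel map counts, so the renamed callbacks fire only on the edges: the bus region is requested when the heap goes from zero mappings to one, and released when the last mapping drops. In sketch form (count bookkeeping and locking elided):

    /* First mapper: counts were zero before this mapping was added. */
    if (cp_heap->request_region)
            cp_heap->request_region(cp_heap->bus_id);

    /* Last unmapper: counts have fallen back to zero. */
    if (cp_heap->release_region)
            cp_heap->release_region(cp_heap->bus_id);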
@@ -447,12 +396,12 @@ void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
return NULL;
}
- if (ION_IS_CACHED(buffer->flags))
- ret_value = ioremap_cached(buffer->priv_phys,
- buffer->size);
- else
- ret_value = ioremap(buffer->priv_phys,
+ if (ION_IS_CACHED(buffer->flags))
+ ret_value = ioremap_cached(buffer->priv_phys,
buffer->size);
+ else
+ ret_value = ioremap(buffer->priv_phys,
+ buffer->size);
if (!ret_value) {
ion_cp_release_region(cp_heap);
@@ -473,7 +422,7 @@ void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
- __arm_iounmap(buffer->vaddr);
+ __arm_iounmap(buffer->vaddr);
buffer->vaddr = NULL;
@@ -536,77 +485,25 @@ int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset, unsigned int length,
unsigned int cmd)
{
- void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
+ void (*outer_cache_op)(phys_addr_t, phys_addr_t);
struct ion_cp_heap *cp_heap =
- container_of(heap, struct ion_cp_heap, heap);
- unsigned int size_to_vmap, total_size;
- int i, j;
- void *ptr = NULL;
- ion_phys_addr_t buff_phys = buffer->priv_phys;
-
- if (!vaddr) {
- /*
- * Split the vmalloc space into smaller regions in
- * order to clean and/or invalidate the cache.
- */
- size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
- total_size = buffer->size;
- for (i = 0; i < total_size; i += size_to_vmap) {
- size_to_vmap = min(size_to_vmap, total_size - i);
- for (j = 0; j < 10 && size_to_vmap; ++j) {
- ptr = ioremap(buff_phys, size_to_vmap);
- if (ptr) {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(ptr,
- ptr + size_to_vmap);
- outer_cache_op =
- outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
- buff_phys += size_to_vmap;
- break;
- } else {
- size_to_vmap >>= 1;
- }
- }
- if (!ptr) {
- pr_err("Couldn't io-remap the memory\n");
- return -EINVAL;
- }
- iounmap(ptr);
- }
- } else {
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- dmac_clean_range(vaddr, vaddr + length);
- outer_cache_op = outer_clean_range;
- break;
- case ION_IOC_INV_CACHES:
- dmac_inv_range(vaddr, vaddr + length);
- outer_cache_op = outer_inv_range;
- break;
- case ION_IOC_CLEAN_INV_CACHES:
- dmac_flush_range(vaddr, vaddr + length);
- outer_cache_op = outer_flush_range;
- break;
- default:
- return -EINVAL;
- }
+ container_of(heap, struct ion_cp_heap, heap);
+
+ switch (cmd) {
+ case ION_IOC_CLEAN_CACHES:
+ dmac_clean_range(vaddr, vaddr + length);
+ outer_cache_op = outer_clean_range;
+ break;
+ case ION_IOC_INV_CACHES:
+ dmac_inv_range(vaddr, vaddr + length);
+ outer_cache_op = outer_inv_range;
+ break;
+ case ION_IOC_CLEAN_INV_CACHES:
+ dmac_flush_range(vaddr, vaddr + length);
+ outer_cache_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
}
if (cp_heap->has_outer_cache) {
@@ -953,7 +850,7 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
cp_heap->kmap_uncached_count = 0;
cp_heap->total_size = heap_data->size;
cp_heap->heap.ops = &cp_heap_ops;
- cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
+ cp_heap->heap.type = ION_HEAP_TYPE_CP;
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
cp_heap->secure_base = cp_heap->base;
cp_heap->secure_size = heap_data->size;
@@ -970,17 +867,13 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
if (extra_data->setup_region)
cp_heap->bus_id = extra_data->setup_region();
if (extra_data->request_region)
- cp_heap->heap_request_region =
- extra_data->request_region;
+ cp_heap->request_region = extra_data->request_region;
if (extra_data->release_region)
- cp_heap->heap_release_region =
- extra_data->release_region;
+ cp_heap->release_region = extra_data->release_region;
cp_heap->iommu_map_all =
extra_data->iommu_map_all;
cp_heap->iommu_2x_map_domain =
extra_data->iommu_2x_map_domain;
- cp_heap->disallow_non_secure_allocation =
- extra_data->no_nonsecure_alloc;
}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 98c1a8c..6ea49db 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,13 +18,12 @@
#include <linux/err.h>
#include <linux/ion.h>
#include "ion_priv.h"
-#include <linux/msm_ion.h>
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
- switch ((int) heap_data->type) {
+ switch (heap_data->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
heap = ion_system_contig_heap_create(heap_data);
break;
@@ -40,11 +39,6 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
case ION_HEAP_TYPE_CP:
heap = ion_cp_heap_create(heap_data);
break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
-#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
@@ -60,7 +54,6 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
heap->name = heap_data->name;
heap->id = heap_data->id;
- heap->priv = heap_data->priv;
return heap;
}
@@ -69,7 +62,7 @@ void ion_heap_destroy(struct ion_heap *heap)
if (!heap)
return;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
ion_system_contig_heap_destroy(heap);
break;
@@ -85,11 +78,6 @@ void ion_heap_destroy(struct ion_heap *heap)
case ION_HEAP_TYPE_CP:
ion_cp_heap_destroy(heap);
break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
-#endif
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 49721fb..d0f101c 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,15 +12,13 @@
*/
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/mm.h>
-#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
-#include <linux/dma-mapping.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
@@ -33,131 +31,36 @@ struct ion_iommu_heap {
unsigned int has_outer_cache;
};
-/*
- * We will attempt to allocate high-order pages and store those in an
- * sg_list. However, some APIs expect an array of struct page * where
- * each page is of size PAGE_SIZE. We use this extra structure to
- * carry around an array of such pages (derived from the high-order
- * pages with nth_page).
- */
struct ion_iommu_priv_data {
struct page **pages;
- unsigned int pages_uses_vmalloc;
int nrpages;
unsigned long size;
};
-#define MAX_VMAP_RETRIES 10
-
-static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-
-struct page_info {
- struct page *page;
- unsigned int order;
- struct list_head list;
-};
-
-static unsigned int order_to_size(int order)
-{
- return PAGE_SIZE << order;
-}
-
-static struct page_info *alloc_largest_available(unsigned long size,
- unsigned int max_order)
-{
- struct page *page;
- struct page_info *info;
- int i;
-
- for (i = 0; i < num_orders; i++) {
- gfp_t gfp;
- if (size < order_to_size(orders[i]))
- continue;
- if (max_order < orders[i])
- continue;
-
- gfp = __GFP_HIGHMEM;
-
- if (orders[i]) {
- gfp |= __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN;
- } else {
- gfp |= GFP_KERNEL;
- }
- page = alloc_pages(gfp, orders[i]);
- if (!page)
- continue;
-
- info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
- info->page = page;
- info->order = orders[i];
- return info;
- }
- return NULL;
-}
-
static int ion_iommu_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
int ret, i;
- struct list_head pages_list;
- struct page_info *info, *tmp_info;
struct ion_iommu_priv_data *data = NULL;
if (msm_use_iommu()) {
struct scatterlist *sg;
struct sg_table *table;
- int j;
- void *ptr = NULL;
- unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
- unsigned long size_remaining = PAGE_ALIGN(size);
- unsigned int max_order = orders[0];
- unsigned int page_tbl_size;
+ unsigned int i;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- INIT_LIST_HEAD(&pages_list);
- while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining,
- max_order);
- if (!info) {
- ret = -ENOMEM;
- goto err_free_data;
- }
- list_add_tail(&info->list, &pages_list);
- size_remaining -= order_to_size(info->order);
- max_order = info->order;
- num_large_pages++;
- }
-
data->size = PFN_ALIGN(size);
data->nrpages = data->size >> PAGE_SHIFT;
- data->pages_uses_vmalloc = 0;
- page_tbl_size = sizeof(struct page *) * data->nrpages;
-
- if (page_tbl_size > SZ_8K) {
- /*
- * Do fallback to ensure we have a balance between
- * performance and availability.
- */
- data->pages = kmalloc(page_tbl_size,
- __GFP_COMP | __GFP_NORETRY |
- __GFP_NO_KSWAPD | __GFP_NOWARN);
- if (!data->pages) {
- data->pages = vmalloc(page_tbl_size);
- data->pages_uses_vmalloc = 1;
- }
- } else {
- data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
- }
+ data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
+ GFP_KERNEL);
if (!data->pages) {
ret = -ENOMEM;
- goto err_free_data;
+ goto err1;
}
table = buffer->sg_table =
@@ -167,60 +70,17 @@ static int ion_iommu_heap_allocate(struct ion_heap *heap,
ret = -ENOMEM;
goto err1;
}
- ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
+ ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
if (ret)
goto err2;
- i = 0;
- sg = table->sgl;
- list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
- struct page *page = info->page;
- sg_set_page(sg, page, order_to_size(info->order), 0);
- sg_dma_address(sg) = sg_phys(sg);
- sg = sg_next(sg);
- for (j = 0; j < (1 << info->order); ++j)
- data->pages[i++] = nth_page(page, j);
- list_del(&info->list);
- kfree(info);
- }
-
- /*
- * As an optimization, we omit __GFP_ZERO from
- * alloc_page above and manually zero out all of the
- * pages in one fell swoop here. To safeguard against
- * insufficient vmalloc space, we only vmap
- * `npages_to_vmap' at a time, starting with a
- * conservative estimate of 1/8 of the total number of
- * vmalloc pages available. Note that the `pages'
- * array is composed of all 4K pages, irrespective of
- * the size of the pages on the sg list.
- */
- npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
- >> PAGE_SHIFT;
- total_pages = data->nrpages;
- for (i = 0; i < total_pages; i += npages_to_vmap) {
- npages_to_vmap = min(npages_to_vmap, total_pages - i);
- for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
- ++j) {
- ptr = vmap(&data->pages[i], npages_to_vmap,
- VM_IOREMAP, pgprot_kernel);
- if (ptr)
- break;
- else
- npages_to_vmap >>= 1;
- }
- if (!ptr) {
- pr_err("Couldn't vmap the pages for zeroing\n");
- ret = -ENOMEM;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!data->pages[i])
goto err3;
- }
- memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
- vunmap(ptr);
- }
- if (!ION_IS_CACHED(flags))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
+ }
buffer->priv_virt = data;
return 0;
@@ -235,45 +95,29 @@ err3:
err2:
kfree(buffer->sg_table);
buffer->sg_table = 0;
-err1:
- if (data->pages_uses_vmalloc)
- vfree(data->pages);
- else
- kfree(data->pages);
-err_free_data:
- kfree(data);
- list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
- if (info->page)
- __free_pages(info->page, info->order);
- list_del(&info->list);
- kfree(info);
+ for (i = 0; i < data->nrpages; i++) {
+ if (data->pages[i])
+ __free_page(data->pages[i]);
}
+ kfree(data->pages);
+err1:
+ kfree(data);
return ret;
}
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
- int i;
- struct scatterlist *sg;
- struct sg_table *table = buffer->sg_table;
struct ion_iommu_priv_data *data = buffer->priv_virt;
+ int i;
- if (!table)
- return;
if (!data)
return;
- for_each_sg(table->sgl, sg, table->nents, i)
- __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
+ for (i = 0; i < data->nrpages; i++)
+ __free_page(data->pages[i]);
- sg_free_table(table);
- kfree(table);
- table = 0;
- if (data->pages_uses_vmalloc)
- vfree(data->pages);
- else
- kfree(data->pages);
+ kfree(data->pages);
kfree(data);
}
@@ -287,7 +131,7 @@ void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
return NULL;
if (!ION_IS_CACHED(buffer->flags))
- page_prot = pgprot_writecombine(page_prot);
+ page_prot = pgprot_noncached(page_prot);
buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
@@ -307,34 +151,25 @@ void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
- struct sg_table *table = buffer->sg_table;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
- struct scatterlist *sg;
+ struct ion_iommu_priv_data *data = buffer->priv_virt;
int i;
+ unsigned long curr_addr;
+ if (!data)
+ return -EINVAL;
if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg_dma_len(sg);
-
- if (offset >= sg_dma_len(sg)) {
- offset -= sg_dma_len(sg);
- continue;
- } else if (offset) {
- page += offset / PAGE_SIZE;
- len = sg_dma_len(sg) - offset;
- offset = 0;
+ curr_addr = vma->vm_start;
+ for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
+ if (vm_insert_page(vma, curr_addr, data->pages[i])) {
+ /*
+ * This will fail the mmap which will
+ * clean up the vma space properly.
+ */
+ return -EINVAL;
}
- len = min(len, remainder);
- remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
- addr += len;
- if (addr >= vma->vm_end)
- return 0;
+ curr_addr += PAGE_SIZE;
}
return 0;
}
@@ -358,14 +193,6 @@ int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (buffer->sg_table->sgl->length > align)
- align = buffer->sg_table->sgl->length;
-
ret = msm_allocate_iova_address(domain_num, partition_num,
data->mapped_size, align,
&data->iova_addr);
@@ -444,30 +271,15 @@ static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
+ dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
+ dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
+ dmac_flush_range(vaddr, vaddr + length);
outer_cache_op = outer_flush_range;
break;
default:
@@ -498,6 +310,10 @@ static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
static struct ion_heap_ops iommu_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index d494f7a..273e57e 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -159,7 +159,6 @@ struct ion_heap_ops {
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
- * @priv: private heap data
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -173,7 +172,6 @@ struct ion_heap {
struct ion_heap_ops *ops;
int id;
const char *name;
- void *priv;
};
/**
@@ -256,10 +254,6 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
-#ifdef CONFIG_CMA
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
-void ion_cma_heap_destroy(struct ion_heap *);
-#endif
struct ion_heap *msm_get_contiguous_heap(void);
/**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index e94a585..c79c184 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -27,8 +27,6 @@
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
-#include <linux/msm_ion.h>
-#include <linux/dma-mapping.h>
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
@@ -185,30 +183,15 @@ int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
- if (!vaddr)
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- else
- dmac_clean_range(vaddr, vaddr + length);
+ dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
- if (!vaddr)
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- else
- dmac_inv_range(vaddr, vaddr + length);
+ dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
- if (!vaddr) {
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_TO_DEVICE);
- dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_FROM_DEVICE);
- } else {
- dmac_flush_range(vaddr, vaddr + length);
- }
+ dmac_flush_range(vaddr, vaddr + length);
outer_cache_op = outer_flush_range;
break;
default:
@@ -271,14 +254,6 @@ int ion_system_heap_map_iommu(struct ion_buffer *buffer,
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (table->sgl->length > align)
- align = table->sgl->length;
-
ret = msm_allocate_iova_address(domain_num, partition_num,
data->mapped_size, align,
&data->iova_addr);
@@ -507,7 +482,7 @@ int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
}
page = virt_to_page(buffer->vaddr);
- sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
+ sglist = vmalloc(sizeof(*sglist));
if (!sglist)
goto out1;
@@ -529,13 +504,13 @@ int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
if (ret)
goto out2;
}
- kfree(sglist);
+ vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- kfree(sglist);
+ vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 41e0a04..b274ba2 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
index eec66e6..69dd19e 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.h
+++ b/drivers/gpu/ion/msm/ion_cp_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,7 +15,7 @@
#define ION_CP_COMMON_H
#include <asm-generic/errno-base.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#define ION_CP_V1 1
#define ION_CP_V2 2
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index d896391..7cd28c5 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,96 +13,19 @@
#include <linux/export.h>
#include <linux/err.h>
-#include <linux/msm_ion.h>
+#include <linux/ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/memory_alloc.h>
-#include <linux/of.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/rwsem.h>
-#include <linux/uaccess.h>
#include <mach/ion.h>
#include <mach/msm_memtypes.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
-#define ION_COMPAT_STR "qcom,msm-ion"
-#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
-
static struct ion_device *idev;
static int num_heaps;
static struct ion_heap **heaps;
-struct ion_heap_desc {
- unsigned int id;
- enum ion_heap_type type;
- const char *name;
- unsigned int permission_type;
-};
-
-
-static struct ion_heap_desc ion_heap_meta[] = {
- {
- .id = ION_SYSTEM_HEAP_ID,
- .type = ION_HEAP_TYPE_SYSTEM,
- .name = ION_VMALLOC_HEAP_NAME,
- },
- {
- .id = ION_SYSTEM_CONTIG_HEAP_ID,
- .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .name = ION_KMALLOC_HEAP_NAME,
- },
- {
- .id = ION_CP_MM_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
- .name = ION_MM_HEAP_NAME,
- .permission_type = IPT_TYPE_MM_CARVEOUT,
- },
- {
- .id = ION_MM_FIRMWARE_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_MM_FIRMWARE_HEAP_NAME,
- },
- {
- .id = ION_CP_MFC_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
- .name = ION_MFC_HEAP_NAME,
- .permission_type = IPT_TYPE_MFC_SHAREDMEM,
- },
- {
- .id = ION_SF_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_SF_HEAP_NAME,
- },
- {
- .id = ION_IOMMU_HEAP_ID,
- .type = ION_HEAP_TYPE_IOMMU,
- .name = ION_IOMMU_HEAP_NAME,
- },
- {
- .id = ION_QSECOM_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_QSECOM_HEAP_NAME,
- },
- {
- .id = ION_AUDIO_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_AUDIO_HEAP_NAME,
- },
- {
- .id = ION_CP_WB_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
- .name = ION_WB_HEAP_NAME,
- },
- {
- .id = ION_CAMERA_HEAP_ID,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = ION_CAMERA_HEAP_NAME,
- },
-};
-
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name)
{
@@ -246,7 +169,7 @@ static void msm_ion_allocate(struct ion_platform_heap *heap)
if (!heap->base && heap->extra_data) {
unsigned int align = 0;
- switch ((int) heap->type) {
+ switch (heap->type) {
case ION_HEAP_TYPE_CARVEOUT:
align =
((struct ion_co_heap_pdata *) heap->extra_data)->align;
@@ -312,339 +235,11 @@ static void check_for_heap_overlap(const struct ion_platform_heap heap_list[],
}
}
-static int msm_init_extra_data(struct ion_platform_heap *heap,
- const struct ion_heap_desc *heap_desc)
-{
- int ret = 0;
-
- switch ((int) heap->type) {
- case ION_HEAP_TYPE_CP:
- {
- heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
- GFP_KERNEL);
- if (!heap->extra_data) {
- ret = -ENOMEM;
- } else {
- struct ion_cp_heap_pdata *extra = heap->extra_data;
- extra->permission_type = heap_desc->permission_type;
- }
- break;
- }
- case ION_HEAP_TYPE_CARVEOUT:
- {
- heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
- GFP_KERNEL);
- if (!heap->extra_data)
- ret = -ENOMEM;
- break;
- }
- default:
- heap->extra_data = 0;
- break;
- }
- return ret;
-}
-
-static int msm_ion_populate_heap(struct ion_platform_heap *heap)
-{
- unsigned int i;
- int ret = -EINVAL;
- unsigned int len = ARRAY_SIZE(ion_heap_meta);
- for (i = 0; i < len; ++i) {
- if (ion_heap_meta[i].id == heap->id) {
- heap->name = ion_heap_meta[i].name;
- heap->type = ion_heap_meta[i].type;
- ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
- break;
- }
- }
- if (ret)
- pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
- return ret;
-}
-
-static void free_pdata(const struct ion_platform_data *pdata)
-{
- unsigned int i;
- for (i = 0; i < pdata->nr; ++i)
- kfree(pdata->heaps[i].extra_data);
- kfree(pdata);
-}
-
-static int memtype_to_ion_memtype[] = {
- [MEMTYPE_SMI_KERNEL] = ION_SMI_TYPE,
- [MEMTYPE_SMI] = ION_SMI_TYPE,
- [MEMTYPE_EBI0] = ION_EBI_TYPE,
- [MEMTYPE_EBI1] = ION_EBI_TYPE,
-};
-
-static void msm_ion_get_heap_align(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
-
- int ret = of_property_read_u32(node, "qcom,heap-align", &val);
- if (!ret) {
- switch ((int) heap->type) {
- case ION_HEAP_TYPE_CP:
- {
- struct ion_cp_heap_pdata *extra =
- heap->extra_data;
- extra->align = val;
- break;
- }
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra =
- heap->extra_data;
- extra->align = val;
- break;
- }
- default:
- pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
- heap->name);
- break;
- }
- }
-}
-
-static int msm_ion_get_heap_size(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
- int ret = 0;
- const char *memory_name_prop;
-
- ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
- if (!ret) {
- heap->size = val;
- ret = of_property_read_string(node,
- "qcom,memory-reservation-type",
- &memory_name_prop);
-
- if (!ret && memory_name_prop) {
- val = msm_get_memory_type_from_name(memory_name_prop);
- if (val < 0) {
- ret = -EINVAL;
- goto out;
- }
- heap->memory_type = memtype_to_ion_memtype[val];
- }
- if (heap->size && (ret || !memory_name_prop)) {
- pr_err("%s: Need to specify reservation type\n",
- __func__);
- ret = -EINVAL;
- }
- } else {
- ret = 0;
- }
-out:
- return ret;
-}
-
-
-static void msm_ion_get_heap_adjacent(struct device_node *node,
- struct ion_platform_heap *heap)
-{
- unsigned int val;
- int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
- if (!ret) {
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra = heap->extra_data;
- extra->adjacent_mem_id = val;
- break;
- }
- default:
- pr_err("ION-heap %s: Cannot specify adjcent mem id for this type of heap\n",
- heap->name);
- break;
- }
- } else {
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- {
- struct ion_co_heap_pdata *extra = heap->extra_data;
- extra->adjacent_mem_id = INVALID_HEAP_ID;
- break;
- }
- default:
- break;
- }
- }
-}
-
-static struct ion_platform_data *msm_ion_parse_dt(
- const struct device_node *dt_node)
-{
- struct ion_platform_data *pdata = 0;
- struct device_node *node;
- uint32_t val = 0;
- int ret = 0;
- uint32_t num_heaps = 0;
- int idx = 0;
-
- for_each_child_of_node(dt_node, node)
- num_heaps++;
-
- if (!num_heaps)
- return ERR_PTR(-EINVAL);
-
- pdata = kzalloc(sizeof(struct ion_platform_data) +
- num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->nr = num_heaps;
-
- for_each_child_of_node(dt_node, node) {
- /**
- * TODO: Replace this with of_get_address() when this patch
- * gets merged: http://
- * permalink.gmane.org/gmane.linux.drivers.devicetree/18614
- */
- ret = of_property_read_u32(node, "reg", &val);
- if (ret) {
- pr_err("%s: Unable to find reg key", __func__);
- goto free_heaps;
- }
- pdata->heaps[idx].id = val;
-
- ret = msm_ion_populate_heap(&pdata->heaps[idx]);
- if (ret)
- goto free_heaps;
-
- msm_ion_get_heap_align(node, &pdata->heaps[idx]);
-
- ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
- if (ret)
- goto free_heaps;
-
- msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
-
- ++idx;
- }
- return pdata;
-
-free_heaps:
- free_pdata(pdata);
- return ERR_PTR(ret);
-}
-
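For reference, the device-tree shape consumed by the parser deleted above can be read back out of the property strings it queried; a reconstruction follows (node names and values are illustrative, only the property names and the compatible string come from the code):

    /*
     *  qcom,ion {
     *          compatible = "qcom,msm-ion";
     *
     *          qcom,ion-heap@8 {
     *                  reg = <8>;                 // heap id
     *                  qcom,heap-align = <0x1000>;
     *                  qcom,memory-reservation-type = "EBI1";
     *                  qcom,memory-reservation-size = <0x780000>;
     *                  qcom,heap-adjacent = <2>;
     *          };
     *  };
     */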
-static int check_vaddr_bounds(unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *vma;
- int ret = 1;
-
- if (end < start)
- goto out;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, start);
- if (vma && vma->vm_start < end) {
- if (start < vma->vm_start)
- goto out_up;
- if (end > vma->vm_end)
- goto out_up;
- ret = 0;
- }
-
-out_up:
- up_read(&mm->mmap_sem);
-out:
- return ret;
-}
-
-static long msm_ion_custom_ioctl(struct ion_client *client,
- unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case ION_IOC_CLEAN_CACHES:
- case ION_IOC_INV_CACHES:
- case ION_IOC_CLEAN_INV_CACHES:
- {
- struct ion_flush_data data;
- unsigned long start, end;
- struct ion_handle *handle = NULL;
- int ret;
-
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_flush_data)))
- return -EFAULT;
-
- start = (unsigned long) data.vaddr;
- end = (unsigned long) data.vaddr + data.length;
-
- if (start && check_vaddr_bounds(start, end)) {
- pr_err("%s: virtual address %p is out of bounds\n",
- __func__, data.vaddr);
- return -EINVAL;
- }
-
- if (!data.handle) {
- handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(handle)) {
- pr_info("%s: Could not import handle: %d\n",
- __func__, (int)handle);
- return -EINVAL;
- }
- }
-
- ret = ion_do_cache_op(client,
- data.handle ? data.handle : handle,
- data.vaddr, data.offset, data.length,
- cmd);
-
- if (!data.handle)
- ion_free(client, handle);
-
- if (ret < 0)
- return ret;
- break;
-
- }
- case ION_IOC_GET_FLAGS:
- {
- struct ion_flag_data data;
- int ret;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_flag_data)))
- return -EFAULT;
-
- ret = ion_handle_get_flags(client, data.handle, &data.flags);
- if (ret < 0)
- return ret;
- if (copy_to_user((void __user *)arg, &data,
- sizeof(struct ion_flag_data)))
- return -EFAULT;
- break;
- }
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
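
For context, the custom ioctl removed above was how user space drove explicit cache maintenance on ION buffers: the command is issued on the ION client fd and, when no handle is supplied, the driver imports one from the dmabuf fd. A minimal caller sketch, assuming the old msm_ion.h UAPI of this era (buf_fd, mapped_ptr, buf_len and ion_client_fd are illustrative names, not from this patch):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/msm_ion.h>

	/* Clean CPU caches over a user mapping of an ION buffer before
	 * device access (sketch; setup and error handling omitted). */
	struct ion_flush_data flush = {
		.handle = NULL,       /* NULL: driver imports from .fd */
		.fd     = buf_fd,     /* dmabuf fd of the buffer */
		.vaddr  = mapped_ptr, /* user mapping, bounds-checked above */
		.offset = 0,
		.length = buf_len,
	};
	if (ioctl(ion_client_fd, ION_IOC_CLEAN_CACHES, &flush) < 0)
		perror("ION_IOC_CLEAN_CACHES");
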
static int msm_ion_probe(struct platform_device *pdev)
{
- struct ion_platform_data *pdata;
- unsigned int pdata_needs_to_be_freed;
- int err = -1;
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
int i;
- if (pdev->dev.of_node) {
- pdata = msm_ion_parse_dt(pdev->dev.of_node);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto out;
- }
- pdata_needs_to_be_freed = 1;
- } else {
- pdata = pdev->dev.platform_data;
- pdata_needs_to_be_freed = 0;
- }
num_heaps = pdata->nr;
@@ -655,7 +250,7 @@ static int msm_ion_probe(struct platform_device *pdev)
goto out;
}
- idev = ion_device_create(msm_ion_custom_ioctl);
+ idev = ion_device_create(NULL);
if (IS_ERR_OR_NULL(idev)) {
err = PTR_ERR(idev);
goto freeheaps;
@@ -686,8 +281,6 @@ static int msm_ion_probe(struct platform_device *pdev)
ion_device_add_heap(idev, heaps[i]);
}
- if (pdata_needs_to_be_freed)
- free_pdata(pdata);
check_for_heap_overlap(pdata->heaps, num_heaps);
platform_set_drvdata(pdev, idev);
@@ -695,8 +288,6 @@ static int msm_ion_probe(struct platform_device *pdev)
freeheaps:
kfree(heaps);
- if (pdata_needs_to_be_freed)
- free_pdata(pdata);
out:
return err;
}
@@ -714,19 +305,10 @@ static int msm_ion_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id msm_ion_match_table[] = {
- {.compatible = ION_COMPAT_STR},
- {},
-};
-EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
-
static struct platform_driver msm_ion_driver = {
.probe = msm_ion_probe,
.remove = msm_ion_remove,
- .driver = {
- .name = "ion-msm",
- .of_match_table = msm_ion_match_table,
- },
+ .driver = { .name = "ion-msm" }
};
static int __init msm_ion_init(void)
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index fec5363..6cdb5f1 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -9,8 +9,7 @@ msm_kgsl_core-y = \
kgsl_mmu.o \
kgsl_gpummu.o \
kgsl_iommu.o \
- kgsl_snapshot.o \
- kgsl_events.o
+ kgsl_snapshot.o
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
@@ -18,7 +17,6 @@ msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
msm_kgsl_core-$(CONFIG_MSM_DCVS) += kgsl_pwrscale_msm.o
-msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
msm_adreno-y += \
adreno_ringbuffer.o \
@@ -37,7 +35,6 @@ msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
msm_z180-y += \
z180.o \
- z180_postmortem.o \
z180_trace.o
msm_kgsl_core-objs = $(msm_kgsl_core-y)
diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h
deleted file mode 100644
index e1e2c15..0000000
--- a/drivers/gpu/msm/a2xx_reg.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Copyright (c) 2002,2007-2012,2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __A200_REG_H
-#define __A200_REG_H
-
-enum VGT_EVENT_TYPE {
- VS_DEALLOC = 0,
- PS_DEALLOC = 1,
- VS_DONE_TS = 2,
- PS_DONE_TS = 3,
- CACHE_FLUSH_TS = 4,
- CONTEXT_DONE = 5,
- CACHE_FLUSH = 6,
- VIZQUERY_START = 7,
- VIZQUERY_END = 8,
- SC_WAIT_WC = 9,
- RST_PIX_CNT = 13,
- RST_VTX_CNT = 14,
- TILE_FLUSH = 15,
- CACHE_FLUSH_AND_INV_TS_EVENT = 20,
- ZPASS_DONE = 21,
- CACHE_FLUSH_AND_INV_EVENT = 22,
- PERFCOUNTER_START = 23,
- PERFCOUNTER_STOP = 24,
- VS_FETCH_DONE = 27,
- FACENESS_FLUSH = 28,
-};
-
-enum COLORFORMATX {
- COLORX_4_4_4_4 = 0,
- COLORX_1_5_5_5 = 1,
- COLORX_5_6_5 = 2,
- COLORX_8 = 3,
- COLORX_8_8 = 4,
- COLORX_8_8_8_8 = 5,
- COLORX_S8_8_8_8 = 6,
- COLORX_16_FLOAT = 7,
- COLORX_16_16_FLOAT = 8,
- COLORX_16_16_16_16_FLOAT = 9,
- COLORX_32_FLOAT = 10,
- COLORX_32_32_FLOAT = 11,
- COLORX_32_32_32_32_FLOAT = 12,
- COLORX_2_3_3 = 13,
- COLORX_8_8_8 = 14,
-};
-
-enum SURFACEFORMAT {
- FMT_1_REVERSE = 0,
- FMT_1 = 1,
- FMT_8 = 2,
- FMT_1_5_5_5 = 3,
- FMT_5_6_5 = 4,
- FMT_6_5_5 = 5,
- FMT_8_8_8_8 = 6,
- FMT_2_10_10_10 = 7,
- FMT_8_A = 8,
- FMT_8_B = 9,
- FMT_8_8 = 10,
- FMT_Cr_Y1_Cb_Y0 = 11,
- FMT_Y1_Cr_Y0_Cb = 12,
- FMT_5_5_5_1 = 13,
- FMT_8_8_8_8_A = 14,
- FMT_4_4_4_4 = 15,
- FMT_10_11_11 = 16,
- FMT_11_11_10 = 17,
- FMT_DXT1 = 18,
- FMT_DXT2_3 = 19,
- FMT_DXT4_5 = 20,
- FMT_24_8 = 22,
- FMT_24_8_FLOAT = 23,
- FMT_16 = 24,
- FMT_16_16 = 25,
- FMT_16_16_16_16 = 26,
- FMT_16_EXPAND = 27,
- FMT_16_16_EXPAND = 28,
- FMT_16_16_16_16_EXPAND = 29,
- FMT_16_FLOAT = 30,
- FMT_16_16_FLOAT = 31,
- FMT_16_16_16_16_FLOAT = 32,
- FMT_32 = 33,
- FMT_32_32 = 34,
- FMT_32_32_32_32 = 35,
- FMT_32_FLOAT = 36,
- FMT_32_32_FLOAT = 37,
- FMT_32_32_32_32_FLOAT = 38,
- FMT_32_AS_8 = 39,
- FMT_32_AS_8_8 = 40,
- FMT_16_MPEG = 41,
- FMT_16_16_MPEG = 42,
- FMT_8_INTERLACED = 43,
- FMT_32_AS_8_INTERLACED = 44,
- FMT_32_AS_8_8_INTERLACED = 45,
- FMT_16_INTERLACED = 46,
- FMT_16_MPEG_INTERLACED = 47,
- FMT_16_16_MPEG_INTERLACED = 48,
- FMT_DXN = 49,
- FMT_8_8_8_8_AS_16_16_16_16 = 50,
- FMT_DXT1_AS_16_16_16_16 = 51,
- FMT_DXT2_3_AS_16_16_16_16 = 52,
- FMT_DXT4_5_AS_16_16_16_16 = 53,
- FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_11_11_AS_16_16_16_16 = 55,
- FMT_11_11_10_AS_16_16_16_16 = 56,
- FMT_32_32_32_FLOAT = 57,
- FMT_DXT3A = 58,
- FMT_DXT5A = 59,
- FMT_CTX1 = 60,
- FMT_DXT3A_AS_1_1_1_1 = 61
-};
-
-#define REG_PERF_MODE_CNT 0x0
-#define REG_PERF_STATE_RESET 0x0
-#define REG_PERF_STATE_ENABLE 0x1
-#define REG_PERF_STATE_FREEZE 0x2
-
-#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
-#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
-#define RB_EDRAM_INFO_UNUSED0_SIZE 8
-#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
-
-struct rb_edram_info_t {
- unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
- unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
- unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
- unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
-};
-
-union reg_rb_edram_info {
- unsigned int val;
- struct rb_edram_info_t f;
-};
-
-#define RBBM_READ_ERROR_ADDRESS_MASK 0x0001fffc
-#define RBBM_READ_ERROR_REQUESTER (1<<30)
-#define RBBM_READ_ERROR_ERROR (1<<31)
-
-#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
-#define CP_RB_CNTL_UNUSED0_SIZE 2
-#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
-#define CP_RB_CNTL_UNUSED1_SIZE 2
-#define CP_RB_CNTL_BUF_SWAP_SIZE 2
-#define CP_RB_CNTL_UNUSED2_SIZE 2
-#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
-#define CP_RB_CNTL_UNUSED3_SIZE 6
-#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
-#define CP_RB_CNTL_UNUSED4_SIZE 3
-#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
-
-struct cp_rb_cntl_t {
- unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
- unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
- unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
- unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
- unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
- unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
- unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
- unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
- unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
- unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
- unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
-};
-
-union reg_cp_rb_cntl {
- unsigned int val:32;
- struct cp_rb_cntl_t f;
-};
-
-#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
-#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
-
-
-#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
-#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
-
-#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
-#define RBBM_INT_CNTL__PROTECT_INT_MASK 0x00100000L
-#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
-#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
-
-#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
-#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
-#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
-#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
-#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
-#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
-#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
-#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
-#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
-#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
-#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
-#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
-#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
-#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
-#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
-#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
-#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
-#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
-#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
-#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
-
-#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
-#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
-#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
-#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
-#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
-#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
-#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
-#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
-#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
-
-#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
-#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
-#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
-#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
-
-#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
-#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
-
-#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
-#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
-#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
-#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
-#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
-#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
-#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
-#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
-#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
-#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
-#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
-#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
-#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
-#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
-
-#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
-#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
-#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
-#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
-
-#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
-#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
-#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
-
-#define REG_CP_CSQ_IB1_STAT 0x01FE
-#define REG_CP_CSQ_IB2_STAT 0x01FF
-#define REG_CP_CSQ_RB_STAT 0x01FD
-#define REG_CP_DEBUG 0x01FC
-#define REG_CP_IB1_BASE 0x0458
-#define REG_CP_IB1_BUFSZ 0x0459
-#define REG_CP_IB2_BASE 0x045A
-#define REG_CP_IB2_BUFSZ 0x045B
-#define REG_CP_INT_ACK 0x01F4
-#define REG_CP_INT_CNTL 0x01F2
-#define REG_CP_INT_STATUS 0x01F3
-#define REG_CP_ME_CNTL 0x01F6
-#define REG_CP_ME_RAM_DATA 0x01FA
-#define REG_CP_ME_RAM_WADDR 0x01F8
-#define REG_CP_ME_RAM_RADDR 0x01F9
-#define REG_CP_ME_STATUS 0x01F7
-#define REG_CP_PFP_UCODE_ADDR 0x00C0
-#define REG_CP_PFP_UCODE_DATA 0x00C1
-#define REG_CP_QUEUE_THRESHOLDS 0x01D5
-#define REG_CP_RB_BASE 0x01C0
-#define REG_CP_RB_CNTL 0x01C1
-#define REG_CP_RB_RPTR 0x01C4
-#define REG_CP_RB_RPTR_ADDR 0x01C3
-#define REG_CP_RB_RPTR_WR 0x01C7
-#define REG_CP_RB_WPTR 0x01C5
-#define REG_CP_RB_WPTR_BASE 0x01C8
-#define REG_CP_RB_WPTR_DELAY 0x01C6
-#define REG_CP_STAT 0x047F
-#define REG_CP_STATE_DEBUG_DATA 0x01ED
-#define REG_CP_STATE_DEBUG_INDEX 0x01EC
-#define REG_CP_ST_BASE 0x044D
-#define REG_CP_ST_BUFSZ 0x044E
-
-#define REG_CP_PERFMON_CNTL 0x0444
-#define REG_CP_PERFCOUNTER_SELECT 0x0445
-#define REG_CP_PERFCOUNTER_LO 0x0446
-#define REG_CP_PERFCOUNTER_HI 0x0447
-
-#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
-#define REG_RBBM_PERFCOUNTER1_HI 0x0398
-#define REG_RBBM_PERFCOUNTER1_LO 0x0397
-
-#define REG_SQ_PERFCOUNTER3_SELECT 0x0DCB
-#define REG_SQ_PERFCOUNTER3_LO 0x0DD2
-#define REG_SQ_PERFCOUNTER3_HI 0x0DD3
-
-#define REG_MASTER_INT_SIGNAL 0x03B7
-
-#define REG_PA_CL_VPORT_XSCALE 0x210F
-#define REG_PA_CL_VPORT_ZOFFSET 0x2114
-#define REG_PA_CL_VPORT_ZSCALE 0x2113
-#define REG_PA_CL_CLIP_CNTL 0x2204
-#define REG_PA_CL_VTE_CNTL 0x2206
-#define REG_PA_SC_AA_MASK 0x2312
-#define REG_PA_SC_LINE_CNTL 0x2300
-#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
-#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
-#define REG_PA_SC_VIZ_QUERY 0x2293
-#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
-#define REG_PA_SC_WINDOW_OFFSET 0x2080
-#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
-#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
-#define REG_PA_SU_FACE_DATA 0x0C86
-#define REG_PA_SU_POINT_SIZE 0x2280
-#define REG_PA_SU_LINE_CNTL 0x2282
-#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
-#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
-#define REG_PA_SU_SC_MODE_CNTL 0x2205
-
-#define REG_PC_INDEX_OFFSET 0x2102
-
-#define REG_RBBM_CNTL 0x003B
-#define REG_RBBM_INT_ACK 0x03B6
-#define REG_RBBM_INT_CNTL 0x03B4
-#define REG_RBBM_INT_STATUS 0x03B5
-#define REG_RBBM_PATCH_RELEASE 0x0001
-#define REG_RBBM_PERIPHID1 0x03F9
-#define REG_RBBM_PERIPHID2 0x03FA
-#define REG_RBBM_DEBUG 0x039B
-#define REG_RBBM_DEBUG_OUT 0x03A0
-#define REG_RBBM_DEBUG_CNTL 0x03A1
-#define REG_RBBM_PM_OVERRIDE1 0x039C
-#define REG_RBBM_PM_OVERRIDE2 0x039D
-#define REG_RBBM_READ_ERROR 0x03B3
-#define REG_RBBM_SOFT_RESET 0x003C
-#define REG_RBBM_STATUS 0x05D0
-
-/*A2XX Protection */
-#define REG_RBBM_PROTECT_0 0x0140
-#define REG_RBBM_PROTECT_1 0x0141
-#define REG_RBBM_PROTECT_2 0x0142
-#define REG_RBBM_PROTECT_3 0x0143
-#define REG_RBBM_PROTECT_4 0x0144
-#define REG_RBBM_PROTECT_5 0x0145
-#define REG_RBBM_PROTECT_6 0x0146
-#define REG_RBBM_PROTECT_7 0x0147
-#define REG_RBBM_PROTECT_8 0x0148
-#define REG_RBBM_PROTECT_9 0x0149
-#define REG_RBBM_PROTECT_A 0x014A
-#define REG_RBBM_PROTECT_B 0x014B
-#define REG_RBBM_PROTECT_C 0x014C
-#define REG_RBBM_PROTECT_D 0x014D
-#define REG_RBBM_PROTECT_E 0x014E
-#define REG_RBBM_PROTECT_F 0x014F
-
-#define REG_RB_COLORCONTROL 0x2202
-#define REG_RB_COLOR_DEST_MASK 0x2326
-#define REG_RB_COLOR_MASK 0x2104
-#define REG_RB_COPY_CONTROL 0x2318
-#define REG_RB_DEPTHCONTROL 0x2200
-#define REG_RB_EDRAM_INFO 0x0F02
-#define REG_RB_MODECONTROL 0x2208
-#define REG_RB_SURFACE_INFO 0x2000
-#define REG_RB_SAMPLE_POS 0x220a
-
-#define REG_SCRATCH_ADDR 0x01DD
-#define REG_SCRATCH_REG0 0x0578
-#define REG_SCRATCH_REG2 0x057A
-#define REG_SCRATCH_UMSK 0x01DC
-
-#define REG_SQ_CF_BOOLEANS 0x4900
-#define REG_SQ_CF_LOOP 0x4908
-#define REG_SQ_GPR_MANAGEMENT 0x0D00
-#define REG_SQ_FLOW_CONTROL 0x0D01
-#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
-#define REG_SQ_INT_ACK 0x0D36
-#define REG_SQ_INT_CNTL 0x0D34
-#define REG_SQ_INT_STATUS 0x0D35
-#define REG_SQ_PROGRAM_CNTL 0x2180
-#define REG_SQ_PS_PROGRAM 0x21F6
-#define REG_SQ_VS_PROGRAM 0x21F7
-#define REG_SQ_WRAPPING_0 0x2183
-#define REG_SQ_WRAPPING_1 0x2184
-
-#define REG_VGT_ENHANCE 0x2294
-#define REG_VGT_INDX_OFFSET 0x2102
-#define REG_VGT_MAX_VTX_INDX 0x2100
-#define REG_VGT_MIN_VTX_INDX 0x2101
-
-#define REG_TP0_CHICKEN 0x0E1E
-#define REG_TC_CNTL_STATUS 0x0E00
-#define REG_PA_SC_AA_CONFIG 0x2301
-#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
-#define REG_SQ_INTERPOLATOR_CNTL 0x2182
-#define REG_RB_DEPTH_INFO 0x2002
-#define REG_COHER_DEST_BASE_0 0x2006
-#define REG_RB_FOG_COLOR 0x2109
-#define REG_RB_STENCILREFMASK_BF 0x210C
-#define REG_PA_SC_LINE_STIPPLE 0x2283
-#define REG_SQ_PS_CONST 0x2308
-#define REG_RB_DEPTH_CLEAR 0x231D
-#define REG_RB_SAMPLE_COUNT_CTL 0x2324
-#define REG_SQ_CONSTANT_0 0x4000
-#define REG_SQ_FETCH_0 0x4800
-
-#define REG_COHER_BASE_PM4 0xA2A
-#define REG_COHER_STATUS_PM4 0xA2B
-#define REG_COHER_SIZE_PM4 0xA29
-
-/*registers added in adreno220*/
-#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
-#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
-#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
-#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
-#define REG_A220_GRAS_CONTROL 0x2210
-#define REG_A220_VSC_BIN_SIZE 0x0C01
-#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
-
-/*registers added in adreno225*/
-#define REG_A225_RB_COLOR_INFO3 0x2005
-#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
-#define REG_A225_GRAS_UCP0X 0x2340
-#define REG_A225_GRAS_UCP5W 0x2357
-#define REG_A225_GRAS_UCP_ENABLED 0x2360
-#define REG_VSC_BINNING_ENABLE 0x0C00
-
-/* Debug registers used by snapshot */
-#define REG_PA_SU_DEBUG_CNTL 0x0C80
-#define REG_PA_SU_DEBUG_DATA 0x0C81
-#define REG_RB_DEBUG_CNTL 0x0F26
-#define REG_RB_DEBUG_DATA 0x0F27
-#define REG_PC_DEBUG_CNTL 0x0C38
-#define REG_PC_DEBUG_DATA 0x0C39
-#define REG_GRAS_DEBUG_CNTL 0x0C80
-#define REG_GRAS_DEBUG_DATA 0x0C81
-#define REG_SQ_DEBUG_MISC 0x0D05
-#define REG_SQ_DEBUG_INPUT_FSM 0x0DAE
-#define REG_SQ_DEBUG_CONST_MGR_FSM 0x0DAF
-#define REG_SQ_DEBUG_EXP_ALLOC 0x0DB3
-#define REG_SQ_DEBUG_FSM_ALU_0 0x0DB1
-#define REG_SQ_DEBUG_FSM_ALU_1 0x0DB2
-#define REG_SQ_DEBUG_PTR_BUFF 0x0DB4
-#define REG_SQ_DEBUG_GPR_VTX 0x0DB5
-#define REG_SQ_DEBUG_GPR_PIX 0x0DB6
-#define REG_SQ_DEBUG_TB_STATUS_SEL 0x0DB7
-#define REG_SQ_DEBUG_VTX_TB_0 0x0DB8
-#define REG_SQ_DEBUG_VTX_TB_1 0x0DB9
-#define REG_SQ_DEBUG_VTX_TB_STATE_MEM 0x0DBB
-#define REG_SQ_DEBUG_TP_FSM 0x0DB0
-#define REG_SQ_DEBUG_VTX_TB_STATUS_REG 0x0DBA
-#define REG_SQ_DEBUG_PIX_TB_0 0x0DBC
-#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x0DBD
-#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x0DBE
-#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x0DBF
-#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x0DC0
-#define REG_SQ_DEBUG_PIX_TB_STATE_MEM 0x0DC1
-#define REG_SQ_DEBUG_MISC_0 0x2309
-#define REG_SQ_DEBUG_MISC_1 0x230A
-
-#endif /* __A200_REG_H */
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
deleted file mode 100644
index 21d4759..0000000
--- a/drivers/gpu/msm/a3xx_reg.h
+++ /dev/null
@@ -1,712 +0,0 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _A300_REG_H
-#define _A300_REG_H
-
-/* Interrupt bit positions within RBBM_INT_0 */
-
-#define A3XX_INT_RBBM_GPU_IDLE 0
-#define A3XX_INT_RBBM_AHB_ERROR 1
-#define A3XX_INT_RBBM_REG_TIMEOUT 2
-#define A3XX_INT_RBBM_ME_MS_TIMEOUT 3
-#define A3XX_INT_RBBM_PFP_MS_TIMEOUT 4
-#define A3XX_INT_RBBM_ATB_BUS_OVERFLOW 5
-#define A3XX_INT_VFD_ERROR 6
-#define A3XX_INT_CP_SW_INT 7
-#define A3XX_INT_CP_T0_PACKET_IN_IB 8
-#define A3XX_INT_CP_OPCODE_ERROR 9
-#define A3XX_INT_CP_RESERVED_BIT_ERROR 10
-#define A3XX_INT_CP_HW_FAULT 11
-#define A3XX_INT_CP_DMA 12
-#define A3XX_INT_CP_IB2_INT 13
-#define A3XX_INT_CP_IB1_INT 14
-#define A3XX_INT_CP_RB_INT 15
-#define A3XX_INT_CP_REG_PROTECT_FAULT 16
-#define A3XX_INT_CP_RB_DONE_TS 17
-#define A3XX_INT_CP_VS_DONE_TS 18
-#define A3XX_INT_CP_PS_DONE_TS 19
-#define A3XX_INT_CACHE_FLUSH_TS 20
-#define A3XX_INT_CP_AHB_ERROR_HALT 21
-#define A3XX_INT_MISC_HANG_DETECT 24
-#define A3XX_INT_UCHE_OOB_ACCESS 25
-
-/* Register definitions */
-
-#define A3XX_RBBM_HW_VERSION 0x000
-#define A3XX_RBBM_HW_RELEASE 0x001
-#define A3XX_RBBM_HW_CONFIGURATION 0x002
-#define A3XX_RBBM_CLOCK_CTL 0x010
-#define A3XX_RBBM_SP_HYST_CNT 0x012
-#define A3XX_RBBM_SW_RESET_CMD 0x018
-#define A3XX_RBBM_AHB_CTL0 0x020
-#define A3XX_RBBM_AHB_CTL1 0x021
-#define A3XX_RBBM_AHB_CMD 0x022
-#define A3XX_RBBM_AHB_ERROR_STATUS 0x027
-#define A3XX_RBBM_GPR0_CTL 0x02E
-/* This is the same register as on A2XX, just in a different place */
-#define A3XX_RBBM_STATUS 0x030
-#define A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x33
-#define A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x50
-#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x51
-#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x54
-#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x57
-#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x5A
-#define A3XX_RBBM_INT_CLEAR_CMD 0x061
-#define A3XX_RBBM_INT_0_MASK 0x063
-#define A3XX_RBBM_INT_0_STATUS 0x064
-#define A3XX_RBBM_PERFCTR_CTL 0x80
-#define A3XX_RBBM_PERFCTR_LOAD_CMD0 0x81
-#define A3XX_RBBM_PERFCTR_LOAD_CMD1 0x82
-#define A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x84
-#define A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x85
-#define A3XX_RBBM_PERFCOUNTER0_SELECT 0x86
-#define A3XX_RBBM_PERFCOUNTER1_SELECT 0x87
-#define A3XX_RBBM_GPU_BUSY_MASKED 0x88
-#define A3XX_RBBM_PERFCTR_CP_0_LO 0x90
-#define A3XX_RBBM_PERFCTR_CP_0_HI 0x91
-#define A3XX_RBBM_PERFCTR_RBBM_0_LO 0x92
-#define A3XX_RBBM_PERFCTR_RBBM_0_HI 0x93
-#define A3XX_RBBM_PERFCTR_RBBM_1_LO 0x94
-#define A3XX_RBBM_PERFCTR_RBBM_1_HI 0x95
-#define A3XX_RBBM_PERFCTR_PC_0_LO 0x96
-#define A3XX_RBBM_PERFCTR_PC_0_HI 0x97
-#define A3XX_RBBM_PERFCTR_PC_1_LO 0x98
-#define A3XX_RBBM_PERFCTR_PC_1_HI 0x99
-#define A3XX_RBBM_PERFCTR_PC_2_LO 0x9A
-#define A3XX_RBBM_PERFCTR_PC_2_HI 0x9B
-#define A3XX_RBBM_PERFCTR_PC_3_LO 0x9C
-#define A3XX_RBBM_PERFCTR_PC_3_HI 0x9D
-#define A3XX_RBBM_PERFCTR_VFD_0_LO 0x9E
-#define A3XX_RBBM_PERFCTR_VFD_0_HI 0x9F
-#define A3XX_RBBM_PERFCTR_VFD_1_LO 0xA0
-#define A3XX_RBBM_PERFCTR_VFD_1_HI 0xA1
-#define A3XX_RBBM_PERFCTR_HLSQ_0_LO 0xA2
-#define A3XX_RBBM_PERFCTR_HLSQ_0_HI 0xA3
-#define A3XX_RBBM_PERFCTR_HLSQ_1_LO 0xA4
-#define A3XX_RBBM_PERFCTR_HLSQ_1_HI 0xA5
-#define A3XX_RBBM_PERFCTR_HLSQ_2_LO 0xA6
-#define A3XX_RBBM_PERFCTR_HLSQ_2_HI 0xA7
-#define A3XX_RBBM_PERFCTR_HLSQ_3_LO 0xA8
-#define A3XX_RBBM_PERFCTR_HLSQ_3_HI 0xA9
-#define A3XX_RBBM_PERFCTR_HLSQ_4_LO 0xAA
-#define A3XX_RBBM_PERFCTR_HLSQ_4_HI 0xAB
-#define A3XX_RBBM_PERFCTR_HLSQ_5_LO 0xAC
-#define A3XX_RBBM_PERFCTR_HLSQ_5_HI 0xAD
-#define A3XX_RBBM_PERFCTR_VPC_0_LO 0xAE
-#define A3XX_RBBM_PERFCTR_VPC_0_HI 0xAF
-#define A3XX_RBBM_PERFCTR_VPC_1_LO 0xB0
-#define A3XX_RBBM_PERFCTR_VPC_1_HI 0xB1
-#define A3XX_RBBM_PERFCTR_TSE_0_LO 0xB2
-#define A3XX_RBBM_PERFCTR_TSE_0_HI 0xB3
-#define A3XX_RBBM_PERFCTR_TSE_1_LO 0xB4
-#define A3XX_RBBM_PERFCTR_TSE_1_HI 0xB5
-#define A3XX_RBBM_PERFCTR_RAS_0_LO 0xB6
-#define A3XX_RBBM_PERFCTR_RAS_0_HI 0xB7
-#define A3XX_RBBM_PERFCTR_RAS_1_LO 0xB8
-#define A3XX_RBBM_PERFCTR_RAS_1_HI 0xB9
-#define A3XX_RBBM_PERFCTR_UCHE_0_LO 0xBA
-#define A3XX_RBBM_PERFCTR_UCHE_0_HI 0xBB
-#define A3XX_RBBM_PERFCTR_UCHE_1_LO 0xBC
-#define A3XX_RBBM_PERFCTR_UCHE_1_HI 0xBD
-#define A3XX_RBBM_PERFCTR_UCHE_2_LO 0xBE
-#define A3XX_RBBM_PERFCTR_UCHE_2_HI 0xBF
-#define A3XX_RBBM_PERFCTR_UCHE_3_LO 0xC0
-#define A3XX_RBBM_PERFCTR_UCHE_3_HI 0xC1
-#define A3XX_RBBM_PERFCTR_UCHE_4_LO 0xC2
-#define A3XX_RBBM_PERFCTR_UCHE_4_HI 0xC3
-#define A3XX_RBBM_PERFCTR_UCHE_5_LO 0xC4
-#define A3XX_RBBM_PERFCTR_UCHE_5_HI 0xC5
-#define A3XX_RBBM_PERFCTR_TP_0_LO 0xC6
-#define A3XX_RBBM_PERFCTR_TP_0_HI 0xC7
-#define A3XX_RBBM_PERFCTR_TP_1_LO 0xC8
-#define A3XX_RBBM_PERFCTR_TP_1_HI 0xC9
-#define A3XX_RBBM_PERFCTR_TP_2_LO 0xCA
-#define A3XX_RBBM_PERFCTR_TP_2_HI 0xCB
-#define A3XX_RBBM_PERFCTR_TP_3_LO 0xCC
-#define A3XX_RBBM_PERFCTR_TP_3_HI 0xCD
-#define A3XX_RBBM_PERFCTR_TP_4_LO 0xCE
-#define A3XX_RBBM_PERFCTR_TP_4_HI 0xCF
-#define A3XX_RBBM_PERFCTR_TP_5_LO 0xD0
-#define A3XX_RBBM_PERFCTR_TP_5_HI 0xD1
-#define A3XX_RBBM_PERFCTR_SP_0_LO 0xD2
-#define A3XX_RBBM_PERFCTR_SP_0_HI 0xD3
-#define A3XX_RBBM_PERFCTR_SP_1_LO 0xD4
-#define A3XX_RBBM_PERFCTR_SP_1_HI 0xD5
-#define A3XX_RBBM_PERFCTR_SP_2_LO 0xD6
-#define A3XX_RBBM_PERFCTR_SP_2_HI 0xD7
-#define A3XX_RBBM_PERFCTR_SP_3_LO 0xD8
-#define A3XX_RBBM_PERFCTR_SP_3_HI 0xD9
-#define A3XX_RBBM_PERFCTR_SP_4_LO 0xDA
-#define A3XX_RBBM_PERFCTR_SP_4_HI 0xDB
-#define A3XX_RBBM_PERFCTR_SP_5_LO 0xDC
-#define A3XX_RBBM_PERFCTR_SP_5_HI 0xDD
-#define A3XX_RBBM_PERFCTR_SP_6_LO 0xDE
-#define A3XX_RBBM_PERFCTR_SP_6_HI 0xDF
-#define A3XX_RBBM_PERFCTR_SP_7_LO 0xE0
-#define A3XX_RBBM_PERFCTR_SP_7_HI 0xE1
-#define A3XX_RBBM_PERFCTR_RB_0_LO 0xE2
-#define A3XX_RBBM_PERFCTR_RB_0_HI 0xE3
-#define A3XX_RBBM_PERFCTR_RB_1_LO 0xE4
-#define A3XX_RBBM_PERFCTR_RB_1_HI 0xE5
-
-#define A3XX_RBBM_RBBM_CTL 0x100
-#define A3XX_RBBM_PERFCTR_PWR_0_LO 0x0EA
-#define A3XX_RBBM_PERFCTR_PWR_0_HI 0x0EB
-#define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
-#define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
-#define A3XX_RBBM_DEBUG_BUS_CTL 0x111
-#define A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x112
-
-/* The following two are the same as on A2XX, just in a different place */
-#define A3XX_CP_PFP_UCODE_ADDR 0x1C9
-#define A3XX_CP_PFP_UCODE_DATA 0x1CA
-#define A3XX_CP_ROQ_ADDR 0x1CC
-#define A3XX_CP_ROQ_DATA 0x1CD
-#define A3XX_CP_MERCIU_ADDR 0x1D1
-#define A3XX_CP_MERCIU_DATA 0x1D2
-#define A3XX_CP_MERCIU_DATA2 0x1D3
-#define A3XX_CP_MEQ_ADDR 0x1DA
-#define A3XX_CP_MEQ_DATA 0x1DB
-#define A3XX_CP_PERFCOUNTER_SELECT 0x445
-#define A3XX_CP_HW_FAULT 0x45C
-#define A3XX_CP_AHB_FAULT 0x54D
-#define A3XX_CP_PROTECT_CTRL 0x45E
-#define A3XX_CP_PROTECT_STATUS 0x45F
-#define A3XX_CP_PROTECT_REG_0 0x460
-#define A3XX_CP_PROTECT_REG_1 0x461
-#define A3XX_CP_PROTECT_REG_2 0x462
-#define A3XX_CP_PROTECT_REG_3 0x463
-#define A3XX_CP_PROTECT_REG_4 0x464
-#define A3XX_CP_PROTECT_REG_5 0x465
-#define A3XX_CP_PROTECT_REG_6 0x466
-#define A3XX_CP_PROTECT_REG_7 0x467
-#define A3XX_CP_PROTECT_REG_8 0x468
-#define A3XX_CP_PROTECT_REG_9 0x469
-#define A3XX_CP_PROTECT_REG_A 0x46A
-#define A3XX_CP_PROTECT_REG_B 0x46B
-#define A3XX_CP_PROTECT_REG_C 0x46C
-#define A3XX_CP_PROTECT_REG_D 0x46D
-#define A3XX_CP_PROTECT_REG_E 0x46E
-#define A3XX_CP_PROTECT_REG_F 0x46F
-#define A3XX_CP_SCRATCH_REG2 0x57A
-#define A3XX_CP_SCRATCH_REG3 0x57B
-#define A3XX_VSC_BIN_SIZE 0xC01
-#define A3XX_VSC_SIZE_ADDRESS 0xC02
-#define A3XX_VSC_PIPE_CONFIG_0 0xC06
-#define A3XX_VSC_PIPE_DATA_ADDRESS_0 0xC07
-#define A3XX_VSC_PIPE_DATA_LENGTH_0 0xC08
-#define A3XX_VSC_PIPE_CONFIG_1 0xC09
-#define A3XX_VSC_PIPE_DATA_ADDRESS_1 0xC0A
-#define A3XX_VSC_PIPE_DATA_LENGTH_1 0xC0B
-#define A3XX_VSC_PIPE_CONFIG_2 0xC0C
-#define A3XX_VSC_PIPE_DATA_ADDRESS_2 0xC0D
-#define A3XX_VSC_PIPE_DATA_LENGTH_2 0xC0E
-#define A3XX_VSC_PIPE_CONFIG_3 0xC0F
-#define A3XX_VSC_PIPE_DATA_ADDRESS_3 0xC10
-#define A3XX_VSC_PIPE_DATA_LENGTH_3 0xC11
-#define A3XX_VSC_PIPE_CONFIG_4 0xC12
-#define A3XX_VSC_PIPE_DATA_ADDRESS_4 0xC13
-#define A3XX_VSC_PIPE_DATA_LENGTH_4 0xC14
-#define A3XX_VSC_PIPE_CONFIG_5 0xC15
-#define A3XX_VSC_PIPE_DATA_ADDRESS_5 0xC16
-#define A3XX_VSC_PIPE_DATA_LENGTH_5 0xC17
-#define A3XX_VSC_PIPE_CONFIG_6 0xC18
-#define A3XX_VSC_PIPE_DATA_ADDRESS_6 0xC19
-#define A3XX_VSC_PIPE_DATA_LENGTH_6 0xC1A
-#define A3XX_VSC_PIPE_CONFIG_7 0xC1B
-#define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
-#define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
-#define A3XX_PC_PERFCOUNTER0_SELECT 0xC48
-#define A3XX_PC_PERFCOUNTER1_SELECT 0xC49
-#define A3XX_PC_PERFCOUNTER2_SELECT 0xC4A
-#define A3XX_PC_PERFCOUNTER3_SELECT 0xC4B
-#define A3XX_GRAS_PERFCOUNTER0_SELECT 0xC88
-#define A3XX_GRAS_PERFCOUNTER1_SELECT 0xC89
-#define A3XX_GRAS_PERFCOUNTER2_SELECT 0xC8A
-#define A3XX_GRAS_PERFCOUNTER3_SELECT 0xC8B
-#define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
-#define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
-#define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
-#define A3XX_GRAS_CL_USER_PLANE_W0 0xCA3
-#define A3XX_GRAS_CL_USER_PLANE_X1 0xCA4
-#define A3XX_GRAS_CL_USER_PLANE_Y1 0xCA5
-#define A3XX_GRAS_CL_USER_PLANE_Z1 0xCA6
-#define A3XX_GRAS_CL_USER_PLANE_W1 0xCA7
-#define A3XX_GRAS_CL_USER_PLANE_X2 0xCA8
-#define A3XX_GRAS_CL_USER_PLANE_Y2 0xCA9
-#define A3XX_GRAS_CL_USER_PLANE_Z2 0xCAA
-#define A3XX_GRAS_CL_USER_PLANE_W2 0xCAB
-#define A3XX_GRAS_CL_USER_PLANE_X3 0xCAC
-#define A3XX_GRAS_CL_USER_PLANE_Y3 0xCAD
-#define A3XX_GRAS_CL_USER_PLANE_Z3 0xCAE
-#define A3XX_GRAS_CL_USER_PLANE_W3 0xCAF
-#define A3XX_GRAS_CL_USER_PLANE_X4 0xCB0
-#define A3XX_GRAS_CL_USER_PLANE_Y4 0xCB1
-#define A3XX_GRAS_CL_USER_PLANE_Z4 0xCB2
-#define A3XX_GRAS_CL_USER_PLANE_W4 0xCB3
-#define A3XX_GRAS_CL_USER_PLANE_X5 0xCB4
-#define A3XX_GRAS_CL_USER_PLANE_Y5 0xCB5
-#define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
-#define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
-#define A3XX_RB_GMEM_BASE_ADDR 0xCC0
-#define A3XX_RB_PERFCOUNTER0_SELECT 0xCC6
-#define A3XX_RB_PERFCOUNTER1_SELECT 0xCC7
-#define A3XX_HLSQ_PERFCOUNTER0_SELECT 0xE00
-#define A3XX_HLSQ_PERFCOUNTER1_SELECT 0xE01
-#define A3XX_HLSQ_PERFCOUNTER2_SELECT 0xE02
-#define A3XX_HLSQ_PERFCOUNTER3_SELECT 0xE03
-#define A3XX_HLSQ_PERFCOUNTER4_SELECT 0xE04
-#define A3XX_HLSQ_PERFCOUNTER5_SELECT 0xE05
-#define A3XX_VFD_PERFCOUNTER0_SELECT 0xE44
-#define A3XX_VFD_PERFCOUNTER1_SELECT 0xE45
-#define A3XX_VPC_VPC_DEBUG_RAM_SEL 0xE61
-#define A3XX_VPC_VPC_DEBUG_RAM_READ 0xE62
-#define A3XX_VPC_PERFCOUNTER0_SELECT 0xE64
-#define A3XX_VPC_PERFCOUNTER1_SELECT 0xE65
-#define A3XX_UCHE_CACHE_MODE_CONTROL_REG 0xE82
-#define A3XX_UCHE_PERFCOUNTER0_SELECT 0xE84
-#define A3XX_UCHE_PERFCOUNTER1_SELECT 0xE85
-#define A3XX_UCHE_PERFCOUNTER2_SELECT 0xE86
-#define A3XX_UCHE_PERFCOUNTER3_SELECT 0xE87
-#define A3XX_UCHE_PERFCOUNTER4_SELECT 0xE88
-#define A3XX_UCHE_PERFCOUNTER5_SELECT 0xE89
-#define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
-#define A3XX_SP_PERFCOUNTER0_SELECT 0xEC4
-#define A3XX_SP_PERFCOUNTER1_SELECT 0xEC5
-#define A3XX_SP_PERFCOUNTER2_SELECT 0xEC6
-#define A3XX_SP_PERFCOUNTER3_SELECT 0xEC7
-#define A3XX_SP_PERFCOUNTER4_SELECT 0xEC8
-#define A3XX_SP_PERFCOUNTER5_SELECT 0xEC9
-#define A3XX_SP_PERFCOUNTER6_SELECT 0xECA
-#define A3XX_SP_PERFCOUNTER7_SELECT 0xECB
-#define A3XX_TP_PERFCOUNTER0_SELECT 0xF04
-#define A3XX_TP_PERFCOUNTER1_SELECT 0xF05
-#define A3XX_TP_PERFCOUNTER2_SELECT 0xF06
-#define A3XX_TP_PERFCOUNTER3_SELECT 0xF07
-#define A3XX_TP_PERFCOUNTER4_SELECT 0xF08
-#define A3XX_TP_PERFCOUNTER5_SELECT 0xF09
-#define A3XX_GRAS_CL_CLIP_CNTL 0x2040
-#define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
-#define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
-#define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C
-#define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D
-#define A3XX_GRAS_SU_POINT_MINMAX 0x2068
-#define A3XX_GRAS_SU_POINT_SIZE 0x2069
-#define A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x206C
-#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x206D
-#define A3XX_GRAS_SU_MODE_CONTROL 0x2070
-#define A3XX_GRAS_SC_CONTROL 0x2072
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x2074
-#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x2075
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x2079
-#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x207A
-#define A3XX_RB_MODE_CONTROL 0x20C0
-#define A3XX_RB_RENDER_CONTROL 0x20C1
-#define A3XX_RB_MSAA_CONTROL 0x20C2
-#define A3XX_RB_MRT_CONTROL0 0x20C4
-#define A3XX_RB_MRT_BUF_INFO0 0x20C5
-#define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7
-#define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB
-#define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF
-#define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3
-#define A3XX_RB_BLEND_RED 0x20E4
-#define A3XX_RB_COPY_CONTROL 0x20EC
-#define A3XX_RB_COPY_DEST_INFO 0x20EF
-#define A3XX_RB_DEPTH_CONTROL 0x2100
-#define A3XX_RB_STENCIL_CONTROL 0x2104
-#define A3XX_PC_VSTREAM_CONTROL 0x21E4
-#define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA
-#define A3XX_PC_PRIM_VTX_CNTL 0x21EC
-#define A3XX_PC_RESTART_INDEX 0x21ED
-#define A3XX_HLSQ_CONTROL_0_REG 0x2200
-#define A3XX_HLSQ_VS_CONTROL_REG 0x2204
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207
-#define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A
-#define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C
-#define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211
-#define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212
-#define A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214
-#define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215
-#define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217
-#define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A
-#define A3XX_VFD_CONTROL_0 0x2240
-#define A3XX_VFD_INDEX_MIN 0x2242
-#define A3XX_VFD_INDEX_MAX 0x2243
-#define A3XX_VFD_FETCH_INSTR_0_0 0x2246
-#define A3XX_VFD_FETCH_INSTR_0_4 0x224E
-#define A3XX_VFD_FETCH_INSTR_1_F 0x2265
-#define A3XX_VFD_DECODE_INSTR_0 0x2266
-#define A3XX_VFD_VS_THREADING_THRESHOLD 0x227E
-#define A3XX_VPC_ATTR 0x2280
-#define A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x228B
-#define A3XX_SP_SP_CTRL_REG 0x22C0
-#define A3XX_SP_VS_CTRL_REG0 0x22C4
-#define A3XX_SP_VS_CTRL_REG1 0x22C5
-#define A3XX_SP_VS_PARAM_REG 0x22C6
-#define A3XX_SP_VS_OUT_REG_7 0x22CE
-#define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
-#define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
-#define A3XX_SP_VS_OBJ_START_REG 0x22D5
-#define A3XX_SP_VS_PVT_MEM_ADDR_REG 0x22D7
-#define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
-#define A3XX_SP_VS_LENGTH_REG 0x22DF
-#define A3XX_SP_FS_CTRL_REG0 0x22E0
-#define A3XX_SP_FS_CTRL_REG1 0x22E1
-#define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
-#define A3XX_SP_FS_OBJ_START_REG 0x22E3
-#define A3XX_SP_FS_PVT_MEM_ADDR_REG 0x22E5
-#define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
-#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
-#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9
-#define A3XX_SP_FS_OUTPUT_REG 0x22EC
-#define A3XX_SP_FS_MRT_REG_0 0x22F0
-#define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4
-#define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7
-#define A3XX_SP_FS_LENGTH_REG 0x22FF
-#define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340
-#define A3XX_TPL1_TP_FS_TEX_OFFSET 0x2342
-#define A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x2343
-#define A3XX_VBIF_CLKON 0x3001
-#define A3XX_VBIF_FIXED_SORT_EN 0x300C
-#define A3XX_VBIF_FIXED_SORT_SEL0 0x300D
-#define A3XX_VBIF_FIXED_SORT_SEL1 0x300E
-#define A3XX_VBIF_ABIT_SORT 0x301C
-#define A3XX_VBIF_ABIT_SORT_CONF 0x301D
-#define A3XX_VBIF_GATE_OFF_WRREQ_EN 0x302A
-#define A3XX_VBIF_IN_RD_LIM_CONF0 0x302C
-#define A3XX_VBIF_IN_RD_LIM_CONF1 0x302D
-#define A3XX_VBIF_IN_WR_LIM_CONF0 0x3030
-#define A3XX_VBIF_IN_WR_LIM_CONF1 0x3031
-#define A3XX_VBIF_OUT_RD_LIM_CONF0 0x3034
-#define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
-#define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
-#define A3XX_VBIF_ARB_CTL 0x303C
-#define A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
-#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
-#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
-#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
-#define A3XX_VBIF_PERF_CNT_EN 0x3070
-#define A3XX_VBIF_PERF_CNT_CLR 0x3071
-#define A3XX_VBIF_PERF_CNT_SEL 0x3072
-#define A3XX_VBIF_PERF_CNT0_LO 0x3073
-#define A3XX_VBIF_PERF_CNT0_HI 0x3074
-#define A3XX_VBIF_PERF_CNT1_LO 0x3075
-#define A3XX_VBIF_PERF_CNT1_HI 0x3076
-#define A3XX_VBIF_PERF_PWR_CNT0_LO 0x3077
-#define A3XX_VBIF_PERF_PWR_CNT0_HI 0x3078
-#define A3XX_VBIF_PERF_PWR_CNT1_LO 0x3079
-#define A3XX_VBIF_PERF_PWR_CNT1_HI 0x307a
-#define A3XX_VBIF_PERF_PWR_CNT2_LO 0x307b
-#define A3XX_VBIF_PERF_PWR_CNT2_HI 0x307c
-
-/* Bit flags for RBBM_CTL */
-#define RBBM_RBBM_CTL_RESET_PWR_CTR0 BIT(0)
-#define RBBM_RBBM_CTL_RESET_PWR_CTR1 BIT(1)
-#define RBBM_RBBM_CTL_ENABLE_PWR_CTR0 BIT(16)
-#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1 BIT(17)
-
-/* Various flags used by the context switch code */
-
-#define SP_MULTI 0
-#define SP_BUFFER_MODE 1
-#define SP_TWO_VTX_QUADS 0
-#define SP_PIXEL_BASED 0
-#define SP_R8G8B8A8_UNORM 8
-#define SP_FOUR_PIX_QUADS 1
-
-#define HLSQ_DIRECT 0
-#define HLSQ_BLOCK_ID_SP_VS 4
-#define HLSQ_SP_VS_INSTR 0
-#define HLSQ_SP_FS_INSTR 0
-#define HLSQ_BLOCK_ID_SP_FS 6
-#define HLSQ_TWO_PIX_QUADS 0
-#define HLSQ_TWO_VTX_QUADS 0
-#define HLSQ_BLOCK_ID_TP_TEX 2
-#define HLSQ_TP_TEX_SAMPLERS 0
-#define HLSQ_TP_TEX_MEMOBJ 1
-#define HLSQ_BLOCK_ID_TP_MIPMAP 3
-#define HLSQ_TP_MIPMAP_BASE 1
-#define HLSQ_FOUR_PIX_QUADS 1
-
-#define RB_FACTOR_ONE 1
-#define RB_BLEND_OP_ADD 0
-#define RB_FACTOR_ZERO 0
-#define RB_DITHER_DISABLE 0
-#define RB_DITHER_ALWAYS 1
-#define RB_FRAG_NEVER 0
-#define RB_ENDIAN_NONE 0
-#define RB_R8G8B8A8_UNORM 8
-#define RB_RESOLVE_PASS 2
-#define RB_CLEAR_MODE_RESOLVE 1
-#define RB_TILINGMODE_LINEAR 0
-#define RB_REF_NEVER 0
-#define RB_FRAG_LESS 1
-#define RB_REF_ALWAYS 7
-#define RB_STENCIL_KEEP 0
-#define RB_RENDERING_PASS 0
-#define RB_TILINGMODE_32X32 2
-
-#define PC_DRAW_TRIANGLES 2
-#define PC_DI_PT_RECTLIST 8
-#define PC_DI_SRC_SEL_AUTO_INDEX 2
-#define PC_DI_INDEX_SIZE_16_BIT 0
-#define PC_DI_IGNORE_VISIBILITY 0
-#define PC_DI_PT_TRILIST 4
-#define PC_DI_SRC_SEL_IMMEDIATE 1
-#define PC_DI_INDEX_SIZE_32_BIT 1
-
-#define UCHE_ENTIRE_CACHE 1
-#define UCHE_OP_INVALIDATE 1
-
-/*
- * The following are bit field shifts within some of the registers defined
- * above. These are used in the context switch code in conjunction with the
- * _SET macro
- */
-
-#define GRAS_CL_CLIP_CNTL_CLIP_DISABLE 16
-#define GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 12
-#define GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 21
-#define GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 19
-#define GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 20
-#define GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 17
-#define GRAS_CL_VPORT_XSCALE_VPORT_XSCALE 0
-#define GRAS_CL_VPORT_YSCALE_VPORT_YSCALE 0
-#define GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE 0
-#define GRAS_SC_CONTROL_RASTER_MODE 12
-#define GRAS_SC_CONTROL_RENDER_MODE 4
-#define GRAS_SC_SCREEN_SCISSOR_BR_BR_X 0
-#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
-#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
-#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
-#define GRAS_SU_CTRLMODE_LINEHALFWIDTH 03
-#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
-#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
-#define HLSQ_CTRL0REG_CHUNKDISABLE 26
-#define HLSQ_CTRL0REG_CONSTSWITCHMODE 27
-#define HLSQ_CTRL0REG_FSSUPERTHREADENABLE 6
-#define HLSQ_CTRL0REG_FSTHREADSIZE 4
-#define HLSQ_CTRL0REG_LAZYUPDATEDISABLE 28
-#define HLSQ_CTRL0REG_RESERVED2 10
-#define HLSQ_CTRL0REG_SPCONSTFULLUPDATE 29
-#define HLSQ_CTRL0REG_SPSHADERRESTART 9
-#define HLSQ_CTRL0REG_TPFULLUPDATE 30
-#define HLSQ_CTRL1REG_RESERVED1 9
-#define HLSQ_CTRL1REG_VSSUPERTHREADENABLE 8
-#define HLSQ_CTRL1REG_VSTHREADSIZE 6
-#define HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD 26
-#define HLSQ_FSCTRLREG_FSCONSTLENGTH 0
-#define HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET 12
-#define HLSQ_FSCTRLREG_FSINSTRLENGTH 24
-#define HLSQ_VSCTRLREG_VSINSTRLENGTH 24
-#define PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE 8
-#define PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE 5
-#define PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST 25
-#define PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC 0
-#define PC_DRAW_INITIATOR_PRIM_TYPE 0
-#define PC_DRAW_INITIATOR_SOURCE_SELECT 6
-#define PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE 9
-#define PC_DRAW_INITIATOR_INDEX_SIZE 0x0B
-#define PC_DRAW_INITIATOR_SMALL_INDEX 0x0D
-#define PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x0E
-#define RB_COPYCONTROL_COPY_GMEM_BASE 14
-#define RB_COPYCONTROL_RESOLVE_CLEAR_MODE 4
-#define RB_COPYDESTBASE_COPY_DEST_BASE 4
-#define RB_COPYDESTINFO_COPY_COMPONENT_ENABLE 14
-#define RB_COPYDESTINFO_COPY_DEST_ENDIAN 18
-#define RB_COPYDESTINFO_COPY_DEST_FORMAT 2
-#define RB_COPYDESTINFO_COPY_DEST_TILE 0
-#define RB_COPYDESTPITCH_COPY_DEST_PITCH 0
-#define RB_DEPTHCONTROL_Z_TEST_FUNC 4
-#define RB_MODECONTROL_RENDER_MODE 8
-#define RB_MODECONTROL_MARB_CACHE_SPLIT_MODE 15
-#define RB_MODECONTROL_PACKER_TIMER_ENABLE 16
-#define RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE 21
-#define RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR 24
-#define RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR 16
-#define RB_MRTBLENDCONTROL_CLAMP_ENABLE 29
-#define RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE 5
-#define RB_MRTBLENDCONTROL_RGB_DEST_FACTOR 8
-#define RB_MRTBLENDCONTROL_RGB_SRC_FACTOR 0
-#define RB_MRTBUFBASE_COLOR_BUF_BASE 4
-#define RB_MRTBUFINFO_COLOR_BUF_PITCH 17
-#define RB_MRTBUFINFO_COLOR_FORMAT 0
-#define RB_MRTBUFINFO_COLOR_TILE_MODE 6
-#define RB_MRTCONTROL_COMPONENT_ENABLE 24
-#define RB_MRTCONTROL_DITHER_MODE 12
-#define RB_MRTCONTROL_READ_DEST_ENABLE 3
-#define RB_MRTCONTROL_ROP_CODE 8
-#define RB_MSAACONTROL_MSAA_DISABLE 10
-#define RB_MSAACONTROL_SAMPLE_MASK 16
-#define RB_RENDERCONTROL_ALPHA_TEST_FUNC 24
-#define RB_RENDERCONTROL_BIN_WIDTH 4
-#define RB_RENDERCONTROL_DISABLE_COLOR_PIPE 12
-#define RB_STENCILCONTROL_STENCIL_FAIL 11
-#define RB_STENCILCONTROL_STENCIL_FAIL_BF 23
-#define RB_STENCILCONTROL_STENCIL_FUNC 8
-#define RB_STENCILCONTROL_STENCIL_FUNC_BF 20
-#define RB_STENCILCONTROL_STENCIL_ZFAIL 17
-#define RB_STENCILCONTROL_STENCIL_ZFAIL_BF 29
-#define RB_STENCILCONTROL_STENCIL_ZPASS 14
-#define RB_STENCILCONTROL_STENCIL_ZPASS_BF 26
-#define SP_FSCTRLREG0_FSFULLREGFOOTPRINT 10
-#define SP_FSCTRLREG0_FSHALFREGFOOTPRINT 4
-#define SP_FSCTRLREG0_FSICACHEINVALID 2
-#define SP_FSCTRLREG0_FSINOUTREGOVERLAP 18
-#define SP_FSCTRLREG0_FSINSTRBUFFERMODE 1
-#define SP_FSCTRLREG0_FSLENGTH 24
-#define SP_FSCTRLREG0_FSSUPERTHREADMODE 21
-#define SP_FSCTRLREG0_FSTHREADMODE 0
-#define SP_FSCTRLREG0_FSTHREADSIZE 20
-#define SP_FSCTRLREG0_PIXLODENABLE 22
-#define SP_FSCTRLREG1_FSCONSTLENGTH 0
-#define SP_FSCTRLREG1_FSINITIALOUTSTANDING 20
-#define SP_FSCTRLREG1_HALFPRECVAROFFSET 24
-#define SP_FSMRTREG_REGID 0
-#define SP_FSMRTREG_PRECISION 8
-#define SP_FSOUTREG_PAD0 2
-#define SP_IMAGEOUTPUTREG_MRTFORMAT 0
-#define SP_IMAGEOUTPUTREG_DEPTHOUTMODE 3
-#define SP_IMAGEOUTPUTREG_PAD0 6
-#define SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET 16
-#define SP_OBJOFFSETREG_SHADEROBJOFFSETINIC 25
-#define SP_SHADERLENGTH_LEN 0
-#define SP_SPCTRLREG_CONSTMODE 18
-#define SP_SPCTRLREG_LOMODE 22
-#define SP_SPCTRLREG_SLEEPMODE 20
-#define SP_VSCTRLREG0_VSFULLREGFOOTPRINT 10
-#define SP_VSCTRLREG0_VSICACHEINVALID 2
-#define SP_VSCTRLREG0_VSINSTRBUFFERMODE 1
-#define SP_VSCTRLREG0_VSLENGTH 24
-#define SP_VSCTRLREG0_VSSUPERTHREADMODE 21
-#define SP_VSCTRLREG0_VSTHREADMODE 0
-#define SP_VSCTRLREG0_VSTHREADSIZE 20
-#define SP_VSCTRLREG1_VSINITIALOUTSTANDING 24
-#define SP_VSOUTREG_COMPMASK0 9
-#define SP_VSPARAMREG_POSREGID 0
-#define SP_VSPARAMREG_PSIZEREGID 8
-#define SP_VSPARAMREG_TOTALVSOUTVAR 20
-#define SP_VSVPCDSTREG_OUTLOC0 0
-#define TPL1_TPTEXOFFSETREG_BASETABLEPTR 16
-#define TPL1_TPTEXOFFSETREG_MEMOBJOFFSET 8
-#define TPL1_TPTEXOFFSETREG_SAMPLEROFFSET 0
-#define UCHE_INVALIDATE1REG_OPCODE 0x1C
-#define UCHE_INVALIDATE1REG_ALLORPORTION 0x1F
-#define VFD_BASEADDR_BASEADDR 0
-#define VFD_CTRLREG0_PACKETSIZE 18
-#define VFD_CTRLREG0_STRMDECINSTRCNT 22
-#define VFD_CTRLREG0_STRMFETCHINSTRCNT 27
-#define VFD_CTRLREG0_TOTALATTRTOVS 0
-#define VFD_CTRLREG1_MAXSTORAGE 0
-#define VFD_CTRLREG1_REGID4INST 24
-#define VFD_CTRLREG1_REGID4VTX 16
-#define VFD_DECODEINSTRUCTIONS_CONSTFILL 4
-#define VFD_DECODEINSTRUCTIONS_FORMAT 6
-#define VFD_DECODEINSTRUCTIONS_LASTCOMPVALID 29
-#define VFD_DECODEINSTRUCTIONS_REGID 12
-#define VFD_DECODEINSTRUCTIONS_SHIFTCNT 24
-#define VFD_DECODEINSTRUCTIONS_SWITCHNEXT 30
-#define VFD_DECODEINSTRUCTIONS_WRITEMASK 0
-#define VFD_FETCHINSTRUCTIONS_BUFSTRIDE 7
-#define VFD_FETCHINSTRUCTIONS_FETCHSIZE 0
-#define VFD_FETCHINSTRUCTIONS_INDEXDECODE 18
-#define VFD_FETCHINSTRUCTIONS_STEPRATE 24
-#define VFD_FETCHINSTRUCTIONS_SWITCHNEXT 17
-#define VFD_THREADINGTHRESHOLD_REGID_VTXCNT 8
-#define VFD_THREADINGTHRESHOLD_REGID_THRESHOLD 0
-#define VFD_THREADINGTHRESHOLD_RESERVED6 4
-#define VPC_VPCATTR_LMSIZE 28
-#define VPC_VPCATTR_THRHDASSIGN 12
-#define VPC_VPCATTR_TOTALATTR 0
-#define VPC_VPCPACK_NUMFPNONPOSVAR 8
-#define VPC_VPCPACK_NUMNONPOSVSVAR 16
-#define VPC_VPCVARPSREPLMODE_COMPONENT08 0
-#define VPC_VPCVARPSREPLMODE_COMPONENT09 2
-#define VPC_VPCVARPSREPLMODE_COMPONENT0A 4
-#define VPC_VPCVARPSREPLMODE_COMPONENT0B 6
-#define VPC_VPCVARPSREPLMODE_COMPONENT0C 8
-#define VPC_VPCVARPSREPLMODE_COMPONENT0D 10
-#define VPC_VPCVARPSREPLMODE_COMPONENT0E 12
-#define VPC_VPCVARPSREPLMODE_COMPONENT0F 14
-#define VPC_VPCVARPSREPLMODE_COMPONENT10 16
-#define VPC_VPCVARPSREPLMODE_COMPONENT11 18
-#define VPC_VPCVARPSREPLMODE_COMPONENT12 20
-#define VPC_VPCVARPSREPLMODE_COMPONENT13 22
-#define VPC_VPCVARPSREPLMODE_COMPONENT14 24
-#define VPC_VPCVARPSREPLMODE_COMPONENT15 26
-#define VPC_VPCVARPSREPLMODE_COMPONENT16 28
-#define VPC_VPCVARPSREPLMODE_COMPONENT17 30
-
-/* RBBM Debug bus block IDs */
-#define RBBM_BLOCK_ID_NONE 0x0
-#define RBBM_BLOCK_ID_CP 0x1
-#define RBBM_BLOCK_ID_RBBM 0x2
-#define RBBM_BLOCK_ID_VBIF 0x3
-#define RBBM_BLOCK_ID_HLSQ 0x4
-#define RBBM_BLOCK_ID_UCHE 0x5
-#define RBBM_BLOCK_ID_PC 0x8
-#define RBBM_BLOCK_ID_VFD 0x9
-#define RBBM_BLOCK_ID_VPC 0xa
-#define RBBM_BLOCK_ID_TSE 0xb
-#define RBBM_BLOCK_ID_RAS 0xc
-#define RBBM_BLOCK_ID_VSC 0xd
-#define RBBM_BLOCK_ID_SP_0 0x10
-#define RBBM_BLOCK_ID_SP_1 0x11
-#define RBBM_BLOCK_ID_SP_2 0x12
-#define RBBM_BLOCK_ID_SP_3 0x13
-#define RBBM_BLOCK_ID_TPL1_0 0x18
-#define RBBM_BLOCK_ID_TPL1_1 0x19
-#define RBBM_BLOCK_ID_TPL1_2 0x1a
-#define RBBM_BLOCK_ID_TPL1_3 0x1b
-#define RBBM_BLOCK_ID_RB_0 0x20
-#define RBBM_BLOCK_ID_RB_1 0x21
-#define RBBM_BLOCK_ID_RB_2 0x22
-#define RBBM_BLOCK_ID_RB_3 0x23
-#define RBBM_BLOCK_ID_MARB_0 0x28
-#define RBBM_BLOCK_ID_MARB_1 0x29
-#define RBBM_BLOCK_ID_MARB_2 0x2a
-#define RBBM_BLOCK_ID_MARB_3 0x2b
-
-/* RBBM_CLOCK_CTL default value */
-#define A305_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
-#define A320_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
-#define A330_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAE
-#define A330v2_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
-
-#define A330_RBBM_GPR0_CTL_DEFAULT 0x0AE2B8AE
-#define A330v2_RBBM_GPR0_CTL_DEFAULT 0x0AA2A8AA
-
-/* COUNTABLE FOR SP PERFCOUNTER */
-#define SP_FS_FULL_ALU_INSTRUCTIONS 0x0E
-#define SP_ALU_ACTIVE_CYCLES 0x1D
-#define SP0_ICL1_MISSES 0x1A
-#define SP_FS_CFLOW_INSTRUCTIONS 0x0C
-
-/* VBIF PERFCOUNTER ENA/CLR values */
-#define VBIF_PERF_CNT_0 BIT(0)
-#define VBIF_PERF_CNT_1 BIT(1)
-#define VBIF_PERF_PWR_CNT_0 BIT(2)
-#define VBIF_PERF_PWR_CNT_1 BIT(3)
-#define VBIF_PERF_PWR_CNT_2 BIT(4)
-
-/* VBIF PERFCOUNTER SEL values */
-#define VBIF_PERF_CNT_0_SEL 0
-#define VBIF_PERF_CNT_0_SEL_MASK 0x7f
-#define VBIF_PERF_CNT_1_SEL 8
-#define VBIF_PERF_CNT_1_SEL_MASK 0x7f00
-
-/* VBIF countables */
-#define VBIF_DDR_TOTAL_CYCLES 110
-
-#endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
deleted file mode 100644
index 1f76c03..0000000
--- a/drivers/gpu/msm/adreno.c
+++ /dev/null
@@ -1,3982 +0,0 @@
-/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/ioctl.h>
-#include <linux/sched.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/msm_kgsl.h>
-#include <linux/delay.h>
-
-#include <mach/socinfo.h>
-#include <mach/msm_bus_board.h>
-#include <mach/msm_bus.h>
-#include <mach/msm_dcvs.h>
-#include <mach/msm_dcvs_scm.h>
-
-#include "kgsl.h"
-#include "kgsl_pwrscale.h"
-#include "kgsl_cffdump.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_iommu.h"
-
-#include "adreno.h"
-#include "adreno_pm4types.h"
-
-#include "a2xx_reg.h"
-#include "a3xx_reg.h"
-
-#define DRIVER_VERSION_MAJOR 3
-#define DRIVER_VERSION_MINOR 1
-
-/* Adreno MH arbiter config*/
-#define ADRENO_CFG_MHARB \
- (0x10 \
- | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
- | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
- | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
- | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
- | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
-
-#define ADRENO_MMU_CONFIG \
- (0x01 \
- | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
- | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
-
-/* default log level is error for everything */
-#define KGSL_LOG_LEVEL_DEFAULT 3
-
-#ifndef CONFIG_DEBUG_FS
-unsigned int kgsl_cff_dump_enable;
-#endif
-
-static const struct kgsl_functable adreno_functable;
-
-static struct adreno_device device_3d0 = {
- .dev = {
- KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
- .name = DEVICE_3D0_NAME,
- .id = KGSL_DEVICE_3D0,
- .mh = {
- .mharb = ADRENO_CFG_MHARB,
- /* Remove 1k boundary check in z470 to avoid a GPU
- * hang. Notice that this solution won't work if
- * both EBI and SMI are used
- */
- .mh_intf_cfg1 = 0x00032f07,
- /* turn off memory protection unit by setting
- acceptable physical address range to include
- all pages. */
- .mpu_base = 0x00000000,
- .mpu_range = 0xFFFFF000,
- },
- .mmu = {
- .config = ADRENO_MMU_CONFIG,
- },
- .pwrctrl = {
- .irq_name = KGSL_3D0_IRQ,
- },
- .iomemname = KGSL_3D0_REG_MEMORY,
- .ftbl = &adreno_functable,
-#ifdef CONFIG_HAS_EARLYSUSPEND
- .display_off = {
- .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
- .suspend = kgsl_early_suspend_driver,
- .resume = kgsl_late_resume_driver,
- },
-#endif
- .cmd_log = KGSL_LOG_LEVEL_DEFAULT,
- .ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
- .drv_log = KGSL_LOG_LEVEL_DEFAULT,
- .mem_log = KGSL_LOG_LEVEL_DEFAULT,
- .pwr_log = KGSL_LOG_LEVEL_DEFAULT,
- .ft_log = KGSL_LOG_LEVEL_DEFAULT,
- .pm_dump_enable = 0,
- },
- .gmem_base = 0,
- .gmem_size = SZ_256K,
- .pfp_fw = NULL,
- .pm4_fw = NULL,
- .wait_timeout = 0, /* in milliseconds, 0 means disabled */
- .ib_check_level = 0,
- .ft_policy = KGSL_FT_DEFAULT_POLICY,
- .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
- .fast_hang_detect = 1,
- .long_ib_detect = 1,
-};
-
-/* This set of registers are used for Hang detection
- * If the values of these registers are same after
- * KGSL_TIMEOUT_HANG_DETECT time, GPU hang is reported in
- * kernel log.
- * *****ALERT******ALERT********ALERT*************
- * Order of registers below is important, registers
- * from LONG_IB_DETECT_REG_INDEX_START to
- * LONG_IB_DETECT_REG_INDEX_END are used in long ib detection.
- */
-#define LONG_IB_DETECT_REG_INDEX_START 1
-#define LONG_IB_DETECT_REG_INDEX_END 5
-
-unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT] = {
- A3XX_RBBM_STATUS,
- REG_CP_RB_RPTR, /* LONG_IB_DETECT_REG_INDEX_START */
- REG_CP_IB1_BASE,
- REG_CP_IB1_BUFSZ,
- REG_CP_IB2_BASE,
- REG_CP_IB2_BUFSZ, /* LONG_IB_DETECT_REG_INDEX_END */
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
-};
-
-/*
- * This is the master list of all GPU cores that are supported by this
- * driver.
- */
-
-#define ANY_ID (~0)
-#define NO_VER (~0)
-
-static const struct {
- enum adreno_gpurev gpurev;
- unsigned int core, major, minor, patchid;
- const char *pm4fw;
- const char *pfpfw;
- struct adreno_gpudev *gpudev;
- unsigned int istore_size;
- unsigned int pix_shader_start;
- /* Size of an instruction in dwords */
- unsigned int instruction_size;
- /* size of gmem for gpu*/
- unsigned int gmem_size;
- /* version of pm4 microcode that supports sync_lock
- between CPU and GPU for SMMU-v1 programming */
- unsigned int sync_lock_pm4_ver;
- /* version of pfp microcode that supports sync_lock
- between CPU and GPU for SMMU-v1 programming */
- unsigned int sync_lock_pfp_ver;
-} adreno_gpulist[] = {
- { ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
- "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
- 512, 384, 3, SZ_256K, NO_VER, NO_VER },
- { ADRENO_REV_A203, 0, 1, 1, ANY_ID,
- "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
- 512, 384, 3, SZ_256K, NO_VER, NO_VER },
- { ADRENO_REV_A205, 0, 1, 0, ANY_ID,
- "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
- 512, 384, 3, SZ_256K, NO_VER, NO_VER },
- { ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
- "leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev,
- 512, 384, 3, SZ_512K, NO_VER, NO_VER },
- /*
- * patchlevel 5 (8960v2) needs special pm4 firmware to work around
- * a hardware problem.
- */
- { ADRENO_REV_A225, 2, 2, 0, 5,
- "a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
- 1536, 768, 3, SZ_512K, NO_VER, NO_VER },
- { ADRENO_REV_A225, 2, 2, 0, 6,
- "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
- 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
- { ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
- "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
- 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
- /* A3XX doesn't use the pix_shader_start */
- { ADRENO_REV_A305, 3, 0, 5, ANY_ID,
- "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
- 512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
- /* A3XX doesn't use the pix_shader_start */
- { ADRENO_REV_A320, 3, 2, ANY_ID, ANY_ID,
- "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
- 512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
- { ADRENO_REV_A330, 3, 3, 0, ANY_ID,
- "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
- 512, 0, 2, SZ_1M, NO_VER, NO_VER },
-};
-
-static unsigned int adreno_isidle(struct kgsl_device *device);
-
-/**
- * adreno_perfcounter_init: Reserve kernel performance counters
- * @device: device to configure
- *
- * The kernel needs/wants a certain group of performance counters for
- * its own activities. Reserve these performance counters at init time
- * to ensure that they are always reserved for the kernel. The performance
- * counters used by the kernel can be obtained by the user, but these
- * performance counters will remain active as long as the device is alive.
- */
-
-static void adreno_perfcounter_init(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- if (adreno_dev->gpudev->perfcounter_init)
- adreno_dev->gpudev->perfcounter_init(adreno_dev);
-};
-
-/**
- * adreno_perfcounter_start: Enable performance counters
- * @adreno_dev: Adreno device to configure
- *
- * Ensure all performance counters are enabled that are allocated. Since
- * the device was most likely stopped, we can't trust that the counters
- * are still valid so make it so.
- */
-
-static void adreno_perfcounter_start(struct adreno_device *adreno_dev)
-{
- struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- struct adreno_perfcount_group *group;
- unsigned int i, j;
-
- /* group id iter */
- for (i = 0; i < counters->group_count; i++) {
- group = &(counters->groups[i]);
-
- /* countable iter */
- for (j = 0; j < group->reg_count; j++) {
- if (group->regs[j].countable ==
- KGSL_PERFCOUNTER_NOT_USED)
- continue;
-
- if (adreno_dev->gpudev->perfcounter_enable)
- adreno_dev->gpudev->perfcounter_enable(
- adreno_dev, i, j,
- group->regs[j].countable);
- }
- }
-}
-
-/**
- * adreno_perfcounter_read_group: Determine which countables are in counters
- * @adreno_dev: Adreno device to configure
- * @reads: List of kgsl_perfcounter_read_groups
- * @count: Length of list
- *
- * Read the performance counters for the groupid/countable pairs and return
- * the 64 bit result for each pair
- */
-
-int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
- struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
-{
- struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- struct adreno_perfcount_group *group;
- struct kgsl_perfcounter_read_group *list = NULL;
- unsigned int i, j;
- int ret = 0;
-
- /* perfcounter get/put/query/read not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
- return -EINVAL;
-
- /* sanity check for later */
- if (!adreno_dev->gpudev->perfcounter_read)
- return -EINVAL;
-
- /* sanity check params passed in */
- if (reads == NULL || count == 0 || count > 100)
- return -EINVAL;
-
- list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
- GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- if (copy_from_user(list, reads,
- sizeof(struct kgsl_perfcounter_read_group) * count)) {
- ret = -EFAULT;
- goto done;
- }
-
- /* list iterator */
- for (j = 0; j < count; j++) {
-
- list[j].value = 0;
-
- /* Verify that the group ID is within range */
- if (list[j].groupid >= counters->group_count) {
- ret = -EINVAL;
- goto done;
- }
-
- group = &(counters->groups[list[j].groupid]);
-
- /* group/counter iterator */
- for (i = 0; i < group->reg_count; i++) {
- if (group->regs[i].countable == list[j].countable) {
- list[j].value =
- adreno_dev->gpudev->perfcounter_read(
- adreno_dev, list[j].groupid,
- i, group->regs[i].offset);
- break;
- }
- }
- }
-
- /* write the data */
- if (copy_to_user(reads, list,
- sizeof(struct kgsl_perfcounter_read_group) *
- count) != 0)
- ret = -EFAULT;
-
-done:
- kfree(list);
- return ret;
-}
-
-/**
- * adreno_perfcounter_query_group: Determine which countables are in counters
- * @adreno_dev: Adreno device to configure
- * @groupid: Desired performance counter group
- * @countables: Return list of all countables in the groups counters
- * @count: Max length of the array
- * @max_counters: max counters for the groupid
- *
- * Query the current state of counters for the group.
- */
-
-int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int *countables, unsigned int count,
- unsigned int *max_counters)
-{
- struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- struct adreno_perfcount_group *group;
- unsigned int i;
-
- *max_counters = 0;
-
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
- return -EINVAL;
-
- if (groupid >= counters->group_count)
- return -EINVAL;
-
- group = &(counters->groups[groupid]);
- *max_counters = group->reg_count;
-
- /*
-	 * if countables is NULL or count is zero, return max reg_count in
- * *max_counters and return success
- */
- if (countables == NULL || count == 0)
- return 0;
-
- /*
-	 * Go through all available counters. Write up to count countable
- * values.
- */
- for (i = 0; i < group->reg_count && i < count; i++) {
- if (copy_to_user(&countables[i], &(group->regs[i].countable),
- sizeof(unsigned int)) != 0)
- return -EFAULT;
- }
-
- return 0;
-}
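/*
 * Editor's note: a minimal sketch of the two-call sizing convention the
 * query path above implements: a first call with countables == NULL (or
 * count == 0) only reports the group size via *max_counters, and a second
 * call fills the caller's buffer. query_group() and the opaque dev handle
 * are hypothetical stand-ins, not the driver's real entry points.
 */
#include <stdlib.h>

int query_group(void *dev, unsigned int groupid, unsigned int *countables,
		unsigned int count, unsigned int *max_counters);

static unsigned int *read_group_countables(void *dev, unsigned int groupid,
					   unsigned int *nout)
{
	unsigned int max = 0, *buf;

	if (query_group(dev, groupid, NULL, 0, &max))	/* 1st call: size */
		return NULL;

	buf = malloc(max * sizeof(*buf));
	if (!buf)
		return NULL;

	if (query_group(dev, groupid, buf, max, &max)) { /* 2nd call: fill */
		free(buf);
		return NULL;
	}

	*nout = max;
	return buf;
}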
-
-/**
- * adreno_perfcounter_get: Try to put a countable in an available counter
- * @adreno_dev: Adreno device to configure
- * @groupid: Desired performance counter group
- * @countable: Countable desired to be in a counter
- * @offset: Return offset of the countable
- * @flags: Used to setup kernel perf counters
- *
- * Try to place a countable in an available counter. If the countable is
- * already in a counter, reference count the counter/countable pair resource
- * and return success
- */
-
-int adreno_perfcounter_get(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable, unsigned int *offset,
- unsigned int flags)
-{
- struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- struct adreno_perfcount_group *group;
- unsigned int i, empty = -1;
-
- /* always clear return variables */
- if (offset)
- *offset = 0;
-
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
- return -EINVAL;
-
- if (groupid >= counters->group_count)
- return -EINVAL;
-
- group = &(counters->groups[groupid]);
-
- /*
- * Check if the countable is already associated with a counter.
- * Refcount and return the offset, otherwise, try and find an empty
- * counter and assign the countable to it.
- */
- for (i = 0; i < group->reg_count; i++) {
- if (group->regs[i].countable == countable) {
- /* Countable already associated with counter */
- group->regs[i].refcount++;
- group->regs[i].flags |= flags;
- if (offset)
- *offset = group->regs[i].offset;
- return 0;
- } else if (group->regs[i].countable ==
- KGSL_PERFCOUNTER_NOT_USED) {
- /* keep track of unused counter */
- empty = i;
- }
- }
-
- /* no available counters, so do nothing else */
- if (empty == -1)
- return -EBUSY;
-
- /* initialize the new counter */
- group->regs[empty].countable = countable;
- group->regs[empty].refcount = 1;
-
- /* enable the new counter */
- adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
- countable);
-
- group->regs[empty].flags = flags;
-
- if (offset)
- *offset = group->regs[empty].offset;
-
- return 0;
-}
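/*
 * Editor's note: a minimal sketch of the refcounted "reuse-or-claim"
 * slot assignment that adreno_perfcounter_get()/put() above implement.
 * struct slot and NOT_USED are illustrative types, not the driver's.
 */
#include <errno.h>

#define NOT_USED 0xFFFFFFFFU

struct slot { unsigned int countable; unsigned int refcount; };

static int slot_get(struct slot *slots, int n, unsigned int countable)
{
	int i, empty = -1;

	for (i = 0; i < n; i++) {
		if (slots[i].countable == countable) {
			slots[i].refcount++;	/* already present: share */
			return i;
		}
		if (slots[i].countable == NOT_USED)
			empty = i;		/* remember a free slot */
	}

	if (empty == -1)
		return -EBUSY;			/* every slot is taken */

	slots[empty].countable = countable;	/* claim the free slot */
	slots[empty].refcount = 1;
	return empty;
}

static void slot_put(struct slot *slots, int i)
{
	if (slots[i].refcount > 0 && --slots[i].refcount == 0)
		slots[i].countable = NOT_USED;	/* free for other users */
}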
-
-
-/**
- * adreno_perfcounter_put: Release a countable from counter resource
- * @adreno_dev: Adreno device to configure
- * @groupid: Desired performance counter group
- * @countable: Countable desired to be freed from a counter
- *
- * Put a performance counter/countable pair that was previously received. If
- * no one else is using the countable, free up the counter for others.
- */
-int adreno_perfcounter_put(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable)
-{
- struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
- struct adreno_perfcount_group *group;
-
- unsigned int i;
-
- /* perfcounter get/put/query not allowed on a2xx */
- if (adreno_is_a2xx(adreno_dev))
- return -EINVAL;
-
- if (groupid >= counters->group_count)
- return -EINVAL;
-
- group = &(counters->groups[groupid]);
-
- for (i = 0; i < group->reg_count; i++) {
- if (group->regs[i].countable == countable) {
- if (group->regs[i].refcount > 0) {
- group->regs[i].refcount--;
-
- /*
-				 * bookkeeping to ensure we never free a
-				 * perf counter used by the kernel
- */
- if (group->regs[i].flags &&
- group->regs[i].refcount == 0)
- group->regs[i].refcount++;
-
- /* make available if not used */
- if (group->regs[i].refcount == 0)
- group->regs[i].countable =
- KGSL_PERFCOUNTER_NOT_USED;
- }
-
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
-{
- irqreturn_t result;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- result = adreno_dev->gpudev->irq_handler(adreno_dev);
-
- if (device->requested_state == KGSL_STATE_NONE) {
- if (device->pwrctrl.nap_allowed == true) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- queue_work(device->work_queue, &device->idle_check_ws);
- } else if (device->pwrscale.policy != NULL) {
- queue_work(device->work_queue, &device->idle_check_ws);
- }
- }
-
- /* Reset the time-out in our idle timer */
- mod_timer_pending(&device->idle_timer,
- jiffies + device->pwrctrl.interval_timeout);
- mod_timer_pending(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT)));
- return result;
-}
-
-static void adreno_cleanup_pt(struct kgsl_device *device,
- struct kgsl_pagetable *pagetable)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
-
- kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
-
- kgsl_mmu_unmap(pagetable, &device->memstore);
-
- kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
-}
-
-static int adreno_setup_pt(struct kgsl_device *device,
- struct kgsl_pagetable *pagetable)
-{
- int result = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc);
- if (result)
- goto error;
-
- result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc);
- if (result)
- goto unmap_buffer_desc;
-
- result = kgsl_mmu_map_global(pagetable, &device->memstore);
- if (result)
- goto unmap_memptrs_desc;
-
- result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
- if (result)
- goto unmap_memstore_desc;
-
- /*
- * Set the mpu end to the last "normal" global memory we use.
- * For the IOMMU, this will be used to restrict access to the
- * mapped registers.
- */
- device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr +
- device->mmu.setstate_memory.size;
- return result;
-
-unmap_memstore_desc:
- kgsl_mmu_unmap(pagetable, &device->memstore);
-
-unmap_memptrs_desc:
- kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
-
-unmap_buffer_desc:
- kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
-
-error:
- return result;
-}
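/*
 * Editor's note: adreno_setup_pt() above follows the kernel's standard
 * unwind ladder: each successful step gets a matching label, and a later
 * failure jumps to the label that releases exactly what has been acquired
 * so far, in reverse order. A generic sketch with hypothetical resources:
 */
int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto error;
	ret = acquire_b();
	if (ret)
		goto undo_a;
	ret = acquire_c();
	if (ret)
		goto undo_b;
	return 0;		/* everything acquired */

undo_b:
	release_b();
undo_a:
	release_a();
error:
	return ret;
}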
-
-/*
- * adreno_use_default_setstate() - Use CPU instead of the GPU to manage the mmu?
- * @adreno_dev: the device
- *
- * In many cases it is preferable to poke the iommu or gpummu directly rather
- * than using the GPU command stream. If we are idle or trying to go to a low
- * power state, using the command stream will be slower and asynchronous, which
- * needlessly complicates the power state transitions. Additionally,
- * the hardware simulators do not support command stream MMU operations so
- * the command stream can never be used if we are capturing CFF data.
- *
- */
-static bool adreno_use_default_setstate(struct adreno_device *adreno_dev)
-{
- return (adreno_isidle(&adreno_dev->dev) ||
- KGSL_STATE_ACTIVE != adreno_dev->dev.state ||
- adreno_dev->dev.active_cnt == 0 ||
- kgsl_cff_dump_enable);
-}
-
-static void adreno_iommu_setstate(struct kgsl_device *device,
- unsigned int context_id,
- uint32_t flags)
-{
- unsigned int pt_val, reg_pt_val;
- unsigned int *link = NULL, *cmds;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int num_iommu_units, i;
- struct kgsl_context *context;
- struct adreno_context *adreno_ctx = NULL;
-
- /*
- * If we're idle and we don't need to use the GPU to save context
- * state, use the CPU instead of the GPU to reprogram the
- * iommu for simplicity's sake.
- */
- if (adreno_use_default_setstate(adreno_dev))
- return kgsl_mmu_device_setstate(&device->mmu, flags);
-
- num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
-
- context = kgsl_context_get(device, context_id);
-
- if (context == NULL)
- return;
- adreno_ctx = context->devctxt;
-
- link = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (link == NULL)
- goto done;
-
- cmds = link;
-
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- goto done;
-
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- if (cpu_is_msm8960())
- cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
- else
- cmds += adreno_add_bank_change_cmds(cmds,
- KGSL_IOMMU_CONTEXT_USER,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- cmds += adreno_add_idle_cmds(adreno_dev, cmds);
-
- /* Acquire GPU-CPU sync Lock here */
- cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
-
- pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
- device->mmu.hwpagetable);
- if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- /*
-			 * We need to perform the following operations for all
- * IOMMU units
- */
- for (i = 0; i < num_iommu_units; i++) {
- reg_pt_val = (pt_val + kgsl_mmu_get_pt_lsb(&device->mmu,
- i, KGSL_IOMMU_CONTEXT_USER));
- /*
-			 * Set address of the new pagetable by writing to IOMMU
- * TTBR0 register
- */
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
- KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0);
- *cmds++ = reg_pt_val;
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- /*
- * Read back the ttbr0 register as a barrier to ensure
- * above writes have completed
- */
- cmds += adreno_add_read_cmds(device, cmds,
- kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
- KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0),
- reg_pt_val,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
- }
- }
- if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
- /*
- * tlb flush
- */
- for (i = 0; i < num_iommu_units; i++) {
- reg_pt_val = (pt_val + kgsl_mmu_get_pt_lsb(&device->mmu,
- i, KGSL_IOMMU_CONTEXT_USER));
-
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
- KGSL_IOMMU_CONTEXT_USER,
- KGSL_IOMMU_CTX_TLBIALL);
- *cmds++ = 1;
-
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- cmds += adreno_add_read_cmds(device, cmds,
- kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
- KGSL_IOMMU_CONTEXT_USER,
- KGSL_IOMMU_CTX_TTBR0),
- reg_pt_val,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
- }
- }
-
- /* Release GPU-CPU sync Lock here */
- cmds += kgsl_mmu_sync_unlock(&device->mmu, cmds);
-
- if (cpu_is_msm8960())
- cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
- kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
- 0, KGSL_IOMMU_GLOBAL_BASE),
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
- else
- cmds += adreno_add_bank_change_cmds(cmds,
- KGSL_IOMMU_CONTEXT_PRIV,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- cmds += adreno_add_idle_cmds(adreno_dev, cmds);
-
- if ((unsigned int) (cmds - link)) {
- /* invalidate all base pointers */
- *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmds++ = 0x7fff;
- /* This returns the per context timestamp but we need to
- * use the global timestamp for iommu clock disablement */
- adreno_ringbuffer_issuecmds(device, adreno_ctx,
- KGSL_CMD_FLAGS_PMODE,
- link, (unsigned int)(cmds - link));
- kgsl_mmu_disable_clk_on_ts(&device->mmu,
- adreno_dev->ringbuffer.global_ts, true);
- }
-
- if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) {
- KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
- BUG();
- }
-done:
- kfree(link);
- kgsl_context_put(context);
-}
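/*
 * Editor's note: the setstate builder above appends packets through a
 * moving 'cmds' pointer into one kmalloc'ed page and derives the final
 * size in dwords from pointer arithmetic. A condensed sketch of that
 * idiom; submit() and the packet words are hypothetical:
 */
#include <linux/slab.h>

void submit(unsigned int *cmds, unsigned int sizedwords);

static void build_and_submit(void)
{
	unsigned int *link, *cmds;

	link = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (link == NULL)
		return;

	cmds = link;
	*cmds++ = 0xC0003F00;	/* a packet header (made-up value) */
	*cmds++ = 0x00000000;	/* its payload */

	/* size = number of dwords written since 'link' */
	submit(link, (unsigned int)(cmds - link));
	kfree(link);
}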
-
-static void adreno_gpummu_setstate(struct kgsl_device *device,
- unsigned int context_id,
- uint32_t flags)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int link[32];
- unsigned int *cmds = &link[0];
- int sizedwords = 0;
- unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
- struct kgsl_context *context;
- struct adreno_context *adreno_ctx = NULL;
-
- /*
- * Fix target freeze issue by adding TLB flush for each submit
- * on A20X based targets.
- */
- if (adreno_is_a20x(adreno_dev))
- flags |= KGSL_MMUFLAGS_TLBFLUSH;
- /*
- * If possible, then set the state via the command stream to avoid
- * a CPU idle. Otherwise, use the default setstate which uses register
- * writes For CFF dump we must idle and use the registers so that it is
- * easier to filter out the mmu accesses from the dump
- */
- if (!adreno_use_default_setstate(adreno_dev)) {
- context = kgsl_context_get(device, context_id);
- if (context == NULL)
- return;
- adreno_ctx = context->devctxt;
-
- if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- /* wait for graphics pipe to be idle */
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- /* set page table base */
- *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
- *cmds++ = kgsl_mmu_get_pt_base_addr(&device->mmu,
- device->mmu.hwpagetable);
- sizedwords += 4;
- }
-
- if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
- if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
- 1);
- *cmds++ = 0x00000000;
- sizedwords += 2;
- }
- *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
- *cmds++ = mh_mmu_invalidate;
- sizedwords += 2;
- }
-
- if (flags & KGSL_MMUFLAGS_PTUPDATE &&
- adreno_is_a20x(adreno_dev)) {
- /* HW workaround: to resolve MMU page fault interrupts
- * caused by the VGT.It prevents the CP PFP from filling
- * the VGT DMA request fifo too early,thereby ensuring
- * that the VGT will not fetch vertex/bin data until
- * after the page table base register has been updated.
- *
- * Two null DRAW_INDX_BIN packets are inserted right
- * after the page table base update, followed by a
- * wait for idle. The null packets will fill up the
- * VGT DMA request fifo and prevent any further
- * vertex/bin updates from occurring until the wait
- * has finished. */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = (0x4 << 16) |
- (REG_PA_SU_SC_MODE_CNTL - 0x2000);
- *cmds++ = 0; /* disable faceness generation */
- *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
- *cmds++ = device->mmu.setstate_memory.gpuaddr;
- *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
- *cmds++ = 0; /* viz query info */
- *cmds++ = 0x0003C004; /* draw indicator */
- *cmds++ = 0; /* bin base */
- *cmds++ = 3; /* bin size */
- *cmds++ =
- device->mmu.setstate_memory.gpuaddr; /* dma base */
- *cmds++ = 6; /* dma size */
- *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
- *cmds++ = 0; /* viz query info */
- *cmds++ = 0x0003C004; /* draw indicator */
- *cmds++ = 0; /* bin base */
- *cmds++ = 3; /* bin size */
- /* dma base */
- *cmds++ = device->mmu.setstate_memory.gpuaddr;
- *cmds++ = 6; /* dma size */
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
- sizedwords += 21;
- }
-
-
- if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
- *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmds++ = 0x7fff; /* invalidate all base pointers */
- sizedwords += 2;
- }
-
- adreno_ringbuffer_issuecmds(device, adreno_ctx,
- KGSL_CMD_FLAGS_PMODE,
- &link[0], sizedwords);
-
- kgsl_context_put(context);
- } else {
- kgsl_mmu_device_setstate(&device->mmu, flags);
- }
-}
-
-static void adreno_setstate(struct kgsl_device *device,
- unsigned int context_id,
- uint32_t flags)
-{
- /* call the mmu specific handler */
- if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
- return adreno_gpummu_setstate(device, context_id, flags);
- else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
- return adreno_iommu_setstate(device, context_id, flags);
-}
-
-static unsigned int
-a3xx_getchipid(struct kgsl_device *device)
-{
- struct kgsl_device_platform_data *pdata =
- kgsl_device_get_drvdata(device);
-
- /*
- * All current A3XX chipids are detected at the SOC level. Leave this
- * function here to support any future GPUs that have working
- * chip ID registers
- */
-
- return pdata->chipid;
-}
-
-static unsigned int
-a2xx_getchipid(struct kgsl_device *device)
-{
- unsigned int chipid = 0;
- unsigned int coreid, majorid, minorid, patchid, revid;
- struct kgsl_device_platform_data *pdata =
- kgsl_device_get_drvdata(device);
-
- /* If the chip id is set at the platform level, then just use that */
-
- if (pdata->chipid != 0)
- return pdata->chipid;
-
- adreno_regread(device, REG_RBBM_PERIPHID1, &coreid);
- adreno_regread(device, REG_RBBM_PERIPHID2, &majorid);
- adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
-
- /*
- * adreno 22x gpus are indicated by coreid 2,
- * but REG_RBBM_PERIPHID1 always contains 0 for this field
- */
- if (cpu_is_msm8x60())
- chipid = 2 << 24;
- else
- chipid = (coreid & 0xF) << 24;
-
- chipid |= ((majorid >> 4) & 0xF) << 16;
-
- minorid = ((revid >> 0) & 0xFF);
-
- patchid = ((revid >> 16) & 0xFF);
-
- /* 8x50 returns 0 for patch release, but it should be 1 */
- /* 8x25 returns 0 for minor id, but it should be 1 */
- if (cpu_is_qsd8x50())
- patchid = 1;
- else if (cpu_is_msm8625() && minorid == 0)
- minorid = 1;
-
- chipid |= (minorid << 8) | patchid;
-
- return chipid;
-}
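/*
 * Editor's note: the chip id assembled above packs four fields into one
 * word: core in bits [31:24], major in [23:16], minor in [15:8] and patch
 * in [7:0]. A minimal sketch of the packing and the matching extractors
 * (helper names are illustrative, not the driver's macros):
 */
static unsigned int chipid_pack(unsigned int core, unsigned int major,
				unsigned int minor, unsigned int patch)
{
	return (core << 24) | (major << 16) | (minor << 8) | patch;
}

static unsigned int chipid_core(unsigned int id)  { return (id >> 24) & 0xFF; }
static unsigned int chipid_major(unsigned int id) { return (id >> 16) & 0xFF; }
static unsigned int chipid_minor(unsigned int id) { return (id >> 8) & 0xFF; }
static unsigned int chipid_patch(unsigned int id) { return id & 0xFF; }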
-
-static unsigned int
-adreno_getchipid(struct kgsl_device *device)
-{
- struct kgsl_device_platform_data *pdata =
- kgsl_device_get_drvdata(device);
-
- /*
- * All A3XX chipsets will have pdata set, so assume !pdata->chipid is
- * an A2XX processor
- */
-
- if (pdata->chipid == 0 || ADRENO_CHIPID_MAJOR(pdata->chipid) == 2)
- return a2xx_getchipid(device);
- else
- return a3xx_getchipid(device);
-}
-
-static inline bool _rev_match(unsigned int id, unsigned int entry)
-{
- return (entry == ANY_ID || entry == id);
-}
-
-static void
-adreno_identify_gpu(struct adreno_device *adreno_dev)
-{
- unsigned int i, core, major, minor, patchid;
-
- adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
-
- core = ADRENO_CHIPID_CORE(adreno_dev->chip_id);
- major = ADRENO_CHIPID_MAJOR(adreno_dev->chip_id);
- minor = ADRENO_CHIPID_MINOR(adreno_dev->chip_id);
- patchid = ADRENO_CHIPID_PATCH(adreno_dev->chip_id);
-
- for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
- if (core == adreno_gpulist[i].core &&
- _rev_match(major, adreno_gpulist[i].major) &&
- _rev_match(minor, adreno_gpulist[i].minor) &&
- _rev_match(patchid, adreno_gpulist[i].patchid))
- break;
- }
-
- if (i == ARRAY_SIZE(adreno_gpulist)) {
- adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
- return;
- }
-
- adreno_dev->gpurev = adreno_gpulist[i].gpurev;
- adreno_dev->gpudev = adreno_gpulist[i].gpudev;
- adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
- adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
- adreno_dev->istore_size = adreno_gpulist[i].istore_size;
- adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start;
- adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
- adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
- adreno_dev->gpulist_index = i;
-
-}
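/*
 * Editor's note: a condensed sketch of the wildcard table lookup done by
 * adreno_identify_gpu()/_rev_match() above. ANY_ID entries match every
 * value, so more specific rows must precede wildcard rows in the table
 * (as they do in adreno_gpulist). Types and names here are illustrative.
 */
#define ANY_ID (~0U)

struct gpu_entry { unsigned int core, major, minor, patchid; };

static int rev_match(unsigned int id, unsigned int entry)
{
	return entry == ANY_ID || entry == id;
}

static int find_gpu(const struct gpu_entry *tbl, int n, unsigned int core,
		    unsigned int major, unsigned int minor,
		    unsigned int patchid)
{
	int i;

	for (i = 0; i < n; i++)
		if (core == tbl[i].core &&
		    rev_match(major, tbl[i].major) &&
		    rev_match(minor, tbl[i].minor) &&
		    rev_match(patchid, tbl[i].patchid))
			return i;	/* first (most specific) match wins */

	return -1;			/* unknown GPU */
}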
-
-static struct platform_device_id adreno_id_table[] = {
- { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
- {},
-};
-
-MODULE_DEVICE_TABLE(platform, adreno_id_table);
-
-static struct of_device_id adreno_match_table[] = {
- { .compatible = "qcom,kgsl-3d0", },
- {}
-};
-
-static inline int adreno_of_read_property(struct device_node *node,
- const char *prop, unsigned int *ptr)
-{
- int ret = of_property_read_u32(node, prop, ptr);
- if (ret)
- KGSL_CORE_ERR("Unable to read '%s'\n", prop);
- return ret;
-}
-
-static struct device_node *adreno_of_find_subnode(struct device_node *parent,
- const char *name)
-{
- struct device_node *child;
-
- for_each_child_of_node(parent, child) {
- if (of_device_is_compatible(child, name))
- return child;
- }
-
- return NULL;
-}
-
-static int adreno_of_get_pwrlevels(struct device_node *parent,
- struct kgsl_device_platform_data *pdata)
-{
- struct device_node *node, *child;
- int ret = -EINVAL;
-
- node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
-
- if (node == NULL) {
- KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
- return -EINVAL;
- }
-
- pdata->num_levels = 0;
-
- for_each_child_of_node(node, child) {
- unsigned int index;
- struct kgsl_pwrlevel *level;
-
- if (adreno_of_read_property(child, "reg", &index))
- goto done;
-
- if (index >= KGSL_MAX_PWRLEVELS) {
- KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
- index);
- continue;
- }
-
- if (index >= pdata->num_levels)
- pdata->num_levels = index + 1;
-
- level = &pdata->pwrlevel[index];
-
- if (adreno_of_read_property(child, "qcom,gpu-freq",
- &level->gpu_freq))
- goto done;
-
- if (adreno_of_read_property(child, "qcom,bus-freq",
- &level->bus_freq))
- goto done;
-
- if (adreno_of_read_property(child, "qcom,io-fraction",
- &level->io_fraction))
- level->io_fraction = 0;
- }
-
- if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
- &pdata->init_level))
- pdata->init_level = 1;
-
- if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
- KGSL_CORE_ERR("Initial power level out of range\n");
- pdata->init_level = 1;
- }
-
- ret = 0;
-done:
- return ret;
-
-}
-
-static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
-{
- struct device_node *node, *child;
- struct msm_dcvs_core_info *info = NULL;
- int count = 0;
- int ret = -EINVAL;
-
- node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
- if (node == NULL)
- return ERR_PTR(-EINVAL);
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
-
- if (info == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
- ret = -ENOMEM;
- goto err;
- }
-
- for_each_child_of_node(node, child)
- count++;
-
- info->power_param.num_freq = count;
-
- info->freq_tbl = kzalloc(info->power_param.num_freq *
- sizeof(struct msm_dcvs_freq_entry),
- GFP_KERNEL);
-
- if (info->freq_tbl == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- info->power_param.num_freq *
- sizeof(struct msm_dcvs_freq_entry));
- ret = -ENOMEM;
- goto err;
- }
-
- for_each_child_of_node(node, child) {
- unsigned int index;
-
- if (adreno_of_read_property(child, "reg", &index))
- goto err;
-
- if (index >= info->power_param.num_freq) {
- KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
- index);
- continue;
- }
-
- if (adreno_of_read_property(child, "qcom,freq",
- &info->freq_tbl[index].freq))
- goto err;
-
- if (adreno_of_read_property(child, "qcom,voltage",
- &info->freq_tbl[index].voltage))
- info->freq_tbl[index].voltage = 0;
-
- if (adreno_of_read_property(child, "qcom,is_trans_level",
- &info->freq_tbl[index].is_trans_level))
- info->freq_tbl[index].is_trans_level = 0;
-
- if (adreno_of_read_property(child, "qcom,active-energy-offset",
- &info->freq_tbl[index].active_energy_offset))
- info->freq_tbl[index].active_energy_offset = 0;
-
- if (adreno_of_read_property(child, "qcom,leakage-energy-offset",
- &info->freq_tbl[index].leakage_energy_offset))
- info->freq_tbl[index].leakage_energy_offset = 0;
- }
-
- if (adreno_of_read_property(node, "qcom,num-cores", &info->num_cores))
- goto err;
-
- info->sensors = kzalloc(info->num_cores *
- sizeof(int),
- GFP_KERNEL);
-
- for (count = 0; count < info->num_cores; count++) {
- if (adreno_of_read_property(node, "qcom,sensors",
- &(info->sensors[count])))
- goto err;
- }
-
- if (adreno_of_read_property(node, "qcom,core-core-type",
- &info->core_param.core_type))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
- &info->algo_param.disable_pc_threshold))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-win-size-min-us",
- &info->algo_param.em_win_size_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-win-size-max-us",
- &info->algo_param.em_win_size_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
- &info->algo_param.em_max_util_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-group-id",
- &info->algo_param.group_id))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-max-freq-chg-time-us",
- &info->algo_param.max_freq_chg_time_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-mode-dynamic",
- &info->algo_param.slack_mode_dynamic))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-weight-thresh-pct",
- &info->algo_param.slack_weight_thresh_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-time-min-us",
- &info->algo_param.slack_time_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-slack-time-max-us",
- &info->algo_param.slack_time_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-win-size-min-us",
- &info->algo_param.ss_win_size_min_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-win-size-max-us",
- &info->algo_param.ss_win_size_max_us))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
- &info->algo_param.ss_util_pct))
- goto err;
- if (adreno_of_read_property(node, "qcom,algo-ss-no-corr-below-freq",
- &info->algo_param.ss_no_corr_below_freq))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-a",
- &info->energy_coeffs.active_coeff_a))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-b",
- &info->energy_coeffs.active_coeff_b))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-active-coeff-c",
- &info->energy_coeffs.active_coeff_c))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-a",
- &info->energy_coeffs.leakage_coeff_a))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-b",
- &info->energy_coeffs.leakage_coeff_b))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-c",
- &info->energy_coeffs.leakage_coeff_c))
- goto err;
- if (adreno_of_read_property(node, "qcom,energy-leakage-coeff-d",
- &info->energy_coeffs.leakage_coeff_d))
- goto err;
-
- if (adreno_of_read_property(node, "qcom,power-current-temp",
- &info->power_param.current_temp))
- goto err;
-
- return info;
-
-err:
- if (info)
- kfree(info->freq_tbl);
-
- kfree(info);
-
- return ERR_PTR(ret);
-}
-
-static int adreno_of_get_iommu(struct device_node *parent,
- struct kgsl_device_platform_data *pdata)
-{
- struct device_node *node, *child;
- struct kgsl_device_iommu_data *data = NULL;
- struct kgsl_iommu_ctx *ctxs = NULL;
- u32 reg_val[2];
- int ctx_index = 0;
-
- node = of_parse_phandle(parent, "iommu", 0);
- if (node == NULL)
- return -EINVAL;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (data == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*data));
- goto err;
- }
-
- if (of_property_read_u32_array(node, "reg", reg_val, 2))
- goto err;
-
- data->physstart = reg_val[0];
- data->physend = data->physstart + reg_val[1] - 1;
-
- data->iommu_ctx_count = 0;
-
- for_each_child_of_node(node, child)
- data->iommu_ctx_count++;
-
- ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
- GFP_KERNEL);
-
- if (ctxs == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
- goto err;
- }
-
- for_each_child_of_node(node, child) {
- int ret = of_property_read_string(child, "label",
- &ctxs[ctx_index].iommu_ctx_name);
-
- if (ret) {
- KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
- goto err;
- }
-
- if (adreno_of_read_property(child, "qcom,iommu-ctx-sids",
- &ctxs[ctx_index].ctx_id))
- goto err;
-
- ctx_index++;
- }
-
- data->iommu_ctxs = ctxs;
-
- pdata->iommu_data = data;
- pdata->iommu_count = 1;
-
- return 0;
-
-err:
- kfree(ctxs);
- kfree(data);
-
- return -EINVAL;
-}
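/*
 * Editor's note: the IOMMU parser above is the common two-pass device
 * tree walk: count the children first to size one allocation, then walk
 * again to fill it. A condensed kernel-style sketch; fill_one() and
 * struct ctx are hypothetical:
 */
#include <linux/of.h>
#include <linux/slab.h>

struct ctx { const char *name; };
void fill_one(struct ctx *c, struct device_node *child);

static struct ctx *parse_children(struct device_node *node, int *nout)
{
	struct device_node *child;
	struct ctx *ctxs;
	int count = 0, i = 0;

	for_each_child_of_node(node, child)
		count++;			/* pass 1: size */

	ctxs = kzalloc(count * sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return NULL;

	for_each_child_of_node(node, child)
		fill_one(&ctxs[i++], child);	/* pass 2: fill */

	*nout = count;
	return ctxs;
}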
-
-static int adreno_of_get_pdata(struct platform_device *pdev)
-{
- struct kgsl_device_platform_data *pdata = NULL;
- struct kgsl_device *device;
- int ret = -EINVAL;
-
- pdev->id_entry = adreno_id_table;
-
- pdata = pdev->dev.platform_data;
- if (pdata)
- return 0;
-
- if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
- KGSL_CORE_ERR("Unable to read 'label'\n");
- goto err;
- }
-
- if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
- goto err;
-
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (pdata == NULL) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
- ret = -ENOMEM;
- goto err;
- }
-
- if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
- &pdata->chipid))
- goto err;
-
- /* pwrlevel Data */
- ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
- if (ret)
- goto err;
-
- /* Default value is 83, if not found in DT */
- if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
- &pdata->idle_timeout))
- pdata->idle_timeout = 83;
-
- if (adreno_of_read_property(pdev->dev.of_node, "qcom,nap-allowed",
- &pdata->nap_allowed))
- pdata->nap_allowed = 1;
-
- if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
- &pdata->clk_map))
- goto err;
-
- device = (struct kgsl_device *)pdev->id_entry->driver_data;
-
- if (device->id != KGSL_DEVICE_3D0)
- goto err;
-
- /* Bus Scale Data */
-
- pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
- if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
- ret = PTR_ERR(pdata->bus_scale_table);
- goto err;
- }
-
- pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
- if (IS_ERR_OR_NULL(pdata->core_info)) {
- ret = PTR_ERR(pdata->core_info);
- goto err;
- }
-
- ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
- if (ret)
- goto err;
-
- pdev->dev.platform_data = pdata;
- return 0;
-
-err:
- if (pdata) {
- if (pdata->core_info)
- kfree(pdata->core_info->freq_tbl);
- kfree(pdata->core_info);
-
- if (pdata->iommu_data)
- kfree(pdata->iommu_data->iommu_ctxs);
-
- kfree(pdata->iommu_data);
- }
-
- kfree(pdata);
-
- return ret;
-}
-
-#ifdef CONFIG_MSM_OCMEM
-static int
-adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
-{
- if (!adreno_is_a330(adreno_dev))
- return 0;
-
-	/* OCMEM is only needed once, do not support consecutive allocation */
- if (adreno_dev->ocmem_hdl != NULL)
- return 0;
-
- adreno_dev->ocmem_hdl =
- ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
- if (adreno_dev->ocmem_hdl == NULL)
- return -ENOMEM;
-
- adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
- adreno_dev->ocmem_base = adreno_dev->ocmem_hdl->addr;
-
- return 0;
-}
-
-static void
-adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
-{
- if (!adreno_is_a330(adreno_dev))
- return;
-
- if (adreno_dev->ocmem_hdl == NULL)
- return;
-
- ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
- adreno_dev->ocmem_hdl = NULL;
-}
-#else
-static int
-adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
-{
- return 0;
-}
-
-static void
-adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
-{
-}
-#endif
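/*
 * Editor's note: the #ifdef pair above is the usual "stub the feature"
 * pattern: with CONFIG_MSM_OCMEM disabled, the same function names
 * collapse to no-ops, so adreno_start()/adreno_stop() can call them
 * unconditionally. Generic sketch with a hypothetical config option:
 */
#ifdef CONFIG_SOME_FEATURE
int feature_alloc(void);		/* real implementation elsewhere */
void feature_free(void);
#else
static inline int feature_alloc(void) { return 0; }	/* no-op stubs */
static inline void feature_free(void) { }
#endif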
-
-static int __devinit
-adreno_probe(struct platform_device *pdev)
-{
- struct kgsl_device *device;
- struct adreno_device *adreno_dev;
- int status = -EINVAL;
- bool is_dt;
-
- is_dt = of_match_device(adreno_match_table, &pdev->dev);
-
- if (is_dt && pdev->dev.of_node) {
- status = adreno_of_get_pdata(pdev);
- if (status)
- goto error_return;
- }
-
- device = (struct kgsl_device *)pdev->id_entry->driver_data;
- adreno_dev = ADRENO_DEVICE(device);
- device->parentdev = &pdev->dev;
-
- status = adreno_ringbuffer_init(device);
- if (status != 0)
- goto error;
-
- status = kgsl_device_platform_probe(device);
- if (status)
- goto error_close_rb;
-
- adreno_debugfs_init(device);
-
- adreno_ft_init_sysfs(device);
-
- kgsl_pwrscale_init(device);
- kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
-
- device->flags &= ~KGSL_FLAGS_SOFT_RESET;
- return 0;
-
-error_close_rb:
- adreno_ringbuffer_close(&adreno_dev->ringbuffer);
-error:
- device->parentdev = NULL;
-error_return:
- return status;
-}
-
-static int __devexit adreno_remove(struct platform_device *pdev)
-{
- struct kgsl_device *device;
- struct adreno_device *adreno_dev;
-
- device = (struct kgsl_device *)pdev->id_entry->driver_data;
- adreno_dev = ADRENO_DEVICE(device);
-
- kgsl_pwrscale_detach_policy(device);
- kgsl_pwrscale_close(device);
-
- adreno_ringbuffer_close(&adreno_dev->ringbuffer);
- kgsl_device_platform_remove(device);
-
- return 0;
-}
-
-static int adreno_init(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
-
- /* Power up the device */
- kgsl_pwrctrl_enable(device);
-
- /* Identify the specific GPU */
- adreno_identify_gpu(adreno_dev);
-
- if (adreno_ringbuffer_read_pm4_ucode(device)) {
- KGSL_DRV_ERR(device, "Reading pm4 microcode failed %s\n",
- adreno_dev->pm4_fwfile);
- BUG_ON(1);
- }
-
- if (adreno_ringbuffer_read_pfp_ucode(device)) {
- KGSL_DRV_ERR(device, "Reading pfp microcode failed %s\n",
- adreno_dev->pfp_fwfile);
- BUG_ON(1);
- }
-
- if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
- KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
- adreno_dev->chip_id);
- BUG_ON(1);
- }
-
- /*
- * Check if firmware supports the sync lock PM4 packets needed
- * for IOMMUv1
- */
-
- if ((adreno_dev->pm4_fw_version >=
- adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pm4_ver) &&
- (adreno_dev->pfp_fw_version >=
- adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
- device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
-
- rb->global_ts = 0;
-
- /* Assign correct RBBM status register to hang detect regs
- */
- ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
-
- if (!adreno_is_a2xx(adreno_dev))
- adreno_perfcounter_init(device);
-
- /* Power down the device */
- kgsl_pwrctrl_disable(device);
-
- return 0;
-}
-
-static int adreno_start(struct kgsl_device *device)
-{
- int status = -EINVAL;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int state = device->state;
-
- kgsl_cffdump_open(device);
-
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
-
- /* Power up the device */
- kgsl_pwrctrl_enable(device);
-
- /* Set up a2xx special case */
- if (adreno_is_a2xx(adreno_dev)) {
- /*
- * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
- * on older gpus
- */
- if (adreno_is_a20x(adreno_dev)) {
- device->mh.mh_intf_cfg1 = 0;
- device->mh.mh_intf_cfg2 = 0;
- }
-
- kgsl_mh_start(device);
- }
-
- /* Assign correct RBBM status register to hang detect regs
- */
- ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
-
- /* Add A3XX specific registers for hang detection */
- if (adreno_is_a3xx(adreno_dev)) {
- ft_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
- ft_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
- ft_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
- ft_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
- ft_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
- ft_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
- }
-
- status = kgsl_mmu_start(device);
- if (status)
- goto error_clk_off;
-
- status = adreno_ocmem_gmem_malloc(adreno_dev);
- if (status) {
- KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
- goto error_mmu_off;
- }
-
- /* Start the GPU */
- adreno_dev->gpudev->start(adreno_dev);
-
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- device->ftbl->irqctrl(device, 1);
-
- status = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
- if (status)
- goto error_irq_off;
-
- /*
-	 * While recovery is on we do not want the timer to
- * fire and attempt to change any device state
- */
-
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
-
- if (!adreno_is_a2xx(adreno_dev))
- adreno_perfcounter_start(adreno_dev);
- else {
- unsigned int reg;
-
- kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
- kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
-
- /*
- * Select SP_ALU_INSTR_EXEC (0x85) to get number of
- * ALU instructions executed.
- */
- kgsl_regwrite(device, REG_SQ_PERFCOUNTER3_SELECT, 0x85);
-
- kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
- REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
-
- ft_detect_regs[6] = REG_SQ_PERFCOUNTER3_LO;
- ft_detect_regs[7] = REG_SQ_PERFCOUNTER3_HI;
- }
-
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT)));
-
- device->reset_counter++;
-
- return 0;
-
-error_irq_off:
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-
-error_mmu_off:
- kgsl_mmu_stop(&device->mmu);
-
-error_clk_off:
- if (KGSL_STATE_DUMP_AND_FT != device->state) {
- kgsl_pwrctrl_disable(device);
- /* set the state back to original state */
- kgsl_pwrctrl_set_state(device, state);
- }
-
- return status;
-}
-
-static int adreno_stop(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- adreno_dev->drawctxt_active = NULL;
-
- adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
-
- kgsl_mmu_stop(&device->mmu);
-
- device->ftbl->irqctrl(device, 0);
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
-
- adreno_ocmem_gmem_free(adreno_dev);
-
- /* Power down the device */
- kgsl_pwrctrl_disable(device);
-
- kgsl_cffdump_close(device->id);
-
- return 0;
-}
-
-/*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party, if fault tolerance failed then
- * mark all as guilty
- */
-
-static int _mark_context_status(int id, void *ptr, void *data)
-{
- unsigned int ft_status = *((unsigned int *) data);
- struct kgsl_context *context = ptr;
- struct adreno_context *adreno_context = context->devctxt;
-
- if (ft_status) {
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
- context->reset_status) {
- if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
- CTXT_FLAGS_GPU_HANG_FT))
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- else
- context->reset_status =
- KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
- }
-
- return 0;
-}
-
-static void adreno_mark_context_status(struct kgsl_device *device,
- int ft_status)
-{
- /* Mark the status for all the contexts in the device */
-
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _mark_context_status, &ft_status);
- read_unlock(&device->context_lock);
-}
-
-/*
- * For hung contexts set the current memstore value to the most recent issued
- * timestamp - this resets the status and lets the system continue on
- */
-
-static int _set_max_ts(int id, void *ptr, void *data)
-{
- struct kgsl_device *device = data;
- struct kgsl_context *context = ptr;
- struct adreno_context *drawctxt = context->devctxt;
-
- if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- soptimestamp),
- drawctxt->timestamp);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- eoptimestamp),
- drawctxt->timestamp);
- }
-
- return 0;
-}
-
-static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
-{
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _set_max_ts, device);
- read_unlock(&device->context_lock);
-}
-
-static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
-{
- vfree(ft_data->rb_buffer);
- vfree(ft_data->bad_rb_buffer);
- vfree(ft_data->good_rb_buffer);
-}
-
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
- unsigned int *ptr,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int val1;
- unsigned int size = rb->buffer_desc.size;
- unsigned int start_ptr = *ptr;
-
- while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
- if (inc)
- start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
- size);
- else
- start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
- size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
- /* Ensure above read is finished before next read */
- rmb();
- if (KGSL_CMD_IDENTIFIER == val1) {
- if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
- start_ptr = adreno_ringbuffer_dec_wrapped(
- start_ptr, size);
- *ptr = start_ptr;
- status = 0;
- break;
- }
- }
- return status;
-}
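/*
 * Editor's note: the inc/dec helpers used above step a byte offset
 * through a ring buffer one dword at a time, wrapping at the buffer
 * size. A minimal sketch of that modular arithmetic (hypothetical
 * names; the real helpers live in the ringbuffer code):
 */
static unsigned int rb_inc_wrapped(unsigned int off, unsigned int size)
{
	return (off + sizeof(unsigned int)) % size;
}

static unsigned int rb_dec_wrapped(unsigned int off, unsigned int size)
{
	/* add size before subtracting to avoid unsigned underflow at 0 */
	return (off + size - sizeof(unsigned int)) % size;
}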
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int global_eop,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[3];
- int i = 0;
- bool check = false;
-
- if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
- return status;
-
- do {
- /*
- * when decrementing we need to decrement first and
-		 * then read, to make sure we cover all the data
- */
- if (!inc)
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
- temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && ((inc && val[i] == global_eop) ||
- (!inc && (val[i] ==
- cp_type3_packet(CP_MEM_WRITE, 2) ||
- val[i] == CACHE_FLUSH_TS)))) {
-			/* decrement i, i.e. i = (i - 1 + 3) % 3 if
- * we are going forward, else increment i */
- i = (i + 2) % 3;
- if (val[i] == rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)) {
- int j = ((i + 2) % 3);
- if ((inc && (val[j] == CACHE_FLUSH_TS ||
- val[j] == cp_type3_packet(
- CP_MEM_WRITE, 2))) ||
- (!inc && val[j] == global_eop)) {
- /* Found the global eop */
- status = 0;
- break;
- }
- }
- /* if no match found then increment i again
- * since we decremented before matching */
- i = (i + 1) % 3;
- }
- if (inc)
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
- temp_rb_rptr, size);
-
- i = (i + 1) % 3;
- if (2 == i)
- check = true;
- } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
- /* temp_rb_rptr points to the command stream after global eop,
-	 * move backward to the start of the command sequence */
- if (!status) {
- status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
- if (!status) {
- *rb_rptr = temp_rb_rptr;
- KGSL_FT_INFO(rb->device,
- "Offset of cmd sequence after eop timestamp: 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- }
- }
- if (status)
- KGSL_FT_ERR(rb->device,
- "Failed to find the command sequence after eop timestamp %x\n",
- global_eop);
- return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int ib1)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool ctx_switch = false;
-
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && val[i] == ib1) {
-			/* decrement i, i.e. i = (i - 1 + 2) % 2 */
- i = (i + 1) % 2;
- if (adreno_cmd_is_ib(val[i])) {
- /* go till start of command sequence */
- status = _find_start_of_cmd_seq(rb,
- &temp_rb_rptr, false);
-
- KGSL_FT_INFO(rb->device,
- "Found the hanging IB at offset 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- break;
- }
-			/* if no match then increment i since we decremented
- * before checking */
- i = (i + 1) % 2;
- }
-		/* Make sure we do not encounter a context switch twice; we can
-		 * encounter it once for the bad context, as the start of search
- * can point to the context switch */
- if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- if (ctx_switch) {
- KGSL_FT_ERR(rb->device,
- "Context switch encountered before bad "
- "IB found\n");
- break;
- }
- ctx_switch = true;
- }
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
- if (!status)
- *rb_rptr = temp_rb_rptr;
- return status;
-}
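/*
 * Editor's note: the val[]/i bookkeeping in the two searches above is a
 * rolling window over the last few dwords read, indexed modulo the
 * window size, so a multi-dword pattern can be matched in one pass. A
 * self-contained two-wide sketch (the pattern values are made up):
 */
#define PATTERN_FIRST	0xC0013F00
#define PATTERN_SECOND	0x00000000

static int find_pair(const unsigned int *buf, int n)
{
	unsigned int win[2];
	int i = 0, pos;

	for (pos = 0; pos < n; pos++) {
		win[i] = buf[pos];
		/* the previous dword sits in the other window slot */
		if (pos >= 1 && win[i] == PATTERN_SECOND &&
		    win[(i + 1) % 2] == PATTERN_FIRST)
			return pos - 1;		/* offset of first dword */
		i = (i + 1) % 2;
	}
	return -1;				/* no match */
}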
-
-static void adreno_setup_ft_data(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
- unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
-
- memset(ft_data, 0, sizeof(*ft_data));
- ft_data->start_of_replay_cmds = 0xFFFFFFFF;
- ft_data->replay_for_snapshot = 0xFFFFFFFF;
-
- adreno_regread(device, REG_CP_IB1_BASE, &ft_data->ib1);
-
- kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore,
- &ft_data->global_eop,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
-
- /* Ensure context id and global eop ts read complete */
- rmb();
-
- ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->bad_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->good_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->status = 0;
-
- /* find the start of bad command sequence in rb */
- context = kgsl_context_get(device, ft_data->context_id);
-
- ft_data->ft_policy = adreno_dev->ft_policy;
-
- if (!adreno_dev->ft_policy)
- adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
-
- /* Look for the command stream that is right after the global eop */
- ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
- ft_data->global_eop + 1, false);
- if (ret) {
- ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
- goto done;
- } else {
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
- }
-
- if (context) {
- adreno_context = context->devctxt;
- if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
- if (ft_data->ib1) {
- ret = _find_hanging_ib_sequence(rb,
- &rb_rptr, ft_data->ib1);
- if (ret) {
- KGSL_FT_ERR(device,
- "Start not found for replay IB seq\n");
- goto done;
- }
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->replay_for_snapshot = rb_rptr;
- }
- }
- }
-
-done:
- kgsl_context_put(context);
-}
-
-static int
-_adreno_check_long_ib(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int curr_global_ts = 0;
-
- /* check if the global ts is still the same */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
- /* Ensure above read is finished before long ib check */
- rmb();
-
- /* Mark long ib as handled */
- adreno_dev->long_ib = 0;
-
- if (curr_global_ts == adreno_dev->long_ib_ts) {
- KGSL_FT_ERR(device,
- "IB ran too long, invalidate ctxt\n");
- return 1;
- } else {
-		/* Do nothing, the GPU has gone ahead */
- KGSL_FT_INFO(device, "false long ib detection return\n");
- return 0;
- }
-}
-
-static int
-_adreno_ft_restart_device(struct kgsl_device *device,
- struct kgsl_context *context)
-{
- struct adreno_context *adreno_context = NULL;
-
- /* restart device */
- if (adreno_stop(device)) {
- KGSL_FT_ERR(device, "Device stop failed\n");
- return 1;
- }
-
- if (adreno_init(device)) {
- KGSL_FT_ERR(device, "Device start failed\n");
- return 1;
- }
-
- if (adreno_start(device)) {
- KGSL_FT_ERR(device, "Device start failed\n");
- return 1;
- }
-
- if ((context != NULL) && (context->devctxt != NULL)) {
- adreno_context = context->devctxt;
- kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
- KGSL_MEMSTORE_GLOBAL);
- }
-
- /* If iommu is used then we need to make sure that the iommu clocks
- * are on since there could be commands in pipeline that touch iommu */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return 1;
- }
-
- return 0;
-}
-
-static inline void
-_adreno_debug_ft_info(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
-
- /*
- * Dumping rb is a very useful tool to debug FT.
-	 * It will tell us if we are extracting the rb correctly,
-	 * NOP'ing the right IB, skipping the EOF correctly, etc.
- */
- if (device->ft_log >= 7) {
-
- /* Print fault tolerance data here */
- KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
- ft_data->rb_size);
- adreno_dump_rb(device, ft_data->rb_buffer,
- ft_data->rb_size<<2, 0, ft_data->rb_size);
-
- KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
- ft_data->bad_rb_size);
- adreno_dump_rb(device, ft_data->bad_rb_buffer,
- ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
-
- KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
- ft_data->good_rb_size);
- adreno_dump_rb(device, ft_data->good_rb_buffer,
- ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
-
- }
-}
-
-static int
-_adreno_ft_resubmit_rb(struct kgsl_device *device,
- struct adreno_ringbuffer *rb,
- struct kgsl_context *context,
- struct adreno_ft_data *ft_data,
- unsigned int *buff, unsigned int size)
-{
- unsigned int ret = 0;
- unsigned int retry_num = 0;
-
- _adreno_debug_ft_info(device, ft_data);
-
- do {
- ret = _adreno_ft_restart_device(device, context);
- if (ret == 0)
- break;
- /*
-		 * If the device restart fails, sleep for 20ms before
-		 * attempting another restart. This allows the GPU HW to
-		 * settle and improves the chances that the next restart
-		 * will be successful.
- */
- msleep(20);
- KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
- retry_num++;
- } while (retry_num < 4);
-
- if (ret) {
- KGSL_FT_ERR(device, "Device restart failed\n");
- BUG_ON(1);
- goto done;
- }
-
- if (size) {
-
- /* submit commands and wait for them to pass */
- adreno_ringbuffer_restore(rb, buff, size);
-
- ret = adreno_idle(device);
- }
-
-done:
- return ret;
-}
-
-static int
-_adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0, i;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context = NULL;
- struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
- unsigned int long_ib = 0;
- static int no_context_ft;
-
- context = kgsl_context_get(device, ft_data->context_id);
-
- if (context == NULL) {
- KGSL_FT_ERR(device, "Last context unknown id:%d\n",
- ft_data->context_id);
- if (no_context_ft) {
- /*
- * If 2 consecutive no context ft occurred then
- * just reset GPU
- */
- no_context_ft = 0;
- goto play_good_cmds;
- }
- } else {
- no_context_ft = 0;
- adreno_context = context->devctxt;
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- /*
- * set the invalid ts flag to 0 for this context since we have
- * detected a hang for it
- */
- context->wait_on_invalid_ts = false;
-
- if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
- ft_data->status = 1;
- KGSL_FT_ERR(device, "Fault tolerance not supported\n");
- goto play_good_cmds;
- }
-
- /*
- * This flag will be set by userspace for contexts
- * that do not want to be fault tolerant (ex: OPENCL)
- */
- if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
- ft_data->status = 1;
- KGSL_FT_ERR(device,
- "No FT set for this context play good cmds\n");
- goto play_good_cmds;
- }
-
- }
-
-	/* Check if we detected a long running IB;
-	 * if so, do not attempt replay of bad cmds */
- if ((adreno_context) && (adreno_dev->long_ib)) {
- long_ib = _adreno_check_long_ib(device);
- if (!long_ib) {
- adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
- return 0;
- }
- }
-
- /*
- * Extract valid contents from rb which can still be executed after
- * hang
- */
- adreno_ringbuffer_extract(rb, ft_data);
-
- /* If long IB detected do not attempt replay of bad cmds */
- if (long_ib) {
- ft_data->status = 1;
- _adreno_debug_ft_info(device, ft_data);
- goto play_good_cmds;
- }
-
- if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
- (ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
- KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
-	/* Do not try the replay if the hang is due to a pagefault */
- if (adreno_context && adreno_context->pagefault) {
- if ((ft_data->context_id == adreno_context->id) &&
- (ft_data->global_eop == adreno_context->pagefault_ts)) {
- ft_data->ft_policy &= ~KGSL_FT_REPLAY;
- KGSL_FT_ERR(device, "MMU fault skipping replay\n");
- }
-
- adreno_context->pagefault = 0;
- }
-
- if (ft_data->ft_policy & KGSL_FT_REPLAY) {
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Replay status: 1\n");
- ft_data->status = 1;
- } else
- goto play_good_cmds;
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if ((ft_data->bad_rb_buffer[i] ==
- CP_HDR_INDIRECT_BUFFER_PFD) &&
- (ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
-
- ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
- ft_data->bad_rb_buffer[i+1] =
- KGSL_NOP_IB_IDENTIFIER;
- ft_data->bad_rb_buffer[i+2] =
- KGSL_NOP_IB_IDENTIFIER;
- break;
- }
- }
-
- if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
- KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if (ft_data->bad_rb_buffer[i] ==
- KGSL_END_OF_FRAME_IDENTIFIER) {
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- break;
- }
- }
-
- /* EOF not found in RB, discard till EOF in
- next IB submission */
- if (adreno_context && (i == ft_data->bad_rb_size)) {
- adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
- KGSL_FT_INFO(device,
- "EOF not found in RB, skip next issueib till EOF\n");
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Skip EOF status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
-play_good_cmds:
-
- if (ft_data->status)
- KGSL_FT_ERR(device, "Bad context commands failed\n");
- else {
- KGSL_FT_INFO(device, "Bad context commands success\n");
-
- if (adreno_context) {
- adreno_context->flags = (adreno_context->flags &
- ~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
- }
- adreno_dev->drawctxt_active = last_active_ctx;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->good_rb_buffer, ft_data->good_rb_size);
-
- if (ret) {
- /*
- * If we fail here we can try to invalidate another
- * context and try fault tolerance again, although
- * we will only try ft with no context once to avoid
-		 * going into a continuous loop of trying ft with no context
- */
- if (!context)
- no_context_ft = 1;
- ret = -EAGAIN;
- KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
- goto done;
- } else
- KGSL_FT_INFO(device, "Playing good commands successful\n");
-
- /* ringbuffer now has data from the last valid context id,
- * so restore the active_ctx to the last valid context */
- if (ft_data->last_valid_ctx_id) {
- struct kgsl_context *last_ctx = kgsl_context_get(device,
- ft_data->last_valid_ctx_id);
-
- if (last_ctx)
- adreno_dev->drawctxt_active = last_ctx->devctxt;
-
- kgsl_context_put(last_ctx);
- }
-
-done:
- /* Turn off iommu clocks */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
- kgsl_context_put(context);
- return ret;
-}
-
-static int
-adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- KGSL_FT_INFO(device,
- "Start Parameters: IB1: 0x%X, "
- "Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id, ft_data->global_eop);
-
- KGSL_FT_INFO(device, "Last issued global timestamp: %x\n",
- rb->global_ts);
-
- /* We may need to replay commands multiple times based on whether
- * multiple contexts hang the GPU */
- while (true) {
-
- ret = _adreno_ft(device, ft_data);
-
- if (-EAGAIN == ret) {
-			/* Set up new fault tolerance parameters and retry;
-			 * this means more than one context is causing hangs */
- adreno_destroy_ft_data(ft_data);
- adreno_setup_ft_data(device, ft_data);
- KGSL_FT_INFO(device,
- "Retry. Parameters: "
- "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id,
- ft_data->global_eop);
- } else {
- break;
- }
- }
-
- if (ret)
- goto done;
-
- /* Restore correct states after fault tolerance */
- if (adreno_dev->drawctxt_active)
- kgsl_mmu_setstate(&device->mmu,
- adreno_dev->drawctxt_active->pagetable,
- adreno_dev->drawctxt_active->id);
- else
- kgsl_mmu_setstate(&device->mmu,
- device->mmu.defaultpagetable, KGSL_MEMSTORE_GLOBAL);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp), rb->global_ts);
-
- /* switch to NULL ctxt */
- if (adreno_dev->drawctxt_active != NULL)
- adreno_drawctxt_switch(adreno_dev, NULL, 0);
-
-done:
- adreno_set_max_ts_for_bad_ctxs(device);
- adreno_mark_context_status(device, ret);
- KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
- ft_data->ft_policy, ret);
- return ret;
-}
-
-int
-adreno_dump_and_exec_ft(struct kgsl_device *device)
-{
- int result = -ETIMEDOUT;
- struct adreno_ft_data ft_data;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int curr_pwrlevel;
-
- if (device->state == KGSL_STATE_HUNG)
- goto done;
- if (device->state == KGSL_STATE_DUMP_AND_FT) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->ft_gate);
- mutex_lock(&device->mutex);
- if (device->state != KGSL_STATE_HUNG)
- result = 0;
- } else {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
- INIT_COMPLETION(device->ft_gate);
- /* Detected a hang */
-
- /* Run fault tolerance at max power level */
- curr_pwrlevel = pwr->active_pwrlevel;
- kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
-
- /* Get the fault tolerance data as soon as hang is detected */
- adreno_setup_ft_data(device, &ft_data);
-
- /*
- * If long ib is detected, do not attempt postmortem or
- * snapshot, if GPU is still executing commands
- * we will get errors
- */
- if (!adreno_dev->long_ib) {
- /*
- * Trigger an automatic dump of the state to
- * the console
- */
- kgsl_postmortem_dump(device, 0);
-
- /*
- * Make a GPU snapshot. For now, do it after the
- * PM dump so we can at least be sure the PM dump
- * will work as it always has
- */
- kgsl_device_snapshot(device, 1);
- }
-
- result = adreno_ft(device, &ft_data);
- adreno_destroy_ft_data(&ft_data);
-
- /* restore power level */
- kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
-
- if (result) {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
- } else {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
- mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
- mod_timer(&device->hang_timer,
- (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT)));
- }
- complete_all(&device->ft_gate);
- }
-done:
- return result;
-}
-EXPORT_SYMBOL(adreno_dump_and_exec_ft);
-
-/**
- * _ft_sysfs_store() - Common routine to write to FT sysfs files
- * @buf: value to write
- * @count: size of the value to write
- * @ptr: pointer to the FT config value to update
- *
- * This is a common routine to write to FT sysfs files.
- */
-static int _ft_sysfs_store(const char *buf, size_t count, unsigned int *ptr)
-{
- char temp[20];
- unsigned long val;
- int rc;
-
- snprintf(temp, sizeof(temp), "%.*s",
- (int)min(count, sizeof(temp) - 1), buf);
- rc = kstrtoul(temp, 0, &val);
- if (rc)
- return rc;
-
- *ptr = val;
-
- return count;
-}
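
All four FT attributes funnel their writes through _ft_sysfs_store(), so
values may be given in any base kstrtoul() accepts ("6", "0x6", ...). A
minimal user-space sketch of driving one of these knobs; the sysfs path is
an assumption based on the "kgsl-3d0" device name, not something this
patch confirms:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int set_ft_policy(const char *val)
	{
		/* Assumed node location; it varies with the platform. */
		int fd = open("/sys/class/kgsl/kgsl-3d0/ft_policy", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));	/* e.g. "0x6" */
		close(fd);
		return n < 0 ? -1 : 0;
	}
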
-
-/**
- * _get_adreno_dev() - Routine to get a pointer to the adreno device
- * @dev: device ptr
- */
-struct adreno_device *_get_adreno_dev(struct device *dev)
-{
- struct kgsl_device *device = kgsl_device_from_dev(dev);
- return device ? ADRENO_DEVICE(device) : NULL;
-}
-
-/**
- * _ft_policy_store() - Routine to configure FT policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value to write
- * @count: size of the value to write
- *
- * FT policy can be set to any of the options below.
- * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
- * KGSL_FT_REPLAY -> BIT(1) Set to enable replay
- * KGSL_FT_SKIPIB -> BIT(2) Set to skip IB
- * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
- * By default the FT policy is set to KGSL_FT_DEFAULT_POLICY.
- */
-static int _ft_policy_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- int ret;
- if (adreno_dev == NULL)
- return 0;
-
- mutex_lock(&adreno_dev->dev.mutex);
- ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_policy);
- mutex_unlock(&adreno_dev->dev.mutex);
-
- return ret;
-}
-
-/**
- * _ft_policy_show() - Routine to read FT policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value read
- *
- * This is a routine to read current FT policy
- */
-static int _ft_policy_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- if (adreno_dev == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_policy);
-}
-
-/**
- * _ft_pagefault_policy_store() - Routine to configure FT
- * pagefault policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value to write
- * @count: size of the value to write
- *
- * FT pagefault policy can be set to any of the options below.
- * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
- * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
- * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
- * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
- * pagefault per page.
- * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
- * pagefault per INT.
- */
-static int _ft_pagefault_policy_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- int ret;
- if (adreno_dev == NULL)
- return 0;
-
- mutex_lock(&adreno_dev->dev.mutex);
- ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_pf_policy);
- mutex_unlock(&adreno_dev->dev.mutex);
-
- return ret;
-}
-
-/**
- * _ft_pagefault_policy_show() - Routine to read FT pagefault
- * policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value read
- *
- * This is a routine to read current FT pagefault policy
- */
-static int _ft_pagefault_policy_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- if (adreno_dev == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_pf_policy);
-}
-
-/**
- * _ft_fast_hang_detect_store() - Routine to configure FT fast
- * hang detect policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value to write
- * @count: size of the value to write
- *
- * 0x1 - Enable fast hang detection
- * 0x0 - Disable fast hang detection
- */
-static int _ft_fast_hang_detect_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- int ret;
- if (adreno_dev == NULL)
- return 0;
-
- mutex_lock(&adreno_dev->dev.mutex);
- ret = _ft_sysfs_store(buf, count, &adreno_dev->fast_hang_detect);
- mutex_unlock(&adreno_dev->dev.mutex);
-
- return ret;
-
-}
-
-/**
- * _ft_fast_hang_detect_show() - Routine to read FT fast
- * hang detect policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value read
- */
-static int _ft_fast_hang_detect_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- if (adreno_dev == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (adreno_dev->fast_hang_detect ? 1 : 0));
-}
-
-/**
- * _ft_long_ib_detect_store() - Routine to configure FT long IB
- * detect policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value to write
- * @count: size of the value to write
- *
- * 0x1 - Enable long IB detection
- * 0x0 - Disable long IB detection
- */
-static int _ft_long_ib_detect_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- int ret;
- if (adreno_dev == NULL)
- return 0;
-
- mutex_lock(&adreno_dev->dev.mutex);
- ret = _ft_sysfs_store(buf, count, &adreno_dev->long_ib_detect);
- mutex_unlock(&adreno_dev->dev.mutex);
-
- return ret;
-
-}
-
-/**
- * _ft_long_ib_detect_show() - Routine to read FT long IB
- * detect policy
- * @dev: device ptr
- * @attr: Device attribute
- * @buf: value read
- */
-static int _ft_long_ib_detect_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct adreno_device *adreno_dev = _get_adreno_dev(dev);
- if (adreno_dev == NULL)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (adreno_dev->long_ib_detect ? 1 : 0));
-}
-
-#define FT_DEVICE_ATTR(name) \
- DEVICE_ATTR(name, 0644, _ ## name ## _show, _ ## name ## _store);
-
-FT_DEVICE_ATTR(ft_policy);
-FT_DEVICE_ATTR(ft_pagefault_policy);
-FT_DEVICE_ATTR(ft_fast_hang_detect);
-FT_DEVICE_ATTR(ft_long_ib_detect);
-
-const struct device_attribute *ft_attr_list[] = {
- &dev_attr_ft_policy,
- &dev_attr_ft_pagefault_policy,
- &dev_attr_ft_fast_hang_detect,
- &dev_attr_ft_long_ib_detect,
- NULL,
-};
-
-int adreno_ft_init_sysfs(struct kgsl_device *device)
-{
- return kgsl_create_device_sysfs_files(device->dev, ft_attr_list);
-}
-
-void adreno_ft_uninit_sysfs(struct kgsl_device *device)
-{
- kgsl_remove_device_sysfs_files(device->dev, ft_attr_list);
-}
-
-static int adreno_getproperty(struct kgsl_device *device,
- enum kgsl_property_type type,
- void *value,
- unsigned int sizebytes)
-{
- int status = -EINVAL;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- switch (type) {
- case KGSL_PROP_DEVICE_INFO:
- {
- struct kgsl_devinfo devinfo;
-
- if (sizebytes != sizeof(devinfo)) {
- status = -EINVAL;
- break;
- }
-
- memset(&devinfo, 0, sizeof(devinfo));
- devinfo.device_id = device->id+1;
- devinfo.chip_id = adreno_dev->chip_id;
- devinfo.mmu_enabled = kgsl_mmu_enabled();
- devinfo.gpu_id = adreno_dev->gpurev;
- devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
- devinfo.gmem_sizebytes = adreno_dev->gmem_size;
-
- if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
- 0) {
- status = -EFAULT;
- break;
- }
- status = 0;
- }
- break;
- case KGSL_PROP_DEVICE_SHADOW:
- {
- struct kgsl_shadowprop shadowprop;
-
- if (sizebytes != sizeof(shadowprop)) {
- status = -EINVAL;
- break;
- }
- memset(&shadowprop, 0, sizeof(shadowprop));
- if (device->memstore.hostptr) {
-			/* NOTE: with mmu enabled, gpuaddr doesn't mean
- * anything to mmap().
- */
- shadowprop.gpuaddr = device->memstore.gpuaddr;
- shadowprop.size = device->memstore.size;
- /* GSL needs this to be set, even if it
-			 * appears to be meaningless */
- shadowprop.flags = KGSL_FLAGS_INITIALIZED |
- KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
- }
- if (copy_to_user(value, &shadowprop,
- sizeof(shadowprop))) {
- status = -EFAULT;
- break;
- }
- status = 0;
- }
- break;
- case KGSL_PROP_MMU_ENABLE:
- {
- int mmu_prop = kgsl_mmu_enabled();
-
- if (sizebytes != sizeof(int)) {
- status = -EINVAL;
- break;
- }
- if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
- status = -EFAULT;
- break;
- }
- status = 0;
- }
- break;
- case KGSL_PROP_INTERRUPT_WAITS:
- {
- int int_waits = 1;
- if (sizebytes != sizeof(int)) {
- status = -EINVAL;
- break;
- }
- if (copy_to_user(value, &int_waits, sizeof(int))) {
- status = -EFAULT;
- break;
- }
- status = 0;
- }
- break;
- default:
- status = -EINVAL;
- }
-
- return status;
-}
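
These property queries are reached from user space through
IOCTL_KGSL_DEVICE_GETPROPERTY on the kgsl device node; note the strict
sizebytes check above, which fails with -EINVAL unless the size matches the
property struct exactly. A hedged sketch, assuming the struct and ioctl
names from the msm_kgsl.h UAPI header:

	#include <sys/ioctl.h>
	#include <linux/msm_kgsl.h>

	static int query_devinfo(int fd, struct kgsl_devinfo *info)
	{
		struct kgsl_device_getproperty prop = {
			.type = KGSL_PROP_DEVICE_INFO,
			.value = info,
			.sizebytes = sizeof(*info),	/* must match exactly */
		};

		/* fd is an open handle on the device node, e.g. /dev/kgsl-3d0 */
		return ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
	}
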
-
-static int adreno_setproperty(struct kgsl_device *device,
- enum kgsl_property_type type,
- void *value,
- unsigned int sizebytes)
-{
- int status = -EINVAL;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- switch (type) {
- case KGSL_PROP_PWRCTRL: {
- unsigned int enable;
- struct kgsl_device_platform_data *pdata =
- kgsl_device_get_drvdata(device);
-
- if (sizebytes != sizeof(enable))
- break;
-
- if (copy_from_user(&enable, (void __user *) value,
- sizeof(enable))) {
- status = -EFAULT;
- break;
- }
-
- if (enable) {
- if (pdata->nap_allowed)
- device->pwrctrl.nap_allowed = true;
- adreno_dev->fast_hang_detect = 1;
- kgsl_pwrscale_enable(device);
- } else {
- device->pwrctrl.nap_allowed = false;
- adreno_dev->fast_hang_detect = 0;
- kgsl_pwrscale_disable(device);
- }
-
- status = 0;
- }
- break;
- default:
- break;
- }
-
- return status;
-}
-
-static int adreno_ringbuffer_drain(struct kgsl_device *device,
- unsigned int *regs)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned long wait;
- unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
-
- /*
-	 * The first time into the loop, wait 100 msecs before running the
-	 * hang check (which on a2xx also re-kicks the wptr if the last
-	 * write failed to latch). After that, check periodically every
-	 * KGSL_TIMEOUT_HANG_DETECT msecs until the timeout expires.
- */
-
- wait = jiffies + msecs_to_jiffies(100);
-
- do {
- if (time_after(jiffies, wait)) {
- /* Check to see if the core is hung */
- if (adreno_ft_detect(device, regs))
- return -ETIMEDOUT;
-
- wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT);
- }
- GSL_RB_GET_READPTR(rb, &rb->rptr);
-
- if (time_after(jiffies, timeout)) {
- KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
- rb->rptr, rb->wptr);
- return -ETIMEDOUT;
- }
- } while (rb->rptr != rb->wptr);
-
- return 0;
-}
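
The drain loop above is a two-deadline polling pattern: a short initial
grace period, an expensive hang check only at intervals after that, and a
hard overall timeout. A generic sketch of the same shape, with hypothetical
poll_done()/check_stuck() stand-ins for the driver's specifics:

	static bool poll_done(void);	/* hypothetical: condition being drained */
	static bool check_stuck(void);	/* hypothetical: expensive hang check */

	static int drain_sketch(unsigned int timeout_ms, unsigned int check_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
		unsigned long next_check = jiffies + msecs_to_jiffies(100);

		do {
			if (time_after(jiffies, next_check)) {
				if (check_stuck())
					return -ETIMEDOUT;
				next_check = jiffies + msecs_to_jiffies(check_ms);
			}
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
		} while (!poll_done());

		return 0;
	}
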
-
-/* Caller must hold the device mutex. */
-int adreno_idle(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned long wait_time;
-	unsigned long wait_time_part;
-	unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
-
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
-
- kgsl_cffdump_regpoll(device->id,
- adreno_dev->gpudev->reg_rbbm_status << 2,
- 0x00000000, 0x80000000);
-
-retry:
- /* First, wait for the ringbuffer to drain */
- if (adreno_ringbuffer_drain(device, prev_reg_val))
- goto err;
-
- /* now, wait for the GPU to finish its operations */
- wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT);
-
- while (time_before(jiffies, wait_time)) {
- if (adreno_isidle(device))
- return 0;
-
-		/* Don't wait for the full timeout; detect a hang faster. */
- if (time_after(jiffies, wait_time_part)) {
- wait_time_part = jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT);
- if ((adreno_ft_detect(device, prev_reg_val)))
- goto err;
- }
-
- }
-
-err:
- KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
- if (KGSL_STATE_DUMP_AND_FT != device->state &&
- !adreno_dump_and_exec_ft(device)) {
- wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- goto retry;
- }
- return -ETIMEDOUT;
-}
-
-/**
- * is_adreno_rbbm_status_idle - Check if GPU core is idle by probing
- * rbbm_status register
- * @device - Pointer to the GPU device whose idle status is to be
- * checked
- * @returns - Returns whether the core is idle (based on rbbm_status)
- * false if the core is active, true if the core is idle
- */
-static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
-{
- unsigned int reg_rbbm_status;
- bool status = false;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- /* Is the core idle? */
- adreno_regread(device,
- adreno_dev->gpudev->reg_rbbm_status,
- &reg_rbbm_status);
-
- if (adreno_is_a2xx(adreno_dev)) {
- if (reg_rbbm_status == 0x110)
- status = true;
- } else {
- if (!(reg_rbbm_status & 0x80000000))
- status = true;
- }
- return status;
-}
-
-static unsigned int adreno_isidle(struct kgsl_device *device)
-{
- int status = false;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- /* If the device isn't active, don't force it on. */
- if (kgsl_pwrctrl_isenabled(device)) {
-		/* Is the ring buffer empty? */
- GSL_RB_GET_READPTR(rb, &rb->rptr);
- if (rb->rptr == rb->wptr) {
- /*
- * Are there interrupts pending? If so then pretend we
-			 * are not idle - this avoids the possibility that we go
- * to a lower power state without handling interrupts
- * first.
- */
-
- if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
- /* Is the core idle? */
- status = is_adreno_rbbm_status_idle(device);
- }
- }
- } else {
- status = true;
- }
- return status;
-}
-
-/* Caller must hold the device mutex. */
-static int adreno_suspend_context(struct kgsl_device *device)
-{
- int status = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- /* switch to NULL ctxt */
- if (adreno_dev->drawctxt_active != NULL) {
- adreno_drawctxt_switch(adreno_dev, NULL, 0);
- status = adreno_idle(device);
- }
-
- return status;
-}
-
-/* Find a memory structure attached to an adreno context */
-
-struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
- unsigned int pt_base, unsigned int gpuaddr, unsigned int size)
-{
- struct kgsl_context *context;
- struct adreno_context *adreno_context = NULL;
- int next = 0;
- struct kgsl_memdesc *desc = NULL;
-
- read_lock(&device->context_lock);
- while (1) {
- context = idr_get_next(&device->context_idr, &next);
- if (context == NULL)
- break;
-
- adreno_context = (struct adreno_context *)context->devctxt;
-
- if (kgsl_mmu_pt_equal(&device->mmu, adreno_context->pagetable,
- pt_base)) {
- desc = &adreno_context->gpustate;
- if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
- break;
-
- desc = &adreno_context->context_gmem_shadow.gmemshadow;
- if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
- break;
- }
- next = next + 1;
- desc = NULL;
- }
- read_unlock(&device->context_lock);
- return desc;
-}
-
-struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
- unsigned int pt_base,
- unsigned int gpuaddr,
- unsigned int size)
-{
- struct kgsl_mem_entry *entry;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
-
- if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
- return &ringbuffer->buffer_desc;
-
- if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr, size))
- return &ringbuffer->memptrs_desc;
-
- if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
- return &device->memstore;
-
- if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr,
- size))
- return &device->mmu.setstate_memory;
-
- entry = kgsl_get_mem_entry(device, pt_base, gpuaddr, size);
-
- if (entry)
- return &entry->memdesc;
-
- return adreno_find_ctxtmem(device, pt_base, gpuaddr, size);
-}
-
-uint8_t *adreno_convertaddr(struct kgsl_device *device, unsigned int pt_base,
- unsigned int gpuaddr, unsigned int size)
-{
- struct kgsl_memdesc *memdesc;
-
- memdesc = adreno_find_region(device, pt_base, gpuaddr, size);
-
- return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
-}
-
-void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
- unsigned int *value)
-{
- unsigned int *reg;
- BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
- reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
-
- if (!in_interrupt())
- kgsl_pre_hwaccess(device);
-
-	/* Ensure this read finishes before the next one,
-	 * i.e. act like a normal readl() */
- *value = __raw_readl(reg);
- rmb();
-}
-
-void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
- unsigned int value)
-{
- unsigned int *reg;
-
- BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
-
- if (!in_interrupt())
- kgsl_pre_hwaccess(device);
-
- kgsl_trace_regwrite(device, offsetwords, value);
-
- kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
- reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
-
-	/* Ensure previous writes post before this one,
-	 * i.e. act like a normal writel() */
- wmb();
- __raw_writel(value, reg);
-}
-
-static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
-{
- unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
- if (k_ctxt != NULL) {
- struct adreno_context *a_ctxt = k_ctxt->devctxt;
- if (k_ctxt->id == KGSL_CONTEXT_INVALID || a_ctxt == NULL)
- context_id = KGSL_CONTEXT_INVALID;
- else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- context_id = k_ctxt->id;
- }
-
- return context_id;
-}
-
-static unsigned int adreno_check_hw_ts(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status = 0;
- unsigned int ref_ts, enableflag;
- unsigned int context_id = _get_context_id(context);
-
- /*
- * If the context ID is invalid, we are in a race with
- * the context being destroyed by userspace so bail.
- */
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- return -EINVAL;
- }
-
- status = kgsl_check_timestamp(device, context, timestamp);
- if (status)
- return status;
-
- kgsl_sharedmem_readl(&device->memstore, &enableflag,
- KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
- /*
- * Barrier is needed here to make sure the read from memstore
- * has posted
- */
-
- mb();
-
- if (enableflag) {
- kgsl_sharedmem_readl(&device->memstore, &ref_ts,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts));
-
- /* Make sure the memstore read has posted */
- mb();
- if (timestamp_cmp(ref_ts, timestamp) >= 0) {
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- /* Make sure the memstore write is posted */
- wmb();
- }
- } else {
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- enableflag = 1;
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), enableflag);
- /* Make sure the memstore write gets posted */
- wmb();
-
- /*
- * submit a dummy packet so that even if all
-		 * commands up to the timestamp are executed we will still
- * get an interrupt
- */
-
- if (context && device->state != KGSL_STATE_SLUMBER)
- adreno_ringbuffer_issuecmds(device, context->devctxt,
- KGSL_CMD_FLAGS_GET_INT, NULL, 0);
- }
-
- return 0;
-}
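
The timestamp_cmp() used above has to tolerate 32-bit wraparound, since GPU
timestamps are free-running counters. The usual wrap-safe idiom, shown as a
sketch (not necessarily kgsl's exact helper):

	/*
	 * The unsigned difference, reinterpreted as signed, orders two
	 * 32-bit timestamps correctly across the wrap as long as they are
	 * less than 2^31 apart.
	 */
	static inline int ts_cmp_sketch(unsigned int a, unsigned int b)
	{
		return (int)(a - b);	/* >0: a after b; <0: a before b */
	}
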
-
-/* Return 1 if the event timestamp has already passed, 0 if it was marked */
-static int adreno_next_event(struct kgsl_device *device,
- struct kgsl_event *event)
-{
- return adreno_check_hw_ts(device, event->context, event->timestamp);
-}
-
-static int adreno_check_interrupt_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status;
-
- mutex_lock(&device->mutex);
- status = adreno_check_hw_ts(device, context, timestamp);
- mutex_unlock(&device->mutex);
-
- return status;
-}
-
-/*
- wait_event_interruptible_timeout checks for the exit condition before
- placing a process on the wait queue. For conditional interrupts we
- expect the process to already be on its wait queue when its
- exit-condition check function is called.
-*/
-#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
-({ \
- long __ret = timeout; \
- if (io) \
- __wait_io_event_interruptible_timeout(wq, condition, __ret);\
- else \
- __wait_event_interruptible_timeout(wq, condition, __ret);\
- __ret; \
-})
-
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned int curr_reg_val[FT_DETECT_REGS_COUNT];
- unsigned int fast_hang_detected = 1;
- unsigned int long_ib_detected = 1;
- unsigned int i;
- static unsigned long next_hang_detect_time;
- static unsigned int prev_global_ts;
- unsigned int curr_global_ts = 0;
- unsigned int curr_context_id = 0;
- static struct adreno_context *curr_context;
- static struct kgsl_context *context;
-
- if (!adreno_dev->fast_hang_detect)
- fast_hang_detected = 0;
-
- if (!adreno_dev->long_ib_detect)
- long_ib_detected = 0;
-
- if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
- return 0;
-
- if (is_adreno_rbbm_status_idle(device) &&
- (kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED)
- == rb->global_ts)) {
-
- /*
- * On A20X if the RPTR != WPTR and the device is idle, then
- * the last write to WPTR probably failed to latch so write it
- * again
- */
-
- if (adreno_is_a2xx(adreno_dev)) {
- unsigned int rptr;
- adreno_regread(device, REG_CP_RB_RPTR, &rptr);
- if (rptr != adreno_dev->ringbuffer.wptr)
- adreno_regwrite(device, REG_CP_RB_WPTR,
- adreno_dev->ringbuffer.wptr);
- }
-
- return 0;
- }
-
- /*
-	 * Hang detection runs at most once every KGSL_TIMEOUT_HANG_DETECT
-	 * msecs; if it is requested again sooner than that after the last
-	 * check, do nothing.
- */
- if ((next_hang_detect_time) &&
- (time_before(jiffies, next_hang_detect_time)))
- return 0;
- else
- next_hang_detect_time = (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_HANG_DETECT));
-
- /* Read the current Hang detect reg values here */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (ft_detect_regs[i] == 0)
- continue;
- adreno_regread(device, ft_detect_regs[i],
- &curr_reg_val[i]);
- }
-
- /* Read the current global timestamp here */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
-
- mb();
-
- if (curr_global_ts == prev_global_ts) {
-
- /* Get the current context here */
- if (context == NULL) {
- kgsl_sharedmem_readl(&device->memstore,
- &curr_context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- /* Make sure the memstore read has posted */
- mb();
-
- context = kgsl_context_get(device, curr_context_id);
- if (context != NULL) {
- curr_context = context->devctxt;
- curr_context->ib_gpu_time_used = 0;
- } else {
- KGSL_DRV_ERR(device,
- "Fault tolerance no context found\n");
- }
- }
-
- mb();
-
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (curr_reg_val[i] != prev_reg_val[i]) {
- fast_hang_detected = 0;
-
- /* Check for long IB here */
- if ((i >=
- LONG_IB_DETECT_REG_INDEX_START)
- &&
- (i <=
- LONG_IB_DETECT_REG_INDEX_END))
- long_ib_detected = 0;
- }
- }
-
- if (fast_hang_detected) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
- " on global ts %d\n",
- curr_context ? curr_context->pid_name : "",
- curr_context ? curr_context->id : 0,
- (kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED) + 1),
- curr_global_ts + 1);
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- return 1;
- }
-
- if (curr_context != NULL) {
-
- curr_context->ib_gpu_time_used += KGSL_TIMEOUT_HANG_DETECT;
- KGSL_FT_INFO(device,
- "Proc %s used GPU Time %d ms on timestamp 0x%X\n",
- curr_context->pid_name, curr_context->ib_gpu_time_used,
- curr_global_ts+1);
-
- if ((long_ib_detected) &&
- (!(curr_context->flags &
- CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
- if (curr_context->ib_gpu_time_used >
- KGSL_TIMEOUT_LONG_IB_DETECTION) {
- if (adreno_dev->long_ib_ts !=
- curr_global_ts) {
- KGSL_FT_ERR(device,
-						"Proc %s, ctxt_id %d ts %d "
- "used GPU for %d ms long ib "
- "detected on global ts %d\n",
- curr_context->pid_name,
- curr_context->id,
- (kgsl_readtimestamp(device,
- context,
- KGSL_TIMESTAMP_RETIRED)+1),
- curr_context->ib_gpu_time_used,
- curr_global_ts+1);
- adreno_dev->long_ib = 1;
- adreno_dev->long_ib_ts =
- curr_global_ts;
- curr_context->ib_gpu_time_used =
- 0;
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- return 1;
- }
- }
- }
- }
- } else {
- /* GPU is moving forward */
- prev_global_ts = curr_global_ts;
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- adreno_dev->long_ib = 0;
- adreno_dev->long_ib_ts = 0;
- }
-
- /* If hangs are not detected copy the current reg values
- * to previous values and return no hang */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
- prev_reg_val[i] = curr_reg_val[i];
- return 0;
-}
-
-static int _check_pending_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int context_id = _get_context_id(context);
- unsigned int ts_issued;
-
- if (context_id == KGSL_CONTEXT_INVALID)
- return -EINVAL;
-
- ts_issued = adreno_context_timestamp(context, &adreno_dev->ringbuffer);
-
- if (timestamp_cmp(timestamp, ts_issued) <= 0)
- return 0;
-
- if (context && !context->wait_on_invalid_ts) {
- KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
- context_id, timestamp, context_id, ts_issued);
-
- /* Only print this message once */
- context->wait_on_invalid_ts = true;
- }
-
- return -EINVAL;
-}
-
-/**
- * adreno_waittimestamp - sleep while waiting for the specified timestamp
- * @device - pointer to a KGSL device structure
- * @context - pointer to the active kgsl context
- * @timestamp - GPU timestamp to wait for
- * @msecs - amount of time to wait (in milliseconds)
- *
- * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
- * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
- * one if it happened. Otherwise, spend most of our time in an interruptible
- * wait for the timestamp interrupt to be processed. This function must be
- * called with the mutex already held.
- */
-static int adreno_waittimestamp(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int timestamp,
- unsigned int msecs)
-{
- static unsigned int io_cnt;
- struct adreno_context *adreno_ctx = context ? context->devctxt : NULL;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int context_id = _get_context_id(context);
- unsigned int time_elapsed = 0;
- unsigned int wait;
- int ts_compare = 1;
- int io, ret = -ETIMEDOUT;
-
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- return -EINVAL;
- }
-
- /*
-	 * Check to see if the requested timestamp is "newer" than the last
- * timestamp issued. If it is complain once and return error. Only
- * print the message once per context so that badly behaving
- * applications don't spam the logs
- */
-
- if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
- if (_check_pending_timestamp(device, context, timestamp))
- return -EINVAL;
-
- /* Reset the invalid timestamp flag on a valid wait */
- context->wait_on_invalid_ts = false;
- }
-
- /*
-	 * On the first time through the loop only wait 100ms;
-	 * this gives the engine enough time to start moving and, oddly,
-	 * provides better hang detection results than just going the full
-	 * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
-	 * is if msecs happens to be < 100ms; then just use the full timeout.
- */
-
- wait = 100;
-
- do {
- long status;
-
- /*
- * if the timestamp happens while we're not
- * waiting, there's a chance that an interrupt
- * will not be generated and thus the timestamp
- * work needs to be queued.
- */
-
- if (kgsl_check_timestamp(device, context, timestamp)) {
- queue_work(device->work_queue, &device->ts_expired_ws);
- ret = 0;
- break;
- }
-
- /*
- * For proper power accounting sometimes we need to call
- * io_wait_interruptible_timeout and sometimes we need to call
- * plain old wait_interruptible_timeout. We call the regular
- * timeout N times out of 100, where N is a number specified by
- * the current power level
- */
-
- io_cnt = (io_cnt + 1) % 100;
- io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
- ? 0 : 1;
-
- mutex_unlock(&device->mutex);
-
- /* Wait for a timestamp event */
- status = kgsl_wait_event_interruptible_timeout(
- device->wait_queue,
- adreno_check_interrupt_timestamp(device, context,
- timestamp), msecs_to_jiffies(wait), io);
-
- mutex_lock(&device->mutex);
-
- /*
- * If status is non zero then either the condition was satisfied
- * or there was an error. In either event, this is the end of
- * the line for us
- */
-
- if (status != 0) {
- ret = (status > 0) ? 0 : (int) status;
- break;
- }
- time_elapsed += wait;
-
- /* If user specified timestamps are being used, wait at least
- * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
-		 * issue an IB for a timestamp before checking to see if the
- * current timestamp we are waiting for is valid or not
- */
-
- if (ts_compare && (adreno_ctx &&
- (adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
- if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
- ret = _check_pending_timestamp(device, context,
- timestamp);
- if (ret)
- break;
-
- /* Don't do this check again */
- ts_compare = 0;
-
- /*
- * Reset the invalid timestamp flag on a valid
- * wait
- */
-
- context->wait_on_invalid_ts = false;
- }
- }
-
- /*
- * We want to wait the floor of KGSL_TIMEOUT_PART
- * and (msecs - time_elapsed).
- */
-
- if (KGSL_TIMEOUT_PART < (msecs - time_elapsed))
- wait = KGSL_TIMEOUT_PART;
- else
- wait = (msecs - time_elapsed);
-
- } while (!msecs || time_elapsed < msecs);
-
- return ret;
-}
-
-static unsigned int adreno_readtimestamp(struct kgsl_device *device,
- struct kgsl_context *context, enum kgsl_timestamp_type type)
-{
- unsigned int timestamp = 0;
- unsigned int context_id = _get_context_id(context);
-
- /*
- * If the context ID is invalid, we are in a race with
- * the context being destroyed by userspace so bail.
- */
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- return timestamp;
- }
- switch (type) {
- case KGSL_TIMESTAMP_QUEUED: {
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- timestamp = adreno_context_timestamp(context,
- &adreno_dev->ringbuffer);
- break;
- }
- case KGSL_TIMESTAMP_CONSUMED:
- kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
- break;
- case KGSL_TIMESTAMP_RETIRED:
- kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
- break;
- }
-
- rmb();
-
- return timestamp;
-}
-
-static long adreno_ioctl(struct kgsl_device_private *dev_priv,
- unsigned int cmd, void *data)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int result = 0;
-
- switch (cmd) {
- case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET: {
- struct kgsl_drawctxt_set_bin_base_offset *binbase = data;
- struct kgsl_context *context;
-
- context = kgsl_context_get_owner(dev_priv,
- binbase->drawctxt_id);
- if (context) {
- adreno_drawctxt_set_bin_base_offset(
- device, context, binbase->offset);
- } else {
- result = -EINVAL;
- KGSL_DRV_ERR(device,
- "invalid drawctxt drawctxt_id %d "
- "device_id=%d\n",
- binbase->drawctxt_id, device->id);
- }
-
- kgsl_context_put(context);
- break;
- }
- case IOCTL_KGSL_PERFCOUNTER_GET: {
- struct kgsl_perfcounter_get *get = data;
- result = adreno_perfcounter_get(adreno_dev, get->groupid,
- get->countable, &get->offset, PERFCOUNTER_FLAG_NONE);
- break;
- }
- case IOCTL_KGSL_PERFCOUNTER_PUT: {
- struct kgsl_perfcounter_put *put = data;
- result = adreno_perfcounter_put(adreno_dev, put->groupid,
- put->countable);
- break;
- }
- case IOCTL_KGSL_PERFCOUNTER_QUERY: {
- struct kgsl_perfcounter_query *query = data;
- result = adreno_perfcounter_query_group(adreno_dev,
- query->groupid, query->countables,
- query->count, &query->max_counters);
- break;
- }
- case IOCTL_KGSL_PERFCOUNTER_READ: {
- struct kgsl_perfcounter_read *read = data;
- result = adreno_perfcounter_read_group(adreno_dev,
- read->reads, read->count);
- break;
- }
- default:
- KGSL_DRV_INFO(dev_priv->device,
- "invalid ioctl code %08x\n", cmd);
- result = -ENOIOCTLCMD;
- break;
- }
- return result;
-
-}
-
-static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
-{
- gpu_freq /= 1000000;
- return ticks / gpu_freq;
-}
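
A worked example of the conversion, with illustrative values:

	/*
	 * gpu_freq = 200000000 Hz reduces to a divisor of 200, so 400000
	 * busy ticks translate to 400000 / 200 = 2000 us. Note the implicit
	 * assumption that gpu_freq >= 1 MHz; below that the integer divide
	 * truncates the divisor to zero.
	 */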
-
-static void adreno_power_stats(struct kgsl_device *device,
- struct kgsl_power_stats *stats)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int cycles = 0;
-
- /*
- * Get the busy cycles counted since the counter was last reset.
- * If we're not currently active, there shouldn't have been
- * any cycles since the last time this function was called.
- */
- if (device->state == KGSL_STATE_ACTIVE)
- cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
-
- /*
- * In order to calculate idle you have to have run the algorithm
- * at least once to get a start time.
- */
- if (pwr->time != 0) {
- s64 tmp = ktime_to_us(ktime_get());
- stats->total_time = tmp - pwr->time;
- pwr->time = tmp;
- stats->busy_time = adreno_ticks_to_us(cycles, device->pwrctrl.
- pwrlevels[device->pwrctrl.active_pwrlevel].
- gpu_freq);
- } else {
- stats->total_time = 0;
- stats->busy_time = 0;
- pwr->time = ktime_to_us(ktime_get());
- }
-}
-
-void adreno_irqctrl(struct kgsl_device *device, int state)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- adreno_dev->gpudev->irq_control(adreno_dev, state);
-}
-
-static unsigned int adreno_gpuid(struct kgsl_device *device,
- unsigned int *chipid)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- /* Some applications need to know the chip ID too, so pass
- * that as a parameter */
-
- if (chipid != NULL)
- *chipid = adreno_dev->chip_id;
-
- /* Standard KGSL gpuid format:
- * top word is 0x0002 for 2D or 0x0003 for 3D
-	 * Bottom word is a core-specific identifier
- */
-
- return (0x0003 << 16) | ((int) adreno_dev->gpurev);
-}
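
Unpacking that format, with an illustrative A320 value:

	/* Sketch: decoding the packed gpuid documented above. */
	static void gpuid_decode_sketch(unsigned int *class, unsigned int *core)
	{
		unsigned int gpuid = (0x0003 << 16) | 320;	/* == 0x00030140 */

		*class = gpuid >> 16;	/* 0x0003 -> 3D pipeline */
		*core = gpuid & 0xFFFF;	/* 320 -> ADRENO_REV_A320 */
	}
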
-
-static const struct kgsl_functable adreno_functable = {
- /* Mandatory functions */
- .regread = adreno_regread,
- .regwrite = adreno_regwrite,
- .idle = adreno_idle,
- .isidle = adreno_isidle,
- .suspend_context = adreno_suspend_context,
- .init = adreno_init,
- .start = adreno_start,
- .stop = adreno_stop,
- .getproperty = adreno_getproperty,
- .waittimestamp = adreno_waittimestamp,
- .readtimestamp = adreno_readtimestamp,
- .issueibcmds = adreno_ringbuffer_issueibcmds,
- .ioctl = adreno_ioctl,
- .setup_pt = adreno_setup_pt,
- .cleanup_pt = adreno_cleanup_pt,
- .power_stats = adreno_power_stats,
- .irqctrl = adreno_irqctrl,
- .gpuid = adreno_gpuid,
- .snapshot = adreno_snapshot,
- .irq_handler = adreno_irq_handler,
- /* Optional functions */
- .setstate = adreno_setstate,
- .drawctxt_create = adreno_drawctxt_create,
- .drawctxt_destroy = adreno_drawctxt_destroy,
- .setproperty = adreno_setproperty,
- .postmortem_dump = adreno_dump,
- .next_event = adreno_next_event,
-};
-
-static struct platform_driver adreno_platform_driver = {
- .probe = adreno_probe,
- .remove = __devexit_p(adreno_remove),
- .suspend = kgsl_suspend_driver,
- .resume = kgsl_resume_driver,
- .id_table = adreno_id_table,
- .driver = {
- .owner = THIS_MODULE,
- .name = DEVICE_3D_NAME,
- .pm = &kgsl_pm_ops,
- .of_match_table = adreno_match_table,
- }
-};
-
-static int __init kgsl_3d_init(void)
-{
- return platform_driver_register(&adreno_platform_driver);
-}
-
-static void __exit kgsl_3d_exit(void)
-{
- platform_driver_unregister(&adreno_platform_driver);
-}
-
-module_init(kgsl_3d_init);
-module_exit(kgsl_3d_exit);
-
-MODULE_DESCRIPTION("3D Graphics driver");
-MODULE_VERSION("1.2");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
deleted file mode 100644
index a6d14ef..0000000
--- a/drivers/gpu/msm/adreno.h
+++ /dev/null
@@ -1,569 +0,0 @@
-/* Copyright (c) 2008-2012,2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __ADRENO_H
-#define __ADRENO_H
-
-#include "kgsl_device.h"
-#include "adreno_drawctxt.h"
-#include "adreno_ringbuffer.h"
-#include "kgsl_iommu.h"
-#include <mach/ocmem.h>
-
-#include "a3xx_reg.h"
-
-#define DEVICE_3D_NAME "kgsl-3d"
-#define DEVICE_3D0_NAME "kgsl-3d0"
-
-#define ADRENO_DEVICE(device) \
- KGSL_CONTAINER_OF(device, struct adreno_device, dev)
-
-#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
-#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
-#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
-#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
-
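
A worked decode of the four byte-wide fields, with an illustrative chip_id:

	/*
	 * chip_id 0x03020100 decodes via the macros above as core 3
	 * (bits 31:24), major 2 (23:16), minor 1 (15:8) and patch 0 (7:0).
	 */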
-/* Flags to control command packet settings */
-#define KGSL_CMD_FLAGS_NONE 0x00000000
-#define KGSL_CMD_FLAGS_PMODE 0x00000001
-#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
-#define KGSL_CMD_FLAGS_GET_INT 0x00000004
-#define KGSL_CMD_FLAGS_EOF 0x00000100
-
-/* Command identifiers */
-#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
-#define KGSL_CMD_IDENTIFIER 0x2EEDFACE
-#define KGSL_CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
-#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
-#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
-#define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
-#define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
-#define KGSL_NOP_DATA_FILLER 0xFEEDFACE
-
-#ifdef CONFIG_MSM_SCM
-#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
-#elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
-#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_idlestats)
-#else
-#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
-#endif
-
-#define ADRENO_ISTORE_START 0x5000 /* Istore offset */
-
-#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50
-
-/* One cannot wait forever for the core to idle, so set an upper limit to the
- * amount of time to wait for the core to go idle
- */
-
-#define ADRENO_IDLE_TIMEOUT (20 * 1000)
-
-enum adreno_gpurev {
- ADRENO_REV_UNKNOWN = 0,
- ADRENO_REV_A200 = 200,
- ADRENO_REV_A203 = 203,
- ADRENO_REV_A205 = 205,
- ADRENO_REV_A220 = 220,
- ADRENO_REV_A225 = 225,
- ADRENO_REV_A305 = 305,
- ADRENO_REV_A320 = 320,
- ADRENO_REV_A330 = 330,
-};
-
-struct adreno_gpudev;
-
-struct adreno_device {
- struct kgsl_device dev; /* Must be first field in this struct */
- unsigned int chip_id;
- enum adreno_gpurev gpurev;
- unsigned long gmem_base;
- unsigned int gmem_size;
- struct adreno_context *drawctxt_active;
- const char *pfp_fwfile;
- unsigned int *pfp_fw;
- size_t pfp_fw_size;
- unsigned int pfp_fw_version;
- const char *pm4_fwfile;
- unsigned int *pm4_fw;
- size_t pm4_fw_size;
- unsigned int pm4_fw_version;
- struct adreno_ringbuffer ringbuffer;
- unsigned int mharb;
- struct adreno_gpudev *gpudev;
- unsigned int wait_timeout;
- unsigned int istore_size;
- unsigned int pix_shader_start;
- unsigned int instruction_size;
- unsigned int ib_check_level;
- unsigned int fast_hang_detect;
- unsigned int ft_policy;
- unsigned int ft_user_control;
- unsigned int long_ib_detect;
- unsigned int long_ib;
- unsigned int long_ib_ts;
- unsigned int ft_pf_policy;
- unsigned int gpulist_index;
- struct ocmem_buf *ocmem_hdl;
- unsigned int ocmem_base;
- unsigned int gpu_cycles;
-};
-
-#define PERFCOUNTER_FLAG_NONE 0x0
-#define PERFCOUNTER_FLAG_KERNEL 0x1
-
-/* Structs to maintain the list of active performance counters */
-
-/**
- * struct adreno_perfcount_register: register state
- * @countable: countable the register holds
- * @refcount: number of users of the register
- * @offset: register hardware offset
- */
-struct adreno_perfcount_register {
- unsigned int countable;
- unsigned int refcount;
- unsigned int offset;
- unsigned int flags;
-};
-
-/**
- * struct adreno_perfcount_group: registers for a hardware group
- * @regs: available registers for this group
- * @reg_count: total registers for this group
- */
-struct adreno_perfcount_group {
- struct adreno_perfcount_register *regs;
- unsigned int reg_count;
-};
-
-/**
- * adreno_perfcounts: all available perfcounter groups
- * @groups: available groups for this device
- * @group_count: total groups for this device
- */
-struct adreno_perfcounters {
- struct adreno_perfcount_group *groups;
- unsigned int group_count;
-};
-
-struct adreno_gpudev {
- /*
- * These registers are in a different location on A3XX, so define
- * them in the structure and use them as variables.
- */
- unsigned int reg_rbbm_status;
- unsigned int reg_cp_pfp_ucode_data;
- unsigned int reg_cp_pfp_ucode_addr;
- /* keeps track of when we need to execute the draw workaround code */
- int ctx_switches_since_last_draw;
-
- struct adreno_perfcounters *perfcounters;
-
- /* GPU specific function hooks */
- int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_draw_workaround)(struct adreno_device *,
- struct adreno_context *);
- irqreturn_t (*irq_handler)(struct adreno_device *);
- void (*irq_control)(struct adreno_device *, int);
- unsigned int (*irq_pending)(struct adreno_device *);
- void * (*snapshot)(struct adreno_device *, void *, int *, int);
- int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
- void (*perfcounter_init)(struct adreno_device *);
- void (*start)(struct adreno_device *);
- unsigned int (*busy_cycles)(struct adreno_device *);
- void (*perfcounter_enable)(struct adreno_device *, unsigned int group,
- unsigned int counter, unsigned int countable);
- uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
- unsigned int group, unsigned int counter,
- unsigned int offset);
-};
-
-/*
- * struct adreno_ft_data - Structure that contains all information to
- * perform gpu fault tolerance
- * @ib1 - IB1 that the GPU was executing when hang happened
- * @context_id - Context which caused the hang
- * @global_eop - eoptimestamp at time of hang
- * @rb_buffer - Buffer that holds the commands from good contexts
- * @rb_size - Number of valid dwords in rb_buffer
- * @bad_rb_buffer - Buffer that holds commands from the hanging context
- * @bad_rb_size - Number of valid dwords in bad_rb_buffer
- * @good_rb_buffer - Buffer that holds commands from good contexts
- * @good_rb_size - Number of valid dwords in good_rb_buffer
- * @last_valid_ctx_id - The last context from which commands were placed in
- * ringbuffer before the GPU hung
- * @status - Result of executing the hanging context's commands
- * @ft_policy - Fault tolerance policy being applied
- * @err_code - Fault tolerance error code
- * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
- * replayed during fault tolerance
- * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
- * replaying with snapshot
- */
-struct adreno_ft_data {
- unsigned int ib1;
- unsigned int context_id;
- unsigned int global_eop;
- unsigned int *rb_buffer;
- unsigned int rb_size;
- unsigned int *bad_rb_buffer;
- unsigned int bad_rb_size;
- unsigned int *good_rb_buffer;
- unsigned int good_rb_size;
- unsigned int last_valid_ctx_id;
- unsigned int status;
- unsigned int ft_policy;
- unsigned int err_code;
- unsigned int start_of_replay_cmds;
- unsigned int replay_for_snapshot;
-};
-
-#define FT_DETECT_REGS_COUNT 12
-
-/* Fault Tolerance policy flags */
-#define KGSL_FT_DISABLE BIT(0)
-#define KGSL_FT_REPLAY BIT(1)
-#define KGSL_FT_SKIPIB BIT(2)
-#define KGSL_FT_SKIPFRAME BIT(3)
-#define KGSL_FT_TEMP_DISABLE BIT(4)
-#define KGSL_FT_DEFAULT_POLICY (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
-
-/* Pagefault policy flags */
-#define KGSL_FT_PAGEFAULT_INT_ENABLE BIT(0)
-#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE BIT(1)
-#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE BIT(2)
-#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT BIT(3)
-#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY (KGSL_FT_PAGEFAULT_INT_ENABLE + \
- KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
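
Spelled out as numbers, the two defaults are small bitmasks:

	/*
	 * KGSL_FT_DEFAULT_POLICY = BIT(1) + BIT(2) = 0x6 (replay the
	 * commands, then NOP the bad IB), and
	 * KGSL_FT_PAGEFAULT_DEFAULT_POLICY = BIT(0) + BIT(1) = 0x3
	 * (pagefault interrupt plus GPU halt).
	 */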
-
-extern struct adreno_gpudev adreno_a2xx_gpudev;
-extern struct adreno_gpudev adreno_a3xx_gpudev;
-
-/* A2XX register sets defined in adreno_a2xx.c */
-extern const unsigned int a200_registers[];
-extern const unsigned int a220_registers[];
-extern const unsigned int a225_registers[];
-extern const unsigned int a200_registers_count;
-extern const unsigned int a220_registers_count;
-extern const unsigned int a225_registers_count;
-
-/* A3XX register set defined in adreno_a3xx.c */
-extern const unsigned int a3xx_registers[];
-extern const unsigned int a3xx_registers_count;
-
-extern const unsigned int a3xx_hlsq_registers[];
-extern const unsigned int a3xx_hlsq_registers_count;
-
-extern const unsigned int a330_registers[];
-extern const unsigned int a330_registers_count;
-
-extern unsigned int ft_detect_regs[];
-
-
-int adreno_idle(struct kgsl_device *device);
-void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
- unsigned int *value);
-void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
- unsigned int value);
-
-int adreno_dump(struct kgsl_device *device, int manual);
-unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
- *adreno_dev);
-
-struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
- unsigned int pt_base,
- unsigned int gpuaddr,
- unsigned int size);
-
-uint8_t *adreno_convertaddr(struct kgsl_device *device,
- unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
-
-struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
- unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
-
-void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
- int hang);
-
-int adreno_dump_and_exec_ft(struct kgsl_device *device);
-
-void adreno_dump_rb(struct kgsl_device *device, const void *buf,
- size_t len, int start, int size);
-
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val);
-
-int adreno_perfcounter_get(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable, unsigned int *offset,
- unsigned int flags);
-
-int adreno_perfcounter_put(struct adreno_device *adreno_dev,
- unsigned int groupid, unsigned int countable);
-
-int adreno_ft_init_sysfs(struct kgsl_device *device);
-void adreno_ft_uninit_sysfs(struct kgsl_device *device);
-
-static inline int adreno_is_a200(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A200);
-}
-
-static inline int adreno_is_a203(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A203);
-}
-
-static inline int adreno_is_a205(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A205);
-}
-
-static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev <= 209);
-}
-
-static inline int adreno_is_a220(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A220);
-}
-
-static inline int adreno_is_a225(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A225);
-}
-
-static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A220 ||
- adreno_dev->gpurev == ADRENO_REV_A225);
-}
-
-static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev <= 299);
-}
-
-static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev >= 300);
-}
-
-static inline int adreno_is_a305(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A305);
-}
-
-static inline int adreno_is_a320(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A320);
-}
-
-static inline int adreno_is_a330(struct adreno_device *adreno_dev)
-{
- return (adreno_dev->gpurev == ADRENO_REV_A330);
-}
-
-static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
-{
- return ((adreno_dev->gpurev == ADRENO_REV_A330) &&
- (ADRENO_CHIPID_PATCH(adreno_dev->chip_id) > 0));
-}
-
-static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
-{
- return (cmd[0] == cp_nop_packet(1) &&
- cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
-}
-
-static inline int adreno_context_timestamp(struct kgsl_context *k_ctxt,
- struct adreno_ringbuffer *rb)
-{
- struct adreno_context *a_ctxt = NULL;
-
- if (k_ctxt)
- a_ctxt = k_ctxt->devctxt;
-
- if (a_ctxt && a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- return a_ctxt->timestamp;
-
- return rb->global_ts;
-}
-
-/**
- * adreno_encode_istore_size - encode istore size in CP format
- * @adreno_dev - The 3D device.
- *
- * Encode the istore size into the format expected by the
- * CP_SET_SHADER_BASES and CP_ME_INIT commands:
- * bits 31:29 - istore size as encoded by this function
- * bits 27:16 - vertex shader start offset in instructions
- * bits 11:0 - pixel shader start offset in instructions.
- */
-static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
-{
- unsigned int size;
- /* in a225 the CP microcode multiplies the encoded
- * value by 3 while decoding.
- */
- if (adreno_is_a225(adreno_dev))
- size = adreno_dev->istore_size/3;
- else
- size = adreno_dev->istore_size;
-
- return (ilog2(size) - 5) << 29;
-}
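
A worked example of the encoding, with an illustrative istore size:

	/*
	 * istore_size = 4096 on a non-A225 core: ilog2(4096) = 12, so the
	 * encoded value is (12 - 5) << 29 = 0xE0000000. On A225 the size is
	 * first divided by 3 to match the microcode's decode.
	 */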
-
-static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
- unsigned int nop_gpuaddr)
-{
-	/* Adding an indirect buffer ensures that the prefetch stalls until
-	 * the commands in the indirect buffer have completed. We need to
-	 * stall prefetch with a nop indirect buffer when updating pagetables
-	 * because it provides more stable synchronization. */
- *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
- *cmds++ = nop_gpuaddr;
- *cmds++ = 2;
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
- return 5;
-}
-
-static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
- unsigned int new_phys_limit,
- unsigned int nop_gpuaddr)
-{
- unsigned int *start = cmds;
-
- *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
- *cmds++ = new_phys_limit;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
- return cmds - start;
-}
-
-static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
- int cur_ctx_bank,
- unsigned int nop_gpuaddr)
-{
- unsigned int *start = cmds;
-
- *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
- *cmds++ = (cur_ctx_bank ? 0 : 0x20);
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
- return cmds - start;
-}
-
-/*
- * adreno_add_read_cmds - Add pm4 packets to perform a read
- * @device - Pointer to device structure
- * @cmds - Pointer to memory where read commands need to be added
- * @addr - gpu address of the read
- * @val - The GPU will wait until the data at address addr becomes
- * equal to value
- * @nop_gpuaddr - gpu address of a nop indirect buffer used to stall prefetch
- */
-static inline int adreno_add_read_cmds(struct kgsl_device *device,
- unsigned int *cmds, unsigned int addr,
- unsigned int val, unsigned int nop_gpuaddr)
-{
- unsigned int *start = cmds;
-
- *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
- /* MEM SPACE = memory, FUNCTION = equals */
- *cmds++ = 0x13;
- *cmds++ = addr;
- *cmds++ = val;
- *cmds++ = 0xFFFFFFFF;
- *cmds++ = 0xFFFFFFFF;
-
- /* WAIT_REG_MEM turns back on protected mode - push it off */
- *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
- *cmds++ = 0;
-
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
- return cmds - start;
-}
-
-/*
- * adreno_add_idle_cmds - Add pm4 packets for GPU idle
- * @adreno_dev - Pointer to device structure
- * @cmds - Pointer to memory where idle commands need to be added
- */
-static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
- unsigned int *cmds)
-{
- unsigned int *start = cmds;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
- (adreno_dev->gpurev == ADRENO_REV_A320)) {
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
- *cmds++ = 0x00000000;
- }
-
- return cmds - start;
-}
-
-#ifdef CONFIG_DEBUG_FS
-void adreno_debugfs_init(struct kgsl_device *device);
-#else
-static inline void adreno_debugfs_init(struct kgsl_device *device) { }
-#endif
-
-/**
- * adreno_set_protected_registers() - Protect the specified range of registers
- * from being accessed by the GPU
- * @device: pointer to the KGSL device
- * @index: Pointer to the index of the protect mode register to write to
- * @reg: Starting dword register to write
- * @mask: Size of the mask to protect (A3xx: # of registers = 2 ** mask;
- * A2xx: contains the address mask used to mask the protect base address)
- *
- * Add the range of registers to the list of protected mode registers that will
- * cause an exception if the GPU accesses them. There are 16 available
- * protected mode registers. Index is used to specify which register to write
- * to - the intent is to call this function multiple times with the same index
- * pointer for each range, and the registers will be programmed in an
- * incremental fashion
- */
-static inline void adreno_set_protected_registers(struct kgsl_device *device,
- unsigned int *index, unsigned int reg, int mask)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int val;
- unsigned int protect_reg_offset;
-
- /* There are only 16 registers available */
- BUG_ON(*index >= 16);
-
- if (adreno_is_a3xx(adreno_dev)) {
- val = 0x60000000 | ((mask & 0x1F) << 24) |
- ((reg << 2) & 0x1FFFF);
- protect_reg_offset = A3XX_CP_PROTECT_REG_0;
- } else if (adreno_is_a2xx(adreno_dev)) {
- val = 0xc0000000 | ((reg << 2) << 16) | (mask & 0xffff);
- protect_reg_offset = REG_RBBM_PROTECT_0;
- } else {
- return;
- }
-
- /*
- * Write the protection range to the next available protection
- * register
- */
-
- kgsl_regwrite(device, protect_reg_offset + *index, val);
- *index = *index + 1;
-}
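/*
 * Illustrative sketch, assuming a hypothetical setup routine: per the
 * comment above, the same index pointer is passed for each range so that
 * successive calls program consecutive protect registers.
 *
 *	unsigned int index = 0;
 *
 *	adreno_set_protected_registers(device, &index, 0x200, 4);
 *	adreno_set_protected_registers(device, &index, 0x300, 6);
 *
 * After these two calls index is 2, and protect registers 0 and 1 hold
 * the two ranges.
 */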
-#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
deleted file mode 100644
index 05aa505..0000000
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ /dev/null
@@ -1,2114 +0,0 @@
-/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <mach/socinfo.h>
-
-#include "kgsl.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_cffdump.h"
-#include "adreno.h"
-#include "adreno_a2xx_trace.h"
-
-/*
- * These are the registers that are dumped with GPU snapshot
- * and postmortem. The lists are dword offset pairs in the
- * form of {start offset, end offset} inclusive.
- */
-
-/* A200, A205 */
-const unsigned int a200_registers[] = {
- 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
- 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
- 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
- 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
- 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
- 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
- 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
- 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
- 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
- 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
- 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
- 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
- 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
- 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
- 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
- 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
- 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
- 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
- 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
- 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
- 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
- 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
- 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
- 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
- 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
-};
-
-const unsigned int a220_registers[] = {
- 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
- 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
- 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
- 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
- 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
- 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
- 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
- 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
- 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
- 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
- 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
- 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
- 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
- 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
- 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
- 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
- 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
- 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
- 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
- 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
- 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
- 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
- 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
- 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
- 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
- 0x4900, 0x4900, 0x4908, 0x4908,
-};
-
-const unsigned int a225_registers[] = {
- 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
- 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
- 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
- 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
- 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
- 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
- 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
- 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
- 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
- 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
- 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
- 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
- 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
- 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
- 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
- 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
- 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
- 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
- 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
- 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
- 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
- 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
- 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
- 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
- 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
- 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
- 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
- 0x4908, 0x4908,
-};
-
-const unsigned int a200_registers_count = ARRAY_SIZE(a200_registers) / 2;
-const unsigned int a220_registers_count = ARRAY_SIZE(a220_registers) / 2;
-const unsigned int a225_registers_count = ARRAY_SIZE(a225_registers) / 2;
-
-/*
- *
- * Memory Map for Register, Constant & Instruction Shadow, and Command Buffers
- * (34.5KB)
- *
- * +---------------------+------------+-------------+---+---------------------+
- * | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow |
- * +---------------------+------------+-------------+---+---------------------+
- * ________________________________/ \____________________
- * / |
- * +--------------+-----------+------+-----------+------------------------+
- * | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused |
- * +--------------+-----------+------+-----------+------------------------+
- *
- * 8K - ALU Constant Shadow (8K aligned)
- * 4K - H/W Register Shadow (8K aligned)
- * 4K - Command and Vertex Buffers
- * - Indirect command buffer : Const/Reg restore
- * - includes Loop & Bool const shadows
- * - Indirect command buffer : Const/Reg save
- * - Quad vertices & texture coordinates
- * - Indirect command buffer : Gmem save
- * - Indirect command buffer : Gmem restore
- * - Unused (padding to 8KB boundary)
- * <1K - Texture Constant Shadow (768 bytes) (8K aligned)
- * 18K - Shader Instruction Shadow
- * - 6K vertex (32 byte aligned)
- * - 6K pixel (32 byte aligned)
- * - 6K shared (32 byte aligned)
- *
- * Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes
- * 3 DWORDS per DWORD transferred, plus 1 DWORD for the shadow, for a total of
- * 16 bytes per constant. If the texture constants were transferred this way,
- * the Command & Vertex Buffers section would extend past the 16K boundary.
- * By moving the texture constant shadow area to start at 16KB boundary, we
- * only require approximately 40 bytes more memory, but are able to use the
- * LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up
- * context switching.
- *
- * [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool
- * constants would require an additional 8KB each, for alignment.]
- *
- */
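/*
 * Worked arithmetic for the note above: one REG_TO_MEM transfer costs
 * 3 command dwords (packet header, source register, destination address)
 * plus the 1 shadow dword itself, i.e. 4 dwords = 16 bytes per constant.
 * At 32*6 = 192 texture constants that is 192 * 16 = 3072 bytes of
 * command space, versus the 768-byte shadow used with
 * LOAD_CONSTANT_CONTEXT.
 */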
-
-/* Constants */
-
-#define ALU_CONSTANTS 2048 /* DWORDS */
-#define NUM_REGISTERS 1024 /* DWORDS */
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
-#define CMD_BUFFER_LEN 9216 /* DWORDS */
-#else
-#define CMD_BUFFER_LEN 3072 /* DWORDS */
-#endif
-#define TEX_CONSTANTS (32*6) /* DWORDS */
-#define BOOL_CONSTANTS 8 /* DWORDS */
-#define LOOP_CONSTANTS 56 /* DWORDS */
-
-/* LOAD_CONSTANT_CONTEXT shadow size */
-#define LCC_SHADOW_SIZE 0x2000 /* 8KB */
-
-#define ALU_SHADOW_SIZE LCC_SHADOW_SIZE /* 8KB */
-#define REG_SHADOW_SIZE 0x1000 /* 4KB */
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
-#define CMD_BUFFER_SIZE 0x9000 /* 36KB */
-#else
-#define CMD_BUFFER_SIZE 0x3000 /* 12KB */
-#endif
-#define TEX_SHADOW_SIZE (TEX_CONSTANTS*4) /* 768 bytes */
-
-#define REG_OFFSET LCC_SHADOW_SIZE
-#define CMD_OFFSET (REG_OFFSET + REG_SHADOW_SIZE)
-#define TEX_OFFSET (CMD_OFFSET + CMD_BUFFER_SIZE)
-#define SHADER_OFFSET ((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
-
-static inline int _shader_shadow_size(struct adreno_device *adreno_dev)
-{
- return adreno_dev->istore_size *
- (adreno_dev->instruction_size * sizeof(unsigned int));
-}
-
-static inline int _context_size(struct adreno_device *adreno_dev)
-{
- return SHADER_OFFSET + 3*_shader_shadow_size(adreno_dev);
-}
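/*
 * Worked example, assuming a hypothetical istore_size of 4096
 * instructions and an instruction_size of 3 dwords: one shader shadow is
 * 4096 * (3 * 4) = 48KB, so _context_size() returns SHADER_OFFSET plus
 * 3 * 48KB for the vertex, pixel and shared shadows in the map above.
 */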
-
-/* A scratchpad used to build commands during context create */
-
-static struct tmp_ctx {
- unsigned int *start; /* Command & Vertex buffer start */
- unsigned int *cmd; /* Next available dword in C&V buffer */
-
- /* address of buffers, needed when creating IB1 command buffers. */
- uint32_t bool_shadow; /* bool constants */
- uint32_t loop_shadow; /* loop constants */
-
- uint32_t shader_shared; /* shared shader instruction shadow */
- uint32_t shader_vertex; /* vertex shader instruction shadow */
- uint32_t shader_pixel; /* pixel shader instruction shadow */
-
- /* Addresses in command buffer where separately handled registers
- * are saved
- */
- uint32_t reg_values[33];
- uint32_t chicken_restore;
-
- uint32_t gmem_base; /* Base gpu address of GMEM */
-
-} tmp_ctx;
-
-/* context save (gmem -> sys) */
-
-/* pre-compiled vertex shader program
-*
-* attribute vec4 P;
-* void main(void)
-* {
-* gl_Position = P;
-* }
-*/
-#define GMEM2SYS_VTX_PGM_LEN 0x12
-
-static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
- 0x00011003, 0x00001000, 0xc2000000,
- 0x00001004, 0x00001000, 0xc4000000,
- 0x00001005, 0x00002000, 0x00000000,
- 0x1cb81000, 0x00398a88, 0x00000003,
- 0x140f803e, 0x00000000, 0xe2010100,
- 0x14000000, 0x00000000, 0xe2000000
-};
-
-/* pre-compiled fragment shader program
-*
-* precision highp float;
-* uniform vec4 clear_color;
-* void main(void)
-* {
-* gl_FragColor = clear_color;
-* }
-*/
-
-#define GMEM2SYS_FRAG_PGM_LEN 0x0c
-
-static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
- 0x00000000, 0x1002c400, 0x10000000,
- 0x00001003, 0x00002000, 0x00000000,
- 0x140f8000, 0x00000000, 0x22000000,
- 0x14000000, 0x00000000, 0xe2000000
-};
-
-/* context restore (sys -> gmem) */
-/* pre-compiled vertex shader program
-*
-* attribute vec4 position;
-* attribute vec4 texcoord;
-* varying vec4 texcoord0;
-* void main()
-* {
-* gl_Position = position;
-* texcoord0 = texcoord;
-* }
-*/
-
-#define SYS2GMEM_VTX_PGM_LEN 0x18
-
-static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
- 0x00052003, 0x00001000, 0xc2000000, 0x00001005,
- 0x00001000, 0xc4000000, 0x00001006, 0x10071000,
- 0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
- 0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
- 0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
- 0xe2020200, 0x14000000, 0x00000000, 0xe2000000
-};
-
-/* pre-compiled fragment shader program
-*
-* precision mediump float;
-* uniform sampler2D tex0;
-* varying vec4 texcoord0;
-* void main()
-* {
-* gl_FragColor = texture2D(tex0, texcoord0.xy);
-* }
-*/
-
-#define SYS2GMEM_FRAG_PGM_LEN 0x0f
-
-static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
- 0x00011002, 0x00001000, 0xc4000000, 0x00001003,
- 0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
- 0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
- 0x14000000, 0x00000000, 0xe2000000
-};
-
-/* shader texture constants (sysmem -> gmem) */
-#define SYS2GMEM_TEX_CONST_LEN 6
-
-static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = {
- /* Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat,
- * RFMode=ZeroClamp-1, Dim=1:2d
- */
- 0x00000002, /* Pitch = TBD */
-
- /* Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0,
- * NearestClamp=1:OGL Mode
- */
- 0x00000800, /* Address[31:12] = TBD */
-
- /* Width, Height, EndianSwap=0:None */
- 0, /* Width & Height = TBD */
-
- /* NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point,
- * Mip=2:BaseMap
- */
- 0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
-
- /* VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0,
- * Dim3d=0
- */
- 0,
-
-	/* BorderColor=0:ABGRBlack, ForceBC=0:disable, TriJuice=0, Aniso=0,
- * Dim=1:2d, MipPacking=0
- */
- 1 << 9 /* Mip Address[31:12] = TBD */
-};
-
-#define NUM_COLOR_FORMATS 13
-
-static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = {
- FMT_4_4_4_4, /* COLORX_4_4_4_4 */
- FMT_1_5_5_5, /* COLORX_1_5_5_5 */
- FMT_5_6_5, /* COLORX_5_6_5 */
- FMT_8, /* COLORX_8 */
- FMT_8_8, /* COLORX_8_8 */
- FMT_8_8_8_8, /* COLORX_8_8_8_8 */
- FMT_8_8_8_8, /* COLORX_S8_8_8_8 */
- FMT_16_FLOAT, /* COLORX_16_FLOAT */
- FMT_16_16_FLOAT, /* COLORX_16_16_FLOAT */
- FMT_16_16_16_16_FLOAT, /* COLORX_16_16_16_16_FLOAT */
- FMT_32_FLOAT, /* COLORX_32_FLOAT */
- FMT_32_32_FLOAT, /* COLORX_32_32_FLOAT */
- FMT_32_32_32_32_FLOAT, /* COLORX_32_32_32_32_FLOAT */
-};
-
-static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = {
- 2, /* COLORX_4_4_4_4 */
- 2, /* COLORX_1_5_5_5 */
- 2, /* COLORX_5_6_5 */
- 1, /* COLORX_8 */
-	2, /* COLORX_8_8 */
- 4, /* COLORX_8_8_8_8 */
- 4, /* COLORX_S8_8_8_8 */
- 2, /* COLORX_16_FLOAT */
- 4, /* COLORX_16_16_FLOAT */
- 8, /* COLORX_16_16_16_16_FLOAT */
- 4, /* COLORX_32_FLOAT */
- 8, /* COLORX_32_32_FLOAT */
- 16, /* COLORX_32_32_32_32_FLOAT */
-};
-
-/* shader linkage info */
-#define SHADER_CONST_ADDR (11 * 6 + 3)
-
-
-static unsigned int *program_shader(unsigned int *cmds, int vtxfrag,
- unsigned int *shader_pgm, int dwords)
-{
- /* load the patched vertex shader stream */
- *cmds++ = cp_type3_packet(CP_IM_LOAD_IMMEDIATE, 2 + dwords);
- /* 0=vertex shader, 1=fragment shader */
- *cmds++ = vtxfrag;
- /* instruction start & size (in 32-bit words) */
- *cmds++ = ((0 << 16) | dwords);
-
- memcpy(cmds, shader_pgm, dwords << 2);
- cmds += dwords;
-
- return cmds;
-}
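/*
 * Worked sizing: a call such as
 * program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN)
 * emits one header dword plus 2 + 0x12 payload dwords, i.e. 21 dwords of
 * command space for the 18-dword vertex program.
 */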
-
-static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst,
- uint32_t src, int dwords)
-{
- while (dwords-- > 0) {
- *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmds++ = src++;
- *cmds++ = dst;
- dst += 4;
- }
-
- return cmds;
-}
-
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
-
-static void build_reg_to_mem_range(unsigned int start, unsigned int end,
- unsigned int **cmd,
- struct adreno_context *drawctxt)
-{
- unsigned int i = start;
-
- for (i = start; i <= end; i++) {
- *(*cmd)++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *(*cmd)++ = i;
- *(*cmd)++ =
- ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
- (i - 0x2000) * 4;
- }
-}
-
-#endif
-
-/* chicken restore */
-static unsigned int *build_chicken_restore_cmds(
- struct adreno_context *drawctxt)
-{
- unsigned int *start = tmp_ctx.cmd;
- unsigned int *cmds = start;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0;
-
- *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
- tmp_ctx.chicken_restore = virt2gpu(cmds, &drawctxt->gpustate);
- *cmds++ = 0x00000000;
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
-
- return cmds;
-}
-
-/****************************************************************************/
-/* context save */
-/****************************************************************************/
-
-static const unsigned int register_ranges_a20x[] = {
- REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
- REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
- REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
- REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
- REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
- REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
- REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
- REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
- REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
- REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR,
- REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL,
- REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE,
- REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY,
- REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR
-};
-
-static const unsigned int register_ranges_a220[] = {
- REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
- REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
- REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
- REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
- REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
- REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
- REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
- REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
- REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
- REG_A220_PC_MAX_VTX_INDX, REG_A220_PC_INDX_OFFSET,
- REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
- REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
- REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
- REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
- REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
- REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
- REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
- REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR
-};
-
-static const unsigned int register_ranges_a225[] = {
- REG_RB_SURFACE_INFO, REG_A225_RB_COLOR_INFO3,
- REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
- REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
- REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
- REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
- REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
- REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
- REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
- REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
- REG_A220_PC_MAX_VTX_INDX, REG_A225_PC_MULTI_PRIM_IB_RESET_INDX,
- REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
- REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
- REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
- REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
- REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
- REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
- REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
- REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR,
- REG_A225_GRAS_UCP0X, REG_A225_GRAS_UCP5W,
- REG_A225_GRAS_UCP_ENABLED, REG_A225_GRAS_UCP_ENABLED
-};
-
-
-/* save h/w regs, alu constants, texture constants, etc. ...
-* requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
-*/
-static void build_regsave_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *start = tmp_ctx.cmd;
- unsigned int *cmd = start;
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /* Make sure the HW context has the correct register values
- * before reading them. */
- *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
- *cmd++ = 0;
-
- {
- unsigned int i = 0;
- unsigned int reg_array_size = 0;
- const unsigned int *ptr_register_ranges;
-
- /* Based on chip id choose the register ranges */
- if (adreno_is_a220(adreno_dev)) {
- ptr_register_ranges = register_ranges_a220;
- reg_array_size = ARRAY_SIZE(register_ranges_a220);
- } else if (adreno_is_a225(adreno_dev)) {
- ptr_register_ranges = register_ranges_a225;
- reg_array_size = ARRAY_SIZE(register_ranges_a225);
- } else {
- ptr_register_ranges = register_ranges_a20x;
- reg_array_size = ARRAY_SIZE(register_ranges_a20x);
- }
-
-
- /* Write HW registers into shadow */
- for (i = 0; i < (reg_array_size/2) ; i++) {
- build_reg_to_mem_range(ptr_register_ranges[i*2],
- ptr_register_ranges[i*2+1],
- &cmd, drawctxt);
- }
- }
-
- /* Copy ALU constants */
- cmd =
- reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
- REG_SQ_CONSTANT_0, ALU_CONSTANTS);
-
- /* Copy Tex constants */
- cmd =
- reg_to_mem(cmd,
- (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
- REG_SQ_FETCH_0, TEX_CONSTANTS);
-#else
-
- /* Insert a wait for idle packet before reading the registers.
- * This is to fix a hang/reset seen during stress testing. In this
- * hang, CP encountered a timeout reading SQ's boolean constant
- * register. There is logic in the HW that blocks reading of this
- * register when the SQ block is not idle, which we believe is
- * contributing to the hang.*/
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- /* H/w registers are already shadowed; just need to disable shadowing
- * to prevent corruption.
- */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
- *cmd++ = 4 << 16; /* regs, start=0 */
- *cmd++ = 0x0; /* count = 0 */
-
- /* ALU constants are already shadowed; just need to disable shadowing
- * to prevent corruption.
- */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
- *cmd++ = 0 << 16; /* ALU, start=0 */
- *cmd++ = 0x0; /* count = 0 */
-
- /* Tex constants are already shadowed; just need to disable shadowing
- * to prevent corruption.
- */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
- *cmd++ = 1 << 16; /* Tex, start=0 */
- *cmd++ = 0x0; /* count = 0 */
-#endif
-
- /* Need to handle some of the registers separately */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = REG_SQ_GPR_MANAGEMENT;
- *cmd++ = tmp_ctx.reg_values[0];
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = REG_TP0_CHICKEN;
- *cmd++ = tmp_ctx.reg_values[1];
-
- if (adreno_is_a22x(adreno_dev)) {
- unsigned int i;
- unsigned int j = 2;
- for (i = REG_A220_VSC_BIN_SIZE; i <=
- REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = i;
- *cmd++ = tmp_ctx.reg_values[j];
- j++;
- }
- }
-
- /* Copy Boolean constants */
- cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
- BOOL_CONSTANTS);
-
- /* Copy Loop constants */
- cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow,
- REG_SQ_CF_LOOP, LOOP_CONSTANTS);
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
-
-/* copy colour, depth, & stencil buffers from graphics memory to system memory */
-static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt,
- struct gmem_shadow_t *shadow)
-{
- unsigned int *cmds = shadow->gmem_save_commands;
- unsigned int *start = cmds;
- /* Calculate the new offset based on the adjusted base */
- unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
- unsigned int addr = shadow->gmemshadow.gpuaddr;
- unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
-
- if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
- /* Store TP0_CHICKEN register */
- *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmds++ = REG_TP0_CHICKEN;
-
- *cmds++ = tmp_ctx.chicken_restore;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0;
- }
-
- /* Set TP0_CHICKEN to zero */
- *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
- *cmds++ = 0x00000000;
-
- /* Set PA_SC_AA_CONFIG to 0 */
- *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
- *cmds++ = 0x00000000;
-
- /* program shader */
-
- /* load shader vtx constants ... 5 dwords */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
- *cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
- *cmds++ = 0;
- /* valid(?) vtx constant flag & addr */
- *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
- /* limit = 12 dwords */
- *cmds++ = 0x00000030;
-
- /* Invalidate L2 cache to make sure vertices are updated */
- *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
- *cmds++ = 0x1;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
- *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
- *cmds++ = 0x00ffffff; /* REG_VGT_MAX_VTX_INDX */
- *cmds++ = 0x0; /* REG_VGT_MIN_VTX_INDX */
- *cmds++ = 0x00000000; /* REG_VGT_INDX_OFFSET */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
- *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLORCONTROL);
- *cmds++ = 0x00000c20;
-
- /* Repartition shaders */
- *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
- *cmds++ = adreno_dev->pix_shader_start;
-
- /* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmds++ = 0x00003F00;
-
- *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
- *cmds++ = adreno_encode_istore_size(adreno_dev)
- | adreno_dev->pix_shader_start;
-
- /* load the patched vertex shader stream */
- cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
-
- /* Load the patched fragment shader stream */
- cmds =
- program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
-
- /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
- if (adreno_is_a22x(adreno_dev))
- *cmds++ = 0x10018001;
- else
- *cmds++ = 0x10010001;
- *cmds++ = 0x00000008;
-
- /* resolve */
-
- /* PA_CL_VTE_CNTL */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
- /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
- *cmds++ = 0x00000b00;
-
- /* program surface info */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
- *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
-
- /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
- * Base=gmem_base
- */
- /* gmem base assumed 4K aligned. */
- BUG_ON(tmp_ctx.gmem_base & 0xFFF);
- *cmds++ =
- (shadow->
- format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
-
- /* disable Z */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
- if (adreno_is_a22x(adreno_dev))
- *cmds++ = 0x08;
- else
- *cmds++ = 0;
-
- /* set REG_PA_SU_SC_MODE_CNTL
- * Front_ptype = draw triangles
- * Back_ptype = draw triangles
- * Provoking vertex = last
- */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
- *cmds++ = 0x00080240;
-
- /* Use maximum scissor values -- quad vertices already have the
- * correct bounds */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
- *cmds++ = (0 << 16) | 0;
- *cmds++ = (0x1fff << 16) | (0x1fff);
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
- *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
- *cmds++ = (0x1fff << 16) | (0x1fff);
-
- /* load the viewport so that z scale = clear depth and
- * z offset = 0.0f
- */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
- *cmds++ = 0xbf800000; /* -1.0f */
- *cmds++ = 0x0;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLOR_MASK);
- *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
- *cmds++ = 0xffffffff;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
-
- /* load the stencil ref value
- * $AAM - do this later
- */
-
- /* load the COPY state */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
- *cmds++ = CP_REG(REG_RB_COPY_CONTROL);
- *cmds++ = 0; /* RB_COPY_CONTROL */
- *cmds++ = addr & 0xfffff000; /* RB_COPY_DEST_BASE */
- *cmds++ = shadow->pitch >> 5; /* RB_COPY_DEST_PITCH */
-
- /* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
- * MaskWrite:R=G=B=A=1
- */
- *cmds++ = 0x0003c008 |
- (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
- /* Make sure we stay in offsetx field. */
- BUG_ON(offset & 0xfffff000);
- *cmds++ = offset;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_MODECONTROL);
- *cmds++ = 0x6; /* EDRAM copy */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
- *cmds++ = 0x00010000;
-
- if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
- *cmds++ = 0x0000000;
-
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
- *cmds++ = 0; /* viz query info. */
- /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
- *cmds++ = 0x00004088;
- *cmds++ = 3; /* NumIndices=3 */
- } else {
- /* queue the draw packet */
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
- *cmds++ = 0; /* viz query info. */
- /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
- *cmds++ = 0x00030088;
- }
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, shadow->gmem_save, start, cmds);
-
- return cmds;
-}
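/*
 * Worked example for the offset computation at the top of this function,
 * assuming a hypothetical shadow at gpuaddr 0x10001200 in COLORX_8_8_8_8
 * (4 bytes per pixel): RB_COPY_DEST_BASE gets the 4K-aligned 0x10001000
 * and the pixel offset is (0x10001200 - 0x10001000) / 4 = 0x80, which
 * fits the 12-bit field checked by the BUG_ON above.
 */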
-
-/* context restore */
-
-/* copy colour, depth, & stencil buffers from system memory to graphics memory */
-static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt,
- struct gmem_shadow_t *shadow)
-{
- unsigned int *cmds = shadow->gmem_restore_commands;
- unsigned int *start = cmds;
-
- if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
- /* Store TP0_CHICKEN register */
- *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmds++ = REG_TP0_CHICKEN;
- *cmds++ = tmp_ctx.chicken_restore;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0;
- }
-
- /* Set TP0_CHICKEN to zero */
- *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
- *cmds++ = 0x00000000;
-
- /* Set PA_SC_AA_CONFIG to 0 */
- *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
- *cmds++ = 0x00000000;
- /* shader constants */
-
- /* vertex buffer constants */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
-
- *cmds++ = (0x1 << 16) | (9 * 6);
- /* valid(?) vtx constant flag & addr */
- *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
- /* limit = 12 dwords */
- *cmds++ = 0x00000030;
- /* valid(?) vtx constant flag & addr */
- *cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
- /* limit = 8 dwords */
- *cmds++ = 0x00000020;
- *cmds++ = 0;
- *cmds++ = 0;
-
- /* Invalidate L2 cache to make sure vertices are updated */
- *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
- *cmds++ = 0x1;
-
- cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
-
- /* Repartition shaders */
- *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
- *cmds++ = adreno_dev->pix_shader_start;
-
- /* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmds++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
-
- *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
- *cmds++ = adreno_encode_istore_size(adreno_dev)
- | adreno_dev->pix_shader_start;
-
- /* Load the patched fragment shader stream */
- cmds =
- program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
-
- /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
- *cmds++ = 0x10030002;
- *cmds++ = 0x00000008;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
- *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
-
- if (!adreno_is_a22x(adreno_dev)) {
- /* PA_SC_VIZ_QUERY */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SC_VIZ_QUERY);
- *cmds++ = 0x0; /*REG_PA_SC_VIZ_QUERY */
- }
-
- /* RB_COLORCONTROL */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLORCONTROL);
- *cmds++ = 0x00000c20;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
- *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
- *cmds++ = 0x00ffffff; /* mmVGT_MAX_VTX_INDX */
- *cmds++ = 0x0; /* mmVGT_MIN_VTX_INDX */
- *cmds++ = 0x00000000; /* mmVGT_INDX_OFFSET */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
- *cmds++ = 0x00000002; /* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
- *cmds++ = 0x00000002; /* mmVGT_OUT_DEALLOC_CNTL */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_SQ_INTERPOLATOR_CNTL);
- *cmds++ = 0xffffffff; /* mmSQ_INTERPOLATOR_CNTL */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SC_AA_CONFIG);
- *cmds++ = 0x00000000; /* REG_PA_SC_AA_CONFIG */
-
- /* set REG_PA_SU_SC_MODE_CNTL
- * Front_ptype = draw triangles
- * Back_ptype = draw triangles
- * Provoking vertex = last
- */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
- *cmds++ = 0x00080240;
-
- /* texture constants */
- *cmds++ =
- cp_type3_packet(CP_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
- *cmds++ = (0x1 << 16) | (0 * 6);
- memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
- cmds[0] |= (shadow->pitch >> 5) << 22;
- cmds[1] |=
- shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
- cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
- cmds += SYS2GMEM_TEX_CONST_LEN;
-
- /* program surface info */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
- *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
-
- /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
- * Base=gmem_base
- */
- *cmds++ =
- (shadow->
- format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
-
- /* RB_DEPTHCONTROL */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
-
- if (adreno_is_a22x(adreno_dev))
- *cmds++ = 8; /* disable Z */
- else
- *cmds++ = 0; /* disable Z */
-
- /* Use maximum scissor values -- quad vertices already
- * have the correct bounds */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
- *cmds++ = (0 << 16) | 0;
- *cmds++ = ((0x1fff) << 16) | 0x1fff;
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
- *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
- *cmds++ = ((0x1fff) << 16) | 0x1fff;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
- /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
- *cmds++ = 0x00000b00;
-
- /*load the viewport so that z scale = clear depth and z offset = 0.0f */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
- *cmds++ = 0xbf800000;
- *cmds++ = 0x0;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLOR_MASK);
- *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
- *cmds++ = 0xffffffff;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
-
- /* load the stencil ref value
- * $AAM - do this later
- */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_RB_MODECONTROL);
- /* draw pixels with color and depth/stencil component */
- *cmds++ = 0x4;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
- *cmds++ = 0x00010000;
-
- if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
- *cmds++ = 0x0000000;
-
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
- *cmds++ = 0; /* viz query info. */
- /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
- *cmds++ = 0x00004088;
- *cmds++ = 3; /* NumIndices=3 */
- } else {
- /* queue the draw packet */
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
- *cmds++ = 0; /* viz query info. */
- /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
- *cmds++ = 0x00030088;
- }
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
-
- return cmds;
-}
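/*
 * Worked example for the texture-constant patch-up above, assuming a
 * hypothetical 64x64 shadow with a 256-byte pitch: (256 >> 5) = 8 is
 * OR'ed into the pitch field of the first constant dword, and
 * (64 - 1) | ((64 - 1) << 13) encodes width and height into the third
 * dword, filling the "Width & Height = TBD" placeholder in
 * sys2gmem_tex_const.
 */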
-
-static void build_regrestore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *start = tmp_ctx.cmd;
- unsigned int *cmd = start;
-
- unsigned int i = 0;
- unsigned int reg_array_size = 0;
- const unsigned int *ptr_register_ranges;
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- /* H/W Registers */
- /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
- cmd++;
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /* Force mismatch */
- *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
-#else
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
-#endif
-
- /* Based on chip id choose the registers ranges*/
- if (adreno_is_a220(adreno_dev)) {
- ptr_register_ranges = register_ranges_a220;
- reg_array_size = ARRAY_SIZE(register_ranges_a220);
- } else if (adreno_is_a225(adreno_dev)) {
- ptr_register_ranges = register_ranges_a225;
- reg_array_size = ARRAY_SIZE(register_ranges_a225);
- } else {
- ptr_register_ranges = register_ranges_a20x;
- reg_array_size = ARRAY_SIZE(register_ranges_a20x);
- }
-
-
- for (i = 0; i < (reg_array_size/2); i++) {
- cmd = reg_range(cmd, ptr_register_ranges[i*2],
- ptr_register_ranges[i*2+1]);
- }
-
-	/* Now that we know how many register blocks we have, we can compute
-	 * the command length
- */
- start[2] =
- cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
- /* Enable shadowing for the entire register block. */
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- start[4] |= (0 << 24) | (4 << 16); /* Disable shadowing. */
-#else
- start[4] |= (1 << 24) | (4 << 16);
-#endif
-
- /* Need to handle some of the registers separately */
- *cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
- tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0x00040400;
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
- *cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
- tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0x00000000;
-
- if (adreno_is_a22x(adreno_dev)) {
- unsigned int i;
- unsigned int j = 2;
- for (i = REG_A220_VSC_BIN_SIZE; i <=
- REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
- *cmd++ = cp_type0_packet(i, 1);
- tmp_ctx.reg_values[j] = virt2gpu(cmd,
- &drawctxt->gpustate);
- *cmd++ = 0x00000000;
- j++;
- }
- }
-
- /* ALU Constants */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- *cmd++ = (0 << 24) | (0 << 16) | 0; /* Disable shadowing */
-#else
- *cmd++ = (1 << 24) | (0 << 16) | 0;
-#endif
- *cmd++ = ALU_CONSTANTS;
-
- /* Texture Constants */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /* Disable shadowing */
- *cmd++ = (0 << 24) | (1 << 16) | 0;
-#else
- *cmd++ = (1 << 24) | (1 << 16) | 0;
-#endif
- *cmd++ = TEX_CONSTANTS;
-
- /* Boolean Constants */
- *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
- *cmd++ = (2 << 16) | 0;
-
-	/* the next BOOL_CONSTANTS dwords are the shadow area for
- * boolean constants.
- */
- tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
- cmd += BOOL_CONSTANTS;
-
- /* Loop Constants */
- *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
- *cmd++ = (3 << 16) | 0;
-
-	/* the next LOOP_CONSTANTS dwords are the shadow area for
- * loop constants.
- */
- tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
- cmd += LOOP_CONSTANTS;
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
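/*
 * Minimal sketch of the deferred-header idiom used above, with a
 * hypothetical example_deferred_header() helper: the packet header slot
 * is skipped while the payload is emitted, then back-patched once the
 * final length is known.
 */
static unsigned int *example_deferred_header(unsigned int *start)
{
	unsigned int *cmd = start;

	cmd++;				/* reserve the header slot */
	*cmd++ = 0xdeadbeef;		/* hypothetical payload dword 1 */
	*cmd++ = 0xcafebabe;		/* hypothetical payload dword 2 */

	/* length is now known; fill in the type3 header */
	start[0] = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
					(cmd - start) - 1);
	return cmd;
}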
-
-static void
-build_shader_save_restore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *save, *restore, *fixup;
- unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
- unsigned int *partition1;
- unsigned int *shaderBases, *partition2;
-
- /* compute vertex, pixel and shared instruction shadow GPU addresses */
- tmp_ctx.shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
- tmp_ctx.shader_pixel = tmp_ctx.shader_vertex
- + _shader_shadow_size(adreno_dev);
- tmp_ctx.shader_shared = tmp_ctx.shader_pixel
- + _shader_shadow_size(adreno_dev);
-
- /* restore shader partitioning and instructions */
-
- restore = cmd; /* start address */
-
- /* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmd++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
- *cmd++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
-
- /* Restore previous shader vertex & pixel instruction bases. */
- *cmd++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
- shaderBases = cmd++; /* TBD #5: shader bases (from fixup) */
-
- /* write the shader partition information to a scratch register */
- *cmd++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
- partition1 = cmd++; /* TBD #4a: partition info (from save) */
-
- /* load vertex shader instructions from the shadow. */
- *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
- *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
- startSizeVtx = cmd++; /* TBD #1: start/size (from save) */
-
- /* load pixel shader instructions from the shadow. */
- *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
- *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
- startSizePix = cmd++; /* TBD #2: start/size (from save) */
-
- /* load shared shader instructions from the shadow. */
- *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
- *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
- startSizeShared = cmd++; /* TBD #3: start/size (from save) */
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
-
- /*
- * fixup SET_SHADER_BASES data
- *
-	 * since self-modifying PM4 code is being used here, a separate
- * command buffer is used for this fixup operation, to ensure the
- * commands are not read by the PM4 engine before the data fields
- * have been written.
- */
-
- fixup = cmd; /* start address */
-
- /* write the shader partition information to a scratch register */
- *cmd++ = cp_type0_packet(REG_SCRATCH_REG2, 1);
- partition2 = cmd++; /* TBD #4b: partition info (from save) */
-
- /* mask off unused bits, then OR with shader instruction memory size */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = REG_SCRATCH_REG2;
- /* AND off invalid bits. */
- *cmd++ = 0x0FFF0FFF;
- /* OR in instruction memory size. */
- *cmd++ = adreno_encode_istore_size(adreno_dev);
-
- /* write the computed value to the SET_SHADER_BASES data field */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = REG_SCRATCH_REG2;
- /* TBD #5: shader bases (to restore) */
- *cmd++ = virt2gpu(shaderBases, &drawctxt->gpustate);
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
-
- /* save shader partitioning and instructions */
-
- save = cmd; /* start address */
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- /* fetch the SQ_INST_STORE_MANAGMENT register value,
- * store the value in the data fields of the SET_CONSTANT commands
- * above.
- */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
- /* TBD #4a: partition info (to restore) */
- *cmd++ = virt2gpu(partition1, &drawctxt->gpustate);
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
- /* TBD #4b: partition info (to fixup) */
- *cmd++ = virt2gpu(partition2, &drawctxt->gpustate);
-
-
- /* store the vertex shader instructions */
- *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
- *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
- /* TBD #1: start/size (to restore) */
- *cmd++ = virt2gpu(startSizeVtx, &drawctxt->gpustate);
-
- /* store the pixel shader instructions */
- *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
- *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
- /* TBD #2: start/size (to restore) */
- *cmd++ = virt2gpu(startSizePix, &drawctxt->gpustate);
-
- /* store the shared shader instructions if vertex base is nonzero */
-
- *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
- *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
- /* TBD #3: start/size (to restore) */
- *cmd++ = virt2gpu(startSizeShared, &drawctxt->gpustate);
-
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- /* create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
-
- tmp_ctx.cmd = cmd;
-}
-
-/* create buffers for saving/restoring registers, constants, & GMEM */
-static int a2xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
-
- /* build indirect command buffers to save & restore regs/constants */
- build_regrestore_cmds(adreno_dev, drawctxt);
- build_regsave_cmds(adreno_dev, drawctxt);
-
- build_shader_save_restore_cmds(adreno_dev, drawctxt);
-
- return 0;
-}
-
-/* create buffers for saving/restoring registers, constants, & GMEM */
-static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- int result;
-
- calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
- tmp_ctx.gmem_base = adreno_dev->gmem_base;
-
- result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
- drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
-
- if (result)
- return result;
-
- /* set the gmem shadow flag for the context */
- drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
-
- /* blank out gmem shadow. */
- kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
- drawctxt->context_gmem_shadow.size);
-
- /* build quad vertex buffer */
- build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
- &tmp_ctx.cmd);
-
- /* build TP0_CHICKEN register restore command buffer */
- if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
- tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);
-
- /* build indirect command buffers to save & restore gmem */
- drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
- tmp_ctx.cmd =
- build_gmem2sys_cmds(adreno_dev, drawctxt,
- &drawctxt->context_gmem_shadow);
- drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
- tmp_ctx.cmd =
- build_sys2gmem_cmds(adreno_dev, drawctxt,
- &drawctxt->context_gmem_shadow);
-
- kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
- KGSL_CACHE_OP_FLUSH);
-
- kgsl_cffdump_syncmem(NULL,
- &drawctxt->context_gmem_shadow.gmemshadow,
- drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
- drawctxt->context_gmem_shadow.gmemshadow.size, false);
-
- return 0;
-}
-
-static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- int ret;
-
- /*
- * Allocate memory for the GPU state and the context commands.
-	 * Despite the name, this is much more than just storage for
-	 * the gpustate. It also contains command space for the gmem
-	 * save and for texture and vertex buffer storage.
- */
-
- ret = kgsl_allocate(&drawctxt->gpustate,
- drawctxt->pagetable, _context_size(adreno_dev));
-
- if (ret)
- return ret;
-
- kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
- _context_size(adreno_dev));
-
- tmp_ctx.cmd = tmp_ctx.start
- = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
-
- if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
- ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
- if (ret)
- goto done;
-
- drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
- }
-
- if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
- ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
- if (ret)
- goto done;
- }
-
- /* Flush and sync the gpustate memory */
-
- kgsl_cache_range_op(&drawctxt->gpustate,
- KGSL_CACHE_OP_FLUSH);
-
- kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
- drawctxt->gpustate.gpuaddr,
- drawctxt->gpustate.size, false);
-
-done:
- if (ret)
- kgsl_sharedmem_free(&drawctxt->gpustate);
-
- return ret;
-}
-
-static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
- struct adreno_context *context)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int cmd[11];
- unsigned int *cmds = &cmd[0];
-
- if (adreno_is_a225(adreno_dev)) {
- adreno_dev->gpudev->ctx_switches_since_last_draw++;
-		/* If there have been more than
-		 * ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW context
-		 * switches without gmem being saved, then we need to
-		 * execute this workaround */
- if (adreno_dev->gpudev->ctx_switches_since_last_draw >
- ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
- adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
- else
- return;
- /*
- * Issue an empty draw call to avoid possible hangs due to
- * repeated idles without intervening draw calls.
- * On adreno 225 the PC block has a cache that is only
- * flushed on draw calls and repeated idles can make it
- * overflow. The gmem save path contains draw calls so
- * this workaround isn't needed there.
- */
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = (0x4 << 16) | (REG_PA_SU_SC_MODE_CNTL - 0x2000);
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 5);
- *cmds++ = 0;
- *cmds++ = 1<<14;
- *cmds++ = 0;
- *cmds++ = device->mmu.setstate_memory.gpuaddr;
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
- } else {
- /* On Adreno 20x/220, if the events for shader space reuse
-		 * get dropped, the CP block would wait indefinitely.
- * Sending CP_SET_SHADER_BASES packet unblocks the CP from
- * this wait.
- */
- *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
- *cmds++ = adreno_encode_istore_size(adreno_dev)
- | adreno_dev->pix_shader_start;
- }
-
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
- &cmd[0], cmds - cmd);
-}
-
-static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
- struct adreno_context *context)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
-
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
-
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->reg_save[1],
- context->reg_save[2] << 2, true);
- /* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->reg_save, 3);
-
- if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->shader_save[1],
- context->shader_save[2] << 2, true);
- /* save shader partitioning and instructions. */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE,
- context->shader_save, 3);
-
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->shader_fixup[1],
- context->shader_fixup[2] << 2, true);
- /*
- * fixup shader partitioning parameter for
- * SET_SHADER_BASES.
- */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->shader_fixup, 3);
-
- context->flags |= CTXT_FLAGS_SHADER_RESTORE;
- }
- }
-
- if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
- (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->context_gmem_shadow.gmem_save[1],
- context->context_gmem_shadow.gmem_save[2] << 2, true);
- /* save gmem.
- * (note: changes shader. shader must already be saved.)
- */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE,
- context->context_gmem_shadow.gmem_save, 3);
-
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->chicken_restore[1],
- context->chicken_restore[2] << 2, true);
-
- /* Restore TP0_CHICKEN */
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->chicken_restore, 3);
- }
- adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
-
- context->flags |= CTXT_FLAGS_GMEM_RESTORE;
- } else if (adreno_is_a2xx(adreno_dev))
- a2xx_drawctxt_draw_workaround(adreno_dev, context);
-}
-
-static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
- struct adreno_context *context)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int link[10];
- unsigned int *cmds = &link[0];
- unsigned int sizedwords = 0;
-
- if (context == NULL) {
-		/* No context - set the default pagetable and that's it */
- kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
- adreno_dev->drawctxt_active->id);
- return;
- }
-
- KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
-
-	/* Reset the VSC binning control register */
- if (adreno_is_a225(adreno_dev)) {
- *cmds++ = cp_type0_packet(REG_VSC_BINNING_ENABLE, 1);
- *cmds++ = 0;
- }
-
- *cmds++ = cp_nop_packet(1);
- *cmds++ = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
- *cmds++ = context->id;
- sizedwords = (cmds - &link[0]);
-
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
- &link[0], sizedwords);
- kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
-
- /* restore gmem.
- * (note: changes shader. shader must not already be restored.)
- */
- if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->context_gmem_shadow.gmem_restore[1],
- context->context_gmem_shadow.gmem_restore[2] << 2,
- true);
-
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE,
- context->context_gmem_shadow.gmem_restore, 3);
-
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->chicken_restore[1],
- context->chicken_restore[2] << 2, true);
-
- /* Restore TP0_CHICKEN */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->chicken_restore, 3);
- }
-
- context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
- }
-
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->reg_restore[1],
- context->reg_restore[2] << 2, true);
-
- /* restore registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
-
- /* restore shader instructions & partitioning. */
- if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
- kgsl_cffdump_syncmem(NULL, &context->gpustate,
- context->shader_restore[1],
- context->shader_restore[2] << 2, true);
-
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->shader_restore, 3);
- }
- }
-
- if (adreno_is_a20x(adreno_dev)) {
- cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
- cmds[1] = context->bin_base_offset;
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE, cmds, 2);
- }
-}
-
-/*
- * Interrupt management
- *
- * a2xx interrupt control is distributed among the various
- * hardware components (RB, CP, MMU). The main interrupt
- * tells us which component fired the interrupt, but one needs
- * to go to the individual component to find out why. The
- * following functions provide the broken out support for
- * managing the interrupts
- */
-
-#define RBBM_INT_MASK (RBBM_INT_CNTL__RDERR_INT_MASK | \
- RBBM_INT_CNTL__PROTECT_INT_MASK)
-
-#define CP_INT_MASK \
- (CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
- CP_INT_CNTL__OPCODE_ERROR_MASK | \
- CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
- CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
- CP_INT_CNTL__IB_ERROR_MASK | \
- CP_INT_CNTL__IB1_INT_MASK | \
- CP_INT_CNTL__RB_INT_MASK)
-
-#define VALID_STATUS_COUNT_MAX 10
-
-static struct {
- unsigned int mask;
- const char *message;
-} kgsl_cp_error_irqs[] = {
- { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
-		"ringbuffer T0 packet in IB interrupt" },
- { CP_INT_CNTL__OPCODE_ERROR_MASK,
- "ringbuffer opcode error interrupt" },
- { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
- "ringbuffer protected mode error interrupt" },
- { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
- "ringbuffer reserved bit error interrupt" },
- { CP_INT_CNTL__IB_ERROR_MASK,
- "ringbuffer IB error interrupt" },
-};
-
-static void a2xx_cp_intrcallback(struct kgsl_device *device)
-{
- unsigned int status = 0, num_reads = 0, master_status = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- int i;
-
- adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
- while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
- (master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
- adreno_regread(device, REG_CP_INT_STATUS, &status);
- adreno_regread(device, REG_MASTER_INT_SIGNAL,
- &master_status);
- num_reads++;
- }
- if (num_reads > 1)
- KGSL_DRV_WARN(device,
- "Looped %d times to read REG_CP_INT_STATUS\n",
- num_reads);
-
- trace_kgsl_a2xx_irq_status(device, master_status, status);
-
- if (!status) {
- if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
- /* This indicates that we could not read CP_INT_STAT.
- * As a precaution just wake up processes so
- * they can check their timestamps. Since, we
-			 * they can check their timestamps. Since we
-			 * did not ack any interrupts, this interrupt will
- KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
- wake_up_interruptible_all(&device->wait_queue);
- } else
-			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
- if (status & kgsl_cp_error_irqs[i].mask) {
- KGSL_CMD_CRIT(rb->device, "%s\n",
- kgsl_cp_error_irqs[i].message);
- /*
- * on fatal errors, turn off the interrupts to
- * avoid storming. This has the side effect of
- * forcing a PM dump when the timestamp times out
- */
-
- kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
- }
- }
-
- /* only ack bits we understand */
- status &= CP_INT_MASK;
- adreno_regwrite(device, REG_CP_INT_ACK, status);
-
- if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
- KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
- queue_work(device->work_queue, &device->ts_expired_ws);
- wake_up_interruptible_all(&device->wait_queue);
- }
-}
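
The read loop at the top of a2xx_cp_intrcallback() works around hardware behavior where REG_CP_INT_STATUS can transiently read back zero even while the master status still reports a pending CP interrupt, so the read is retried a bounded number of times. A standalone sketch of that bounded-retry pattern, where fake_cp_int_status() and CP_INT_STAT_BIT are stand-ins for the real register accessors and mask:

#include <stdio.h>

#define VALID_STATUS_COUNT_MAX 10
#define CP_INT_STAT_BIT (1u << 30)	/* stand-in for MASTER_INT_SIGNAL__CP_INT_STAT */

/* Stand-in for the volatile status read: empty twice, then a value. */
static unsigned int fake_cp_int_status(void)
{
	static int calls;
	return (++calls < 3) ? 0 : 0x80;
}

int main(void)
{
	unsigned int status = 0, master = CP_INT_STAT_BIT;
	int num_reads = 0;

	/* Retry while the master line says the CP fired but the CP
	 * status register still reads back empty. */
	while (!status && num_reads < VALID_STATUS_COUNT_MAX &&
	       (master & CP_INT_STAT_BIT)) {
		status = fake_cp_int_status();
		num_reads++;
	}
	printf("status=0x%x after %d reads\n", status, num_reads);
	return 0;
}
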
-
-static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
-{
- unsigned int status = 0;
- unsigned int rderr = 0;
- unsigned int addr = 0;
- const char *source;
-
- adreno_regread(device, REG_RBBM_INT_STATUS, &status);
-
- if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
- adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
- source = (rderr & RBBM_READ_ERROR_REQUESTER)
- ? "host" : "cp";
- /* convert to dword address */
- addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2;
-
- /*
- * Log CP_INT_STATUS interrupts from the CP at a
- * lower level because they can happen frequently
- * and are worked around in a2xx_irq_handler.
- */
- if (addr == REG_CP_INT_STATUS &&
- rderr & RBBM_READ_ERROR_ERROR &&
- rderr & RBBM_READ_ERROR_REQUESTER)
- KGSL_DRV_WARN(device,
- "rbbm read error interrupt: %s reg: %04X\n",
- source, addr);
- else
- KGSL_DRV_CRIT(device,
- "rbbm read error interrupt: %s reg: %04X\n",
- source, addr);
- } else if (status & RBBM_INT_CNTL__PROTECT_INT_MASK) {
- adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
- source = (rderr & RBBM_READ_ERROR_REQUESTER)
- ? "host" : "cp";
- /* convert to dword address */
- addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2;
- KGSL_DRV_CRIT(device,
- "RBBM | Protected mode error |%s|%s| addr=%x\n",
- rderr & (1 << 31) ? "WRITE" : "READ", source,
- addr);
- }
-
- status &= RBBM_INT_MASK;
- adreno_regwrite(device, REG_RBBM_INT_ACK, status);
-}
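
a2xx_rbbm_intrcallback() decodes three pieces of information out of the single REG_RBBM_READ_ERROR value: who made the failing access, whether it was a read or a write, and the faulting register (stored as a byte address, hence the shift right by 2 to get a dword offset). A user-space sketch of that decode; the mask and bit positions below are illustrative assumptions, since the real RBBM_READ_ERROR_* definitions live in the a2xx register headers:

#include <stdio.h>

/* Assumed field layout for illustration only. */
#define READ_ERROR_REQUESTER	(1u << 30)	/* assumed: 1 = host, 0 = cp */
#define READ_ERROR_WRITE	(1u << 31)
#define READ_ERROR_ADDR_MASK	0x0001fffcu	/* assumed byte-address field */

static void decode_read_error(unsigned int rderr)
{
	/* Shift the byte address right by 2 to get a dword register
	 * offset, as the handler above does. */
	unsigned int addr = (rderr & READ_ERROR_ADDR_MASK) >> 2;

	printf("%s %s error at reg %04X\n",
	       (rderr & READ_ERROR_REQUESTER) ? "host" : "cp",
	       (rderr & READ_ERROR_WRITE) ? "WRITE" : "READ", addr);
}

int main(void)
{
	decode_read_error(READ_ERROR_WRITE | READ_ERROR_REQUESTER | 0x1f0);
	return 0;
}
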
-
-irqreturn_t a2xx_irq_handler(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- irqreturn_t result = IRQ_NONE;
- unsigned int status;
-
- adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
-
- if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
- kgsl_mh_intrcallback(device);
- result = IRQ_HANDLED;
- }
-
- if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
- a2xx_cp_intrcallback(device);
- result = IRQ_HANDLED;
- }
-
- if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
- a2xx_rbbm_intrcallback(device);
- result = IRQ_HANDLED;
- }
-
- return result;
-}
-
-static void a2xx_irq_control(struct adreno_device *adreno_dev, int state)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- if (state) {
- adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
- adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
- adreno_regwrite(device, MH_INTERRUPT_MASK,
- kgsl_mmu_get_int_mask());
- } else {
- adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
- adreno_regwrite(device, REG_CP_INT_CNTL, 0);
- adreno_regwrite(device, MH_INTERRUPT_MASK, 0);
- }
-
- /* Force the writes to post before touching the IRQ line */
- wmb();
-}
-
-static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int status;
-
- adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
-
- return (status &
- (MASTER_INT_SIGNAL__MH_INT_STAT |
- MASTER_INT_SIGNAL__CP_INT_STAT |
- MASTER_INT_SIGNAL__RBBM_INT_STAT)) ? 1 : 0;
-}
-
-static int a2xx_rb_init(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- unsigned int *cmds, cmds_gpu;
-
- /* ME_INIT */
- cmds = adreno_ringbuffer_allocspace(rb, NULL, 19);
- if (cmds == NULL)
- return -ENOMEM;
-
- cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
-
- GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 18));
- /* All fields present (bits 9:0) */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
- /* Disable/Enable Real-Time Stream processing (present but ignored) */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
- GSL_RB_WRITE(cmds, cmds_gpu,
- SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
-
- /* Instruction memory size: */
- GSL_RB_WRITE(cmds, cmds_gpu,
- (adreno_encode_istore_size(adreno_dev)
- | adreno_dev->pix_shader_start));
- /* Maximum Contexts */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
-	/* Write confirm interval: the CP will wait
-	 * wait_interval * 16 clocks between polls */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
- /* NQ and External Memory Swap */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
- /* Enable Protected mode registers for A2xx */
- GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
-
- /* Disable header dumping and Header dump address */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- /* Header dump size */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
- adreno_ringbuffer_submit(rb);
-
- return 0;
-}
-
-static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int reg, val;
-
- /* Freeze the counter */
- adreno_regwrite(device, REG_CP_PERFMON_CNTL,
- REG_PERF_MODE_CNT | REG_PERF_STATE_FREEZE);
-
- /* Get the value */
- adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &val);
-
- /* Reset the counter */
- adreno_regwrite(device, REG_CP_PERFMON_CNTL,
- REG_PERF_MODE_CNT | REG_PERF_STATE_RESET);
-
- /* Re-Enable the performance monitors */
- adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
- adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
- adreno_regwrite(device, REG_CP_PERFMON_CNTL,
- REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
-
- return val;
-}
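
a2xx_busy_cycles() samples the perf counter with a freeze/read/reset/re-enable sequence, so the value cannot change mid-read and each sample restarts from zero. A minimal model of the same sequence with the register I/O replaced by a plain struct (the real code writes REG_CP_PERFMON_CNTL and reads REG_RBBM_PERFCOUNTER1_LO):

#include <stdio.h>

struct perfctr {
	unsigned int frozen;
	unsigned int value;
};

static unsigned int sample_and_reset(struct perfctr *c)
{
	unsigned int val;

	c->frozen = 1;		/* REG_PERF_STATE_FREEZE */
	val = c->value;		/* read the latched count */
	c->value = 0;		/* REG_PERF_STATE_RESET */
	c->frozen = 0;		/* REG_PERF_STATE_ENABLE */
	return val;
}

int main(void)
{
	struct perfctr c = { 0, 123456 };
	unsigned int busy = sample_and_reset(&c);

	printf("busy=%u, counter now %u\n", busy, c.value);
	return 0;
}
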
-
-static void a2xx_gmeminit(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- union reg_rb_edram_info rb_edram_info;
- unsigned int gmem_size;
- unsigned int edram_value = 0;
-
- /* get edram_size value equivalent */
- gmem_size = (adreno_dev->gmem_size >> 14);
- while (gmem_size >>= 1)
- edram_value++;
-
- rb_edram_info.val = 0;
-
- rb_edram_info.f.edram_size = edram_value;
- rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
-
- /* must be aligned to size */
- rb_edram_info.f.edram_range = (adreno_dev->gmem_base >> 14);
-
- adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
-}
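
The edram_size field computed above is just log2 of the GMEM size expressed in 16KB units: the shift by 14 converts bytes to 16KB units, and the while loop counts halvings. A quick standalone check:

#include <stdio.h>

static unsigned int edram_size_field(unsigned int gmem_size)
{
	unsigned int v = gmem_size >> 14;	/* units of 16KB */
	unsigned int field = 0;

	while (v >>= 1)
		field++;
	return field;
}

int main(void)
{
	/* 256KB of GMEM -> 16 units of 16KB -> log2(16) = 4 */
	printf("%u\n", edram_size_field(256 * 1024));
	return 0;
}
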
-
-/**
- * a2xx_protect_init() - Initializes register protection on a2xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
-static void a2xx_protect_init(struct kgsl_device *device)
-{
- int index = 0;
-
- /* Enable access protection to privileged registers */
- kgsl_regwrite(device, REG_RBBM_INT_CNTL,
- RBBM_INT_CNTL__PROTECT_INT_MASK);
-
- /* RBBM_SOFT_RESET register */
- adreno_set_protected_registers(device, &index, 0x03C, 0x0);
- /* RBBM_INT_CNTL & RBBM_INT_STATUS */
- adreno_set_protected_registers(device, &index, 0x3B4, 0x1);
- /* RBBM_PROTECT_ registers */
- adreno_set_protected_registers(device, &index, 0x140, 0xF);
-
- /* CP registers */
- adreno_set_protected_registers(device, &index, 0x1C0, 0x20);
- /* CP_STATE_DEBUG_INDEX & CP_STATE_DEBUG_DATA */
- adreno_set_protected_registers(device, &index, 0x1EC, 0x1);
- /* CP_ME_CNTL,CP_ME_STATUS, CP_ME_RAM_ and CP_DEBUG registers */
- adreno_set_protected_registers(device, &index, 0x1F6, 0x7);
-
- /* MH_MMU_PT_BASE register */
- adreno_set_protected_registers(device, &index, 0x042, 0x0);
-}
-
-static void a2xx_start(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- /*
- * We need to make sure all blocks are powered up and clocked
- * before issuing a soft reset. The overrides will then be
- * turned off (set to 0)
- */
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
-
- /*
- * Only reset CP block if all blocks have previously been
- * reset
- */
- if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
- !adreno_is_a22x(adreno_dev)) {
- adreno_regwrite(device, REG_RBBM_SOFT_RESET,
- 0xFFFFFFFF);
- device->flags |= KGSL_FLAGS_SOFT_RESET;
- } else {
- adreno_regwrite(device, REG_RBBM_SOFT_RESET,
- 0x00000001);
- }
- /*
- * The core is in an indeterminate state until the reset
- * completes after 30ms.
- */
- msleep(30);
-
- adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
-
- if (adreno_is_a225(adreno_dev)) {
- /* Enable large instruction store for A225 */
- adreno_regwrite(device, REG_SQ_FLOW_CONTROL,
- 0x18000000);
- }
-
- if (adreno_is_a20x(adreno_dev))
- /* For A20X based targets increase number of clocks
- * that RBBM will wait before de-asserting Register
- * Clock Active signal */
- adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF);
- else
- adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
-
- adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
- adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
-
- if (cpu_is_msm8960())
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
- else
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
-
- if (!adreno_is_a22x(adreno_dev))
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
- else
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
-
- adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
-
- /* Turn on protection */
- a2xx_protect_init(device);
-
- /* Make sure interrupts are disabled */
- adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
- adreno_regwrite(device, REG_CP_INT_CNTL, 0);
- adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
-
- a2xx_gmeminit(adreno_dev);
-}
-
-/* Defined in adreno_a2xx_snapshot.c */
-void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
- int *remain, int hang);
-
-struct adreno_gpudev adreno_a2xx_gpudev = {
- .reg_rbbm_status = REG_RBBM_STATUS,
- .reg_cp_pfp_ucode_addr = REG_CP_PFP_UCODE_ADDR,
- .reg_cp_pfp_ucode_data = REG_CP_PFP_UCODE_DATA,
-
- .ctxt_create = a2xx_drawctxt_create,
- .ctxt_save = a2xx_drawctxt_save,
- .ctxt_restore = a2xx_drawctxt_restore,
- .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
- .irq_handler = a2xx_irq_handler,
- .irq_control = a2xx_irq_control,
- .irq_pending = a2xx_irq_pending,
- .snapshot = a2xx_snapshot,
- .rb_init = a2xx_rb_init,
- .busy_cycles = a2xx_busy_cycles,
- .start = a2xx_start,
-};
diff --git a/drivers/gpu/msm/adreno_a2xx_snapshot.c b/drivers/gpu/msm/adreno_a2xx_snapshot.c
deleted file mode 100644
index 2c86f82..0000000
--- a/drivers/gpu/msm/adreno_a2xx_snapshot.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "kgsl.h"
-#include "adreno.h"
-#include "kgsl_snapshot.h"
-
-#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
- + sizeof(struct kgsl_snapshot_debug))
-
-/* Dump the SX debug registers into a GPU snapshot debug section */
-
-#define SXDEBUG_COUNT 0x1B
-
-static int a2xx_snapshot_sxdebug(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i;
-
- if (remain < DEBUG_SECTION_SZ(SXDEBUG_COUNT)) {
- SNAPSHOT_ERR_NOMEM(device, "SX DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_SX;
- header->size = SXDEBUG_COUNT;
-
- for (i = 0; i < SXDEBUG_COUNT; i++) {
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
- adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
- }
-
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
-
- return DEBUG_SECTION_SZ(SXDEBUG_COUNT);
-}
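
The SX dump above shows the indexed-debug-register idiom used throughout this file: write a select value to REG_RBBM_DEBUG_CNTL, read the result back from REG_RBBM_DEBUG_OUT, and store everything after a small section header. A user-space sketch of how the section buffer is laid out, with fake_debug_out() standing in for the select-then-read register pair:

#include <stdio.h>

#define SXDEBUG_COUNT 0x1B

/* Stand-in for the write-select/read-out register pair. */
static unsigned int fake_debug_out(unsigned int select)
{
	return select ^ 0xA5A5A5A5;	/* pretend hardware data */
}

int main(void)
{
	unsigned int section[2 + SXDEBUG_COUNT];	/* header + payload */
	unsigned int *data = section + 2;
	unsigned int i;

	section[0] = 1;			/* placeholder for SNAPSHOT_DEBUG_SX */
	section[1] = SXDEBUG_COUNT;	/* payload size in dwords */
	for (i = 0; i < SXDEBUG_COUNT; i++)
		data[i] = fake_debug_out(0x1B00 | i);

	printf("section is %zu bytes\n", sizeof(section));
	return 0;
}
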
-
-#define CPDEBUG_COUNT 0x20
-
-static int a2xx_snapshot_cpdebug(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i;
-
- if (remain < DEBUG_SECTION_SZ(CPDEBUG_COUNT)) {
- SNAPSHOT_ERR_NOMEM(device, "CP DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP;
- header->size = CPDEBUG_COUNT;
-
- for (i = 0; i < CPDEBUG_COUNT; i++) {
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
- adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
- }
-
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
-
- return DEBUG_SECTION_SZ(CPDEBUG_COUNT);
-}
-
-/*
- * The contents of the SQ debug sections are dword pairs:
- * [register offset]:[value]
- * This macro writes both dwords for the given register
- */
-
-#define SQ_DEBUG_WRITE(_device, _reg, _data, _offset) \
- do { _data[(_offset)++] = (_reg); \
- adreno_regread(_device, (_reg), &_data[(_offset)++]); } while (0)
-
-#define SQ_DEBUG_BANK_SIZE 23
-
-static int a2xx_snapshot_sqdebug(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, offset = 0;
- int size = SQ_DEBUG_BANK_SIZE * 2 * 2;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "SQ Debug");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_SQ;
- header->size = size;
-
- for (i = 0; i < 2; i++) {
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_CONST_MGR_FSM+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_EXP_ALLOC+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_0+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_1+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_PIX+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_VTX+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_INPUT_FSM+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_0+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_1+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_0+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device,
- REG_SQ_DEBUG_PIX_TB_STATUS_REG_0+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device,
- REG_SQ_DEBUG_PIX_TB_STATUS_REG_1+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device,
- REG_SQ_DEBUG_PIX_TB_STATUS_REG_2+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device,
- REG_SQ_DEBUG_PIX_TB_STATUS_REG_3+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PTR_BUFF+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TB_STATUS_SEL+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TP_FSM+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_0+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_1+i*0x1000,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM+i*0x1000,
- data, offset);
- }
-
- return DEBUG_SECTION_SZ(size);
-}
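
Each SQ debug entry is a dword pair, [register offset]:[value], which is why the section size above is SQ_DEBUG_BANK_SIZE * 2 (two dwords per register) * 2 (two banks, the second at +0x1000). A small sketch of the SQ_DEBUG_WRITE pattern as a plain function, with fake_regread() standing in for adreno_regread():

#include <stdio.h>

static unsigned int fake_regread(unsigned int reg)
{
	return reg * 3;	/* pretend hardware data */
}

/* Function form of the SQ_DEBUG_WRITE macro: one dword for the
 * register offset, one for its value. */
static void debug_write(unsigned int reg, unsigned int *data, int *offset)
{
	data[(*offset)++] = reg;
	data[(*offset)++] = fake_regread(reg);
}

int main(void)
{
	unsigned int data[4];
	int offset = 0;

	debug_write(0x2400, data, &offset);
	debug_write(0x2400 + 0x1000, data, &offset);	/* second bank */
	printf("%04x:%08x %04x:%08x\n", data[0], data[1], data[2], data[3]);
	return 0;
}
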
-
-#define SQ_DEBUG_THREAD_SIZE 7
-
-static int a2xx_snapshot_sqthreaddebug(struct kgsl_device *device,
- void *snapshot, int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, offset = 0;
- int size = SQ_DEBUG_THREAD_SIZE * 2 * 16;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "SQ THREAD DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_SQTHREAD;
- header->size = size;
-
- for (i = 0; i < 16; i++) {
- adreno_regwrite(device, REG_SQ_DEBUG_TB_STATUS_SEL,
- i | (6<<4) | (i<<7) | (1<<11) | (1<<12)
- | (i<<16) | (6<<20) | (i<<23));
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATUS_REG,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_0,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_1,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_2,
- data, offset);
- SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_3,
- data, offset);
- }
-
- return DEBUG_SECTION_SZ(size);
-}
-
-#define MIUDEBUG_COUNT 0x10
-
-static int a2xx_snapshot_miudebug(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i;
-
- if (remain < DEBUG_SECTION_SZ(MIUDEBUG_COUNT)) {
- SNAPSHOT_ERR_NOMEM(device, "MIU DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_MIU;
- header->size = MIUDEBUG_COUNT;
-
- for (i = 0; i < MIUDEBUG_COUNT; i++) {
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1600 | i);
- adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
- }
-
- adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
-
- return DEBUG_SECTION_SZ(MIUDEBUG_COUNT);
-}
-
-/* Snapshot the istore memory */
-static int a2xx_snapshot_istore(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_istore *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int count, i;
-
- count = adreno_dev->istore_size * adreno_dev->instruction_size;
-
- if (remain < (count * 4) + sizeof(*header)) {
- KGSL_DRV_ERR(device,
- "snapshot: Not enough memory for the istore section");
- return 0;
- }
-
- header->count = adreno_dev->istore_size;
-
- for (i = 0; i < count; i++)
- kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]);
-
- return (count * 4) + sizeof(*header);
-}
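
The istore section is sized as one dword per instruction-store slot (istore_size * instruction_size) plus the section header, and the remain check above refuses to write a partial section. A sketch of that sizing rule; the sizes below are assumptions for illustration, since the real values come from the probed adreno_device:

#include <stdio.h>

struct fake_istore_header {
	unsigned int count;
};

int main(void)
{
	unsigned int istore_size = 4096;	/* assumed instruction slots */
	unsigned int instruction_size = 3;	/* assumed dwords per slot */
	unsigned int count = istore_size * instruction_size;
	size_t needed = count * 4 + sizeof(struct fake_istore_header);

	printf("need %zu bytes for %u dwords\n", needed, count);
	return 0;
}
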
-
-/* A2XX GPU snapshot function - this is where all of the A2XX specific
- * bits and pieces are grabbed into the snapshot memory
- */
-
-void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
- int *remain, int hang)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct kgsl_snapshot_registers_list list;
- struct kgsl_snapshot_registers regs;
- unsigned int pmoverride;
-
- /* Choose the register set to dump */
-
- if (adreno_is_a20x(adreno_dev)) {
- regs.regs = (unsigned int *) a200_registers;
- regs.count = a200_registers_count;
- } else if (adreno_is_a220(adreno_dev)) {
- regs.regs = (unsigned int *) a220_registers;
- regs.count = a220_registers_count;
- } else if (adreno_is_a225(adreno_dev)) {
- regs.regs = (unsigned int *) a225_registers;
- regs.count = a225_registers_count;
- }
-
- list.registers = &regs;
- list.count = 1;
-
- /* Master set of (non debug) registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
- kgsl_snapshot_dump_regs, &list);
-
- /* CP_STATE_DEBUG indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_CP_STATE_DEBUG_INDEX,
- REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
-
- /* CP_ME indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
- 64, 44);
-
- /*
- * Need to temporarily turn off clock gating for the debug bus to
- * work
- */
-
- adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride);
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
-
- /* SX debug registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a2xx_snapshot_sxdebug, NULL);
-
- /* SU debug indexed registers (only for < 470) */
- if (!adreno_is_a22x(adreno_dev))
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_PA_SU_DEBUG_CNTL,
- REG_PA_SU_DEBUG_DATA,
- 0, 0x1B);
-
- /* CP debug registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a2xx_snapshot_cpdebug, NULL);
-
- /* MH debug indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40);
-
- /* Leia only register sets */
- if (adreno_is_a22x(adreno_dev)) {
-		/* RB DEBUG indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8);
-
- /* RB DEBUG indexed registers bank 2 */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000,
- 0, 8);
-
- /* PC_DEBUG indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8);
-
- /* GRAS_DEBUG indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4);
-
- /* MIU debug registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a2xx_snapshot_miudebug, NULL);
-
- /* SQ DEBUG debug registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a2xx_snapshot_sqdebug, NULL);
-
- /*
- * Reading SQ THREAD causes bad things to happen on a running
- * system, so only read it if the GPU is already hung
- */
-
- if (hang) {
- /* SQ THREAD debug registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a2xx_snapshot_sqthreaddebug, NULL);
- }
- }
-
- /*
- * Only dump the istore on a hang - reading it on a running system
- * has a non zero chance of hanging the GPU.
- */
-
- if (adreno_is_a2xx(adreno_dev) && hang) {
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain,
- a2xx_snapshot_istore, NULL);
- }
-
-
- /* Reset the clock gating */
- adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);
-
- return snapshot;
-}
diff --git a/drivers/gpu/msm/adreno_a2xx_trace.c b/drivers/gpu/msm/adreno_a2xx_trace.c
deleted file mode 100644
index 87c930b..0000000
--- a/drivers/gpu/msm/adreno_a2xx_trace.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "kgsl.h"
-#include "adreno.h"
-
-/* Instantiate tracepoints */
-#define CREATE_TRACE_POINTS
-#include "adreno_a2xx_trace.h"
diff --git a/drivers/gpu/msm/adreno_a2xx_trace.h b/drivers/gpu/msm/adreno_a2xx_trace.h
deleted file mode 100644
index af355d6..0000000
--- a/drivers/gpu/msm/adreno_a2xx_trace.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#if !defined(_ADRENO_A2XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _ADRENO_A2XX_TRACE_H
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kgsl
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE adreno_a2xx_trace
-
-#include <linux/tracepoint.h>
-
-struct kgsl_device;
-
-/*
- * Tracepoint for a2xx irq. Includes status info
- */
-TRACE_EVENT(kgsl_a2xx_irq_status,
-
- TP_PROTO(struct kgsl_device *device, unsigned int master_status,
- unsigned int status),
-
- TP_ARGS(device, master_status, status),
-
- TP_STRUCT__entry(
- __string(device_name, device->name)
- __field(unsigned int, master_status)
- __field(unsigned int, status)
- ),
-
- TP_fast_assign(
- __assign_str(device_name, device->name);
- __entry->master_status = master_status;
- __entry->status = status;
- ),
-
- TP_printk(
- "d_name=%s master=%s status=%s",
- __get_str(device_name),
- __entry->master_status ? __print_flags(__entry->master_status,
- "|",
- { MASTER_INT_SIGNAL__MH_INT_STAT, "MH" },
- { MASTER_INT_SIGNAL__SQ_INT_STAT, "SQ" },
- { MASTER_INT_SIGNAL__CP_INT_STAT, "CP" },
- { MASTER_INT_SIGNAL__RBBM_INT_STAT, "RBBM" }) : "None",
- __entry->status ? __print_flags(__entry->status, "|",
- { CP_INT_CNTL__SW_INT_MASK, "SW" },
- { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
- "T0_PACKET_IN_IB" },
- { CP_INT_CNTL__OPCODE_ERROR_MASK, "OPCODE_ERROR" },
- { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
- "PROTECTED_MODE_ERROR" },
- { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
- "RESERVED_BIT_ERROR" },
- { CP_INT_CNTL__IB_ERROR_MASK, "IB_ERROR" },
- { CP_INT_CNTL__IB2_INT_MASK, "IB2" },
- { CP_INT_CNTL__IB1_INT_MASK, "IB1" },
- { CP_INT_CNTL__RB_INT_MASK, "RB" }) : "None"
- )
-);
-
-#endif /* _ADRENO_A2XX_TRACE_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
deleted file mode 100644
index 9c1341d..0000000
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ /dev/null
@@ -1,3409 +0,0 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <mach/socinfo.h>
-
-#include "kgsl.h"
-#include "adreno.h"
-#include "kgsl_sharedmem.h"
-#include "kgsl_cffdump.h"
-#include "a3xx_reg.h"
-#include "adreno_a3xx_trace.h"
-
-/*
- * Set of registers to dump for A3XX on postmortem and snapshot.
- * Registers in pairs - first value is the start offset, second
- * is the stop offset (inclusive)
- */
-
-const unsigned int a3xx_registers[] = {
- 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
- 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
- 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
- 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
- 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
- 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
- 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
- 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
- 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
- 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
- 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
- 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5,
- 0x0e41, 0x0e45, 0x0e64, 0x0e65,
- 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
- 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
- 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
- 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
- 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
- 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
- 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
- 0x2240, 0x227e,
- 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
- 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
- 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
- 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
- 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
- 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
- 0x25f0, 0x25f0,
- 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
- 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
- 0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D,
- 0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
- 0x303C, 0x303C, 0x305E, 0x305F,
-};
-
-const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
-
-/* The following HLSQ register ranges were split out of the main register
- * list because reading them during fault tolerance may cause the device
- * to hang:
- */
-const unsigned int a3xx_hlsq_registers[] = {
- 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
- 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a,
- 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
-};
-
-const unsigned int a3xx_hlsq_registers_count =
- ARRAY_SIZE(a3xx_hlsq_registers) / 2;
-
-/* The set of additional registers to be dumped for A330 */
-
-const unsigned int a330_registers[] = {
- 0x1d0, 0x1d0, 0x1d4, 0x1d4, 0x453, 0x453,
-};
-
-const unsigned int a330_registers_count = ARRAY_SIZE(a330_registers) / 2;
-
-/* Simple macro to facilitate bit setting in the gmem2sys and sys2gmem
- * functions.
- */
-
-#define _SET(_shift, _val) ((_val) << (_shift))
-
-/*
- ****************************************************************************
- *
- * Context state shadow structure:
- *
- * +---------------------+------------+-------------+---------------------+---+
- * | ALU Constant Shadow | Reg Shadow | C&V Buffers | Shader Instr Shadow |Tex|
- * +---------------------+------------+-------------+---------------------+---+
- *
- * 8K - ALU Constant Shadow (8K aligned)
- * 4K - H/W Register Shadow (8K aligned)
- * 5K - Command and Vertex Buffers
- * 8K - Shader Instruction Shadow
- * ~6K - Texture Constant Shadow
- *
- *
- ***************************************************************************
- */
-
-/* Sizes of all sections in state shadow memory */
-#define ALU_SHADOW_SIZE (8*1024) /* 8KB */
-#define REG_SHADOW_SIZE (4*1024) /* 4KB */
-#define CMD_BUFFER_SIZE (5*1024) /* 5KB */
-#define TEX_SIZE_MEM_OBJECTS 896 /* bytes */
-#define TEX_SIZE_MIPMAP 1936 /* bytes */
-#define TEX_SIZE_SAMPLER_OBJ 256 /* bytes */
-#define TEX_SHADOW_SIZE \
- ((TEX_SIZE_MEM_OBJECTS + TEX_SIZE_MIPMAP + \
- TEX_SIZE_SAMPLER_OBJ)*2) /* ~6KB */
-#define SHADER_SHADOW_SIZE (8*1024) /* 8KB */
-
-/* Total context size, excluding GMEM shadow */
-#define CONTEXT_SIZE \
- (ALU_SHADOW_SIZE+REG_SHADOW_SIZE + \
- CMD_BUFFER_SIZE+SHADER_SHADOW_SIZE + \
- TEX_SHADOW_SIZE)
-
-/* Offsets to different sections in context shadow memory */
-#define REG_OFFSET ALU_SHADOW_SIZE
-#define CMD_OFFSET (REG_OFFSET+REG_SHADOW_SIZE)
-#define SHADER_OFFSET (CMD_OFFSET+CMD_BUFFER_SIZE)
-#define TEX_OFFSET (SHADER_OFFSET+SHADER_SHADOW_SIZE)
-#define VS_TEX_OFFSET_MEM_OBJECTS TEX_OFFSET
-#define VS_TEX_OFFSET_MIPMAP (VS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
-#define VS_TEX_OFFSET_SAMPLER_OBJ (VS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
-#define FS_TEX_OFFSET_MEM_OBJECTS \
- (VS_TEX_OFFSET_SAMPLER_OBJ+TEX_SIZE_SAMPLER_OBJ)
-#define FS_TEX_OFFSET_MIPMAP (FS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
-#define FS_TEX_OFFSET_SAMPLER_OBJ (FS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
-
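
As a sanity check on the layout described above, each section offset is simply the running sum of the section sizes that precede it. This standalone snippet reproduces the define chain and prints the resulting offsets:

#include <stdio.h>

#define ALU_SHADOW_SIZE		(8 * 1024)
#define REG_SHADOW_SIZE		(4 * 1024)
#define CMD_BUFFER_SIZE		(5 * 1024)
#define SHADER_SHADOW_SIZE	(8 * 1024)

int main(void)
{
	unsigned int reg_off = ALU_SHADOW_SIZE;
	unsigned int cmd_off = reg_off + REG_SHADOW_SIZE;
	unsigned int shader_off = cmd_off + CMD_BUFFER_SIZE;
	unsigned int tex_off = shader_off + SHADER_SHADOW_SIZE;

	/* prints reg=0x2000 cmd=0x3000 shader=0x4400 tex=0x6400 */
	printf("reg=0x%x cmd=0x%x shader=0x%x tex=0x%x\n",
	       reg_off, cmd_off, shader_off, tex_off);
	return 0;
}
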
-/* The offset for fragment shader data in HLSQ context */
-#define SSIZE (16*1024)
-
-#define HLSQ_SAMPLER_OFFSET 0x000
-#define HLSQ_MEMOBJ_OFFSET 0x400
-#define HLSQ_MIPMAP_OFFSET 0x800
-
-/* Use shadow RAM */
-#define HLSQ_SHADOW_BASE (0x10000+SSIZE*2)
-
-#define REG_TO_MEM_LOOP_COUNT_SHIFT 18
-
-#define BUILD_PC_DRAW_INITIATOR(prim_type, source_select, index_size, \
- vis_cull_mode) \
- (((prim_type) << PC_DRAW_INITIATOR_PRIM_TYPE) | \
- ((source_select) << PC_DRAW_INITIATOR_SOURCE_SELECT) | \
- ((index_size & 1) << PC_DRAW_INITIATOR_INDEX_SIZE) | \
- ((index_size >> 1) << PC_DRAW_INITIATOR_SMALL_INDEX) | \
- ((vis_cull_mode) << PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE) | \
- (1 << PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE))
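
One subtlety in BUILD_PC_DRAW_INITIATOR is that the two-bit index_size value is split across two single-bit fields: bit 0 lands in INDEX_SIZE and bit 1 in SMALL_INDEX. A sketch of the same split with made-up shift values, since the real PC_DRAW_INITIATOR_* shifts are defined in a3xx_reg.h:

#include <stdio.h>

/* Assumed shift values for illustration only. */
#define PRIM_TYPE_SHIFT		0
#define SOURCE_SELECT_SHIFT	6
#define INDEX_SIZE_SHIFT	11
#define SMALL_INDEX_SHIFT	13

static unsigned int draw_initiator(unsigned int prim_type,
				   unsigned int source_select,
				   unsigned int index_size)
{
	return (prim_type << PRIM_TYPE_SHIFT) |
	       (source_select << SOURCE_SELECT_SHIFT) |
	       /* bit 0 of index_size -> INDEX_SIZE field */
	       ((index_size & 1) << INDEX_SIZE_SHIFT) |
	       /* bit 1 of index_size -> SMALL_INDEX field */
	       ((index_size >> 1) << SMALL_INDEX_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", draw_initiator(4, 2, 2));
	return 0;
}
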
-
-/*
- * List of context registers (starting from dword offset 0x2000).
- * Each line contains start and end of a range of registers.
- */
-static const unsigned int context_register_ranges[] = {
- A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_CLIP_CNTL,
- A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_GB_CLIP_ADJ,
- A3XX_GRAS_CL_VPORT_XOFFSET, A3XX_GRAS_CL_VPORT_ZSCALE,
- A3XX_GRAS_SU_POINT_MINMAX, A3XX_GRAS_SU_POINT_SIZE,
- A3XX_GRAS_SU_POLY_OFFSET_SCALE, A3XX_GRAS_SU_POLY_OFFSET_OFFSET,
- A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SU_MODE_CONTROL,
- A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_CONTROL,
- A3XX_GRAS_SC_SCREEN_SCISSOR_TL, A3XX_GRAS_SC_SCREEN_SCISSOR_BR,
- A3XX_GRAS_SC_WINDOW_SCISSOR_TL, A3XX_GRAS_SC_WINDOW_SCISSOR_BR,
- A3XX_RB_MODE_CONTROL, A3XX_RB_MRT_BLEND_CONTROL3,
- A3XX_RB_BLEND_RED, A3XX_RB_COPY_DEST_INFO,
- A3XX_RB_DEPTH_CONTROL, A3XX_RB_DEPTH_CONTROL,
- A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VSTREAM_CONTROL,
- A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL,
- A3XX_PC_PRIM_VTX_CNTL, A3XX_PC_RESTART_INDEX,
- A3XX_HLSQ_CONTROL_0_REG, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG,
- A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_0_REG,
- A3XX_HLSQ_CL_NDRANGE_2_REG, A3XX_HLSQ_CL_CONTROL_1_REG,
- A3XX_HLSQ_CL_KERNEL_CONST_REG, A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG,
- A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_HLSQ_CL_WG_OFFSET_REG,
- A3XX_VFD_CONTROL_0, A3XX_VFD_VS_THREADING_THRESHOLD,
- A3XX_SP_SP_CTRL_REG, A3XX_SP_SP_CTRL_REG,
- A3XX_SP_VS_CTRL_REG0, A3XX_SP_VS_OUT_REG_7,
- A3XX_SP_VS_VPC_DST_REG_0, A3XX_SP_VS_PVT_MEM_SIZE_REG,
- A3XX_SP_VS_LENGTH_REG, A3XX_SP_FS_PVT_MEM_SIZE_REG,
- A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, A3XX_SP_FS_FLAT_SHAD_MODE_REG_1,
- A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_OUTPUT_REG,
- A3XX_SP_FS_MRT_REG_0, A3XX_SP_FS_IMAGE_OUTPUT_REG_3,
- A3XX_SP_FS_LENGTH_REG, A3XX_SP_FS_LENGTH_REG,
- A3XX_TPL1_TP_VS_TEX_OFFSET, A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
- A3XX_VPC_ATTR, A3XX_VPC_VARY_CYLWRAP_ENABLE_1,
-};
-
-/* Global registers that need to be saved separately */
-static const unsigned int global_registers[] = {
- A3XX_GRAS_CL_USER_PLANE_X0, A3XX_GRAS_CL_USER_PLANE_Y0,
- A3XX_GRAS_CL_USER_PLANE_Z0, A3XX_GRAS_CL_USER_PLANE_W0,
- A3XX_GRAS_CL_USER_PLANE_X1, A3XX_GRAS_CL_USER_PLANE_Y1,
- A3XX_GRAS_CL_USER_PLANE_Z1, A3XX_GRAS_CL_USER_PLANE_W1,
- A3XX_GRAS_CL_USER_PLANE_X2, A3XX_GRAS_CL_USER_PLANE_Y2,
- A3XX_GRAS_CL_USER_PLANE_Z2, A3XX_GRAS_CL_USER_PLANE_W2,
- A3XX_GRAS_CL_USER_PLANE_X3, A3XX_GRAS_CL_USER_PLANE_Y3,
- A3XX_GRAS_CL_USER_PLANE_Z3, A3XX_GRAS_CL_USER_PLANE_W3,
- A3XX_GRAS_CL_USER_PLANE_X4, A3XX_GRAS_CL_USER_PLANE_Y4,
- A3XX_GRAS_CL_USER_PLANE_Z4, A3XX_GRAS_CL_USER_PLANE_W4,
- A3XX_GRAS_CL_USER_PLANE_X5, A3XX_GRAS_CL_USER_PLANE_Y5,
- A3XX_GRAS_CL_USER_PLANE_Z5, A3XX_GRAS_CL_USER_PLANE_W5,
- A3XX_VSC_BIN_SIZE,
- A3XX_VSC_PIPE_CONFIG_0, A3XX_VSC_PIPE_CONFIG_1,
- A3XX_VSC_PIPE_CONFIG_2, A3XX_VSC_PIPE_CONFIG_3,
- A3XX_VSC_PIPE_CONFIG_4, A3XX_VSC_PIPE_CONFIG_5,
- A3XX_VSC_PIPE_CONFIG_6, A3XX_VSC_PIPE_CONFIG_7,
- A3XX_VSC_PIPE_DATA_ADDRESS_0, A3XX_VSC_PIPE_DATA_ADDRESS_1,
- A3XX_VSC_PIPE_DATA_ADDRESS_2, A3XX_VSC_PIPE_DATA_ADDRESS_3,
- A3XX_VSC_PIPE_DATA_ADDRESS_4, A3XX_VSC_PIPE_DATA_ADDRESS_5,
- A3XX_VSC_PIPE_DATA_ADDRESS_6, A3XX_VSC_PIPE_DATA_ADDRESS_7,
- A3XX_VSC_PIPE_DATA_LENGTH_0, A3XX_VSC_PIPE_DATA_LENGTH_1,
- A3XX_VSC_PIPE_DATA_LENGTH_2, A3XX_VSC_PIPE_DATA_LENGTH_3,
- A3XX_VSC_PIPE_DATA_LENGTH_4, A3XX_VSC_PIPE_DATA_LENGTH_5,
- A3XX_VSC_PIPE_DATA_LENGTH_6, A3XX_VSC_PIPE_DATA_LENGTH_7,
- A3XX_VSC_SIZE_ADDRESS
-};
-
-#define GLOBAL_REGISTER_COUNT ARRAY_SIZE(global_registers)
-
-/* A scratchpad used to build commands during context create */
-static struct tmp_ctx {
- unsigned int *cmd; /* Next available dword in C&V buffer */
-
-	/* Addresses in command buffer where registers are saved */
- uint32_t reg_values[GLOBAL_REGISTER_COUNT];
- uint32_t gmem_base; /* Base GPU address of GMEM */
-} tmp_ctx;
-
-#ifndef GSL_CONTEXT_SWITCH_CPU_SYNC
-/*
- * Function for executing dest = ( (reg & and) ROL rol ) | or
- */
-static unsigned int *rmw_regtomem(unsigned int *cmd,
- unsigned int reg, unsigned int and,
- unsigned int rol, unsigned int or,
- unsigned int dest)
-{
- /* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | reg */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
- *cmd++ = 0x00000000; /* AND value */
- *cmd++ = reg; /* OR address */
-
- /* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & and) ROL rol ) | or */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = (rol << 24) | A3XX_CP_SCRATCH_REG2;
- *cmd++ = and; /* AND value */
- *cmd++ = or; /* OR value */
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_CP_SCRATCH_REG2;
- *cmd++ = dest;
-
- return cmd;
-}
-#endif
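
rmw_regtomem() emits three CP packets that together compute dest = ((reg & and) ROL rol) | or on the GPU, staging the intermediate value in CP_SCRATCH_REG2. A CPU-side model of the same arithmetic; rol32() here is a local reimplementation of the kernel helper of the same name:

#include <stdio.h>

static unsigned int rol32(unsigned int word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

/* dest = ((reg & and) ROL rol) | or, as the three packets request. */
static unsigned int rmw_model(unsigned int reg, unsigned int and,
			      unsigned int rol, unsigned int or)
{
	return rol32(reg & and, rol) | or;
}

int main(void)
{
	/* Keep the low byte, rotate it to the top byte, set bit 0:
	 * prints 0xab000001. */
	printf("0x%08x\n", rmw_model(0x000000AB, 0xFF, 24, 0x1));
	return 0;
}
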
-
-static void build_regconstantsave_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start;
- unsigned int i;
-
- drawctxt->constant_save_commands[0].hostptr = cmd;
- drawctxt->constant_save_commands[0].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- cmd++;
-
- start = cmd;
-
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
-#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /*
- * Context registers are already shadowed; just need to
- * disable shadowing to prevent corruption.
- */
-
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
- *cmd++ = 4 << 16; /* regs, start=0 */
- *cmd++ = 0x0; /* count = 0 */
-
-#else
- /*
- * Make sure the HW context has the correct register values before
- * reading them.
- */
-
- /* Write context registers into shadow */
- for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
- unsigned int start = context_register_ranges[i * 2];
- unsigned int end = context_register_ranges[i * 2 + 1];
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = ((end - start + 1) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- start;
- *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET)
- & 0xFFFFE000) + (start - 0x2000) * 4;
- }
-#endif
-
- /* Need to handle some of the global registers separately */
- for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = global_registers[i];
- *cmd++ = tmp_ctx.reg_values[i];
- }
-
- /* Save vertex shader constants */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
- *cmd++ = 0x0000FFFF;
- *cmd++ = 3; /* EXEC_COUNT */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- drawctxt->constant_save_commands[1].hostptr = cmd;
- drawctxt->constant_save_commands[1].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- dwords = SP_VS_CTRL_REG1.VSCONSTLENGTH / 4
- src = (HLSQ_SHADOW_BASE + 0x2000) / 4
-
- From register spec:
- SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
- */
- *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
- /* ALU constant shadow base */
- *cmd++ = drawctxt->gpustate.gpuaddr & 0xfffffffc;
-
- /* Save fragment shader constants */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
- *cmd++ = 0x0000FFFF;
- *cmd++ = 3; /* EXEC_COUNT */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- drawctxt->constant_save_commands[2].hostptr = cmd;
- drawctxt->constant_save_commands[2].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- dwords = SP_FS_CTRL_REG1.FSCONSTLENGTH / 4
- src = (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4
-
- From register spec:
- SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
- */
- *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
-
- /*
- From fixup:
-
- base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
- offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
-
- From register spec:
-	SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [24:16]: Constant object
-	start offset in on-chip RAM, 128-bit aligned
-
- dst = base + offset
- Because of the base alignment we can use
- dst = base | offset
- */
- *cmd++ = 0; /* dst */
-
- /* Save VS texture memory objects */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ =
- ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr +
- VS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
-
- /* Save VS texture mipmap pointers */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ =
- ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
-
- /* Save VS texture sampler objects */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr +
- VS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
-
- /* Save FS texture memory objects */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ =
- ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET + SSIZE) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr +
- FS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
-
- /* Save FS texture mipmap pointers */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ =
- ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET + SSIZE) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
-
- /* Save FS texture sampler objects */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ =
- ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
- ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET + SSIZE) / 4);
- *cmd++ =
- (drawctxt->gpustate.gpuaddr +
- FS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
-
- /* Create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->regconstant_save, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
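
The CP_REG_TO_MEM source dwords built above pack a dword loop count into the bits at and above REG_TO_MEM_LOOP_COUNT_SHIFT, with the starting register (or shadow-RAM offset) in the low bits. A standalone pack/unpack check, using the VS texture memory-objects save as the example: TEX_SIZE_MEM_OBJECTS / 4 = 224 dwords starting at (HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4 = 0x6100:

#include <stdio.h>

#define REG_TO_MEM_LOOP_COUNT_SHIFT 18

static unsigned int pack_reg_to_mem(unsigned int dwords, unsigned int src)
{
	return (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src;
}

int main(void)
{
	unsigned int word = pack_reg_to_mem(896 / 4, 0x6100);

	/* prints packed=0x03806100 count=224 src=0x6100 */
	printf("packed=0x%08x count=%u src=0x%x\n", word,
	       word >> REG_TO_MEM_LOOP_COUNT_SHIFT,
	       word & ((1u << REG_TO_MEM_LOOP_COUNT_SHIFT) - 1));
	return 0;
}
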
-
-unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
- *adreno_dev)
-{
- if (adreno_is_a305(adreno_dev))
- return A305_RBBM_CLOCK_CTL_DEFAULT;
- else if (adreno_is_a320(adreno_dev))
- return A320_RBBM_CLOCK_CTL_DEFAULT;
- else if (adreno_is_a330v2(adreno_dev))
- return A330v2_RBBM_CLOCK_CTL_DEFAULT;
- else if (adreno_is_a330(adreno_dev))
- return A330_RBBM_CLOCK_CTL_DEFAULT;
-
- BUG_ON(1);
-}
-
-/* Copy GMEM contents to system memory shadow. */
-static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt,
- struct gmem_shadow_t *shadow)
-{
- unsigned int *cmds = tmp_ctx.cmd;
- unsigned int *start = cmds;
-
- *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
- *cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
-
- /* RB_MODE_CONTROL */
- *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) |
- _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1) |
- _SET(RB_MODECONTROL_PACKER_TIMER_ENABLE, 1);
- /* RB_RENDER_CONTROL */
- *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
- _SET(RB_RENDERCONTROL_DISABLE_COLOR_PIPE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_RB_COPY_CONTROL);
- /* RB_COPY_CONTROL */
- *cmds++ = _SET(RB_COPYCONTROL_RESOLVE_CLEAR_MODE,
- RB_CLEAR_MODE_RESOLVE) |
- _SET(RB_COPYCONTROL_COPY_GMEM_BASE,
- tmp_ctx.gmem_base >> 14);
- /* RB_COPY_DEST_BASE */
- *cmds++ = _SET(RB_COPYDESTBASE_COPY_DEST_BASE,
- shadow->gmemshadow.gpuaddr >> 5);
- /* RB_COPY_DEST_PITCH */
- *cmds++ = _SET(RB_COPYDESTPITCH_COPY_DEST_PITCH,
- (shadow->pitch * 4) / 32);
- /* RB_COPY_DEST_INFO */
- *cmds++ = _SET(RB_COPYDESTINFO_COPY_DEST_TILE,
- RB_TILINGMODE_LINEAR) |
- _SET(RB_COPYDESTINFO_COPY_DEST_FORMAT, RB_R8G8B8A8_UNORM) |
- _SET(RB_COPYDESTINFO_COPY_COMPONENT_ENABLE, 0X0F) |
- _SET(RB_COPYDESTINFO_COPY_DEST_ENDIAN, RB_ENDIAN_NONE);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
- /* GRAS_SC_CONTROL */
- *cmds++ = _SET(GRAS_SC_CONTROL_RENDER_MODE, 2);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
- /* VFD_CONTROL_0 */
- *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 4) |
- _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
- _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 1) |
- _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 1);
- /* VFD_CONTROL_1 */
- *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 1) |
- _SET(VFD_CTRLREG1_REGID4VTX, 252) |
- _SET(VFD_CTRLREG1_REGID4INST, 252);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
- /* VFD_FETCH_INSTR_0_0 */
- *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
- _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
- _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
- /* VFD_FETCH_INSTR_1_0 */
- *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
- shadow->quad_vertices.gpuaddr);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
- /* VFD_DECODE_INSTR_0 */
- *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
- _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
- _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
- _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
- _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
- /* HLSQ_CONTROL_0_REG */
- *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
- _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
- _SET(HLSQ_CTRL0REG_RESERVED2, 1) |
- _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
- /* HLSQ_CONTROL_1_REG */
- *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
- _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
- /* HLSQ_CONTROL_2_REG */
- *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
- /* HLSQ_CONTROL_3_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
- /* HLSQ_VS_CONTROL_REG */
- *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
- /* HLSQ_FS_CONTROL_REG */
- *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
- _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
- _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 1);
- /* HLSQ_CONST_VSPRESV_RANGE_REG */
- *cmds++ = 0x00000000;
- /* HLSQ_CONST_FSPRESV_RANGE_REQ */
- *cmds++ = _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY, 32) |
- _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY, 32);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
- /* SP_FS_LENGTH_REG */
- *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
- /* SP_SP_CTRL_REG */
- *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
- _SET(SP_SPCTRLREG_LOMODE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
- *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
- /* SP_VS_CTRL_REG0 */
- *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
- _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
- _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
- _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
- _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
- _SET(SP_VSCTRLREG0_VSLENGTH, 1);
- /* SP_VS_CTRL_REG1 */
- *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 4);
- /* SP_VS_PARAM_REG */
- *cmds++ = _SET(SP_VSPARAMREG_PSIZEREGID, 252);
- /* SP_VS_OUT_REG_0 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_1 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_2 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_3 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_4 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_5 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_6 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG_7 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
- *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
- /* SP_VS_VPC_DST_REG_0 */
- *cmds++ = 0x00000000;
- /* SP_VS_VPC_DST_REG_1 */
- *cmds++ = 0x00000000;
- /* SP_VS_VPC_DST_REG_2 */
- *cmds++ = 0x00000000;
- /* SP_VS_VPC_DST_REG_3 */
- *cmds++ = 0x00000000;
- /* SP_VS_OBJ_OFFSET_REG */
- *cmds++ = 0x00000000;
- /* SP_VS_OBJ_START_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
- *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
- /* SP_VS_LENGTH_REG */
- *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
- /* SP_FS_CTRL_REG0 */
- *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
- _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
- _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
- _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
- _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
- _SET(SP_FSCTRLREG0_FSLENGTH, 1);
- /* SP_FS_CTRL_REG1 */
- *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
- _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
- /* SP_FS_OBJ_OFFSET_REG */
- *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
- _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 127);
- /* SP_FS_OBJ_START_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
- /* SP_FS_FLAT_SHAD_MODE_REG_0 */
- *cmds++ = 0x00000000;
- /* SP_FS_FLAT_SHAD_MODE_REG_1 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
- /* SP_FS_OUTPUT_REG */
- *cmds++ = _SET(SP_IMAGEOUTPUTREG_DEPTHOUTMODE, SP_PIXEL_BASED);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
- /* SP_FS_MRT_REG_0 */
- *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
-
- /* SP_FS_MRT_REG_1 */
- *cmds++ = 0x00000000;
- /* SP_FS_MRT_REG_2 */
- *cmds++ = 0x00000000;
- /* SP_FS_MRT_REG_3 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
- *cmds++ = CP_REG(A3XX_VPC_ATTR);
- /* VPC_ATTR */
- *cmds++ = _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
- _SET(VPC_VPCATTR_LMSIZE, 1);
- /* VPC_PACK */
- *cmds++ = 0x00000000;
- /* VPC_VARRYING_INTERUPT_MODE_0 */
- *cmds++ = 0x00000000;
- /* VPC_VARRYING_INTERUPT_MODE_1 */
- *cmds++ = 0x00000000;
- /* VPC_VARRYING_INTERUPT_MODE_2 */
- *cmds++ = 0x00000000;
- /* VPC_VARRYING_INTERUPT_MODE_3 */
- *cmds++ = 0x00000000;
- /* VPC_VARYING_PS_REPL_MODE_0 */
- *cmds++ = 0x00000000;
- /* VPC_VARYING_PS_REPL_MODE_1 */
- *cmds++ = 0x00000000;
- /* VPC_VARYING_PS_REPL_MODE_2 */
- *cmds++ = 0x00000000;
- /* VPC_VARYING_PS_REPL_MODE_3 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
- *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
-
- /* (sy)(rpt3)mov.f32f32 r0.y, (r)r1.y; */
- *cmds++ = 0x00000000; *cmds++ = 0x13001000;
- /* end; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
-
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
- *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
-
- /* (sy)(rpt3)mov.f32f32 r0.y, (r)c0.x; */
- *cmds++ = 0x00000000; *cmds++ = 0x30201b00;
- /* end; */
- *cmds++ = 0x00000000; *cmds++ = 0x03000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
-
-
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
- /* RB_MSAA_CONTROL */
- *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
- _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
- /* RB_DEPTH_CONTROL */
- *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
- /* RB_STENCIL_CONTROL */
- *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
- _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
- _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
- /* GRAS_SU_MODE_CONTROL */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
- /* RB_MRT_CONTROL0 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
- /* RB_MRT_BLEND_CONTROL0 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL1 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
- /* RB_MRT_BLEND_CONTROL1 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL2 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
- /* RB_MRT_BLEND_CONTROL2 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL3 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
- /* RB_MRT_BLEND_CONTROL3 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
- /* VFD_INDEX_MIN */
- *cmds++ = 0x00000000;
- /* VFD_INDEX_MAX */
- *cmds++ = 0x155;
- /* VFD_INSTANCEID_OFFSET */
- *cmds++ = 0x00000000;
- /* VFD_INDEX_OFFSET */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
- /* VFD_VS_THREADING_THRESHOLD */
- *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
- _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
- /* TPL1_TP_VS_TEX_OFFSET */
- *cmds++ = 0;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
- /* TPL1_TP_FS_TEX_OFFSET */
- *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
- _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
- _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
- /* PC_PRIM_VTX_CNTL */
- *cmds++ = _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
- PC_DRAW_TRIANGLES) |
- _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
- PC_DRAW_TRIANGLES) |
- _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
- /* GRAS_SC_WINDOW_SCISSOR_TL */
- *cmds++ = 0x00000000;
- /* GRAS_SC_WINDOW_SCISSOR_BR */
- *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
- _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
- /* GRAS_SC_SCREEN_SCISSOR_TL */
- *cmds++ = 0x00000000;
- /* GRAS_SC_SCREEN_SCISSOR_BR */
- *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
- _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
- /* GRAS_CL_VPORT_XOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_XSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3f800000);
- /* GRAS_CL_VPORT_YOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_YSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3f800000);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
- /* GRAS_CL_VPORT_ZOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_ZSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3f800000);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
- /* GRAS_CL_CLIP_CNTL */
- *cmds++ = _SET(GRAS_CL_CLIP_CNTL_CLIP_DISABLE, 1) |
- _SET(GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE, 1) |
- _SET(GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE, 1) |
- _SET(GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE, 1) |
- _SET(GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_CL_GB_CLIP_ADJ);
- /* GRAS_CL_GB_CLIP_ADJ */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
-
- /* oxili_generate_context_roll_packets */
- *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
- *cmds++ = 0x00000400;
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
- *cmds++ = 0x00000400;
-
- *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00008000; /* SP_VS_MEM_SIZE_REG */
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00008000; /* SP_FS_MEM_SIZE_REG */
-
- /* Clear cache invalidate bit when re-loading the shader control regs */
- *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
- *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
- _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
- _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
- _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
- _SET(SP_VSCTRLREG0_VSLENGTH, 1);
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
- *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
- _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
- _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
- _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
- _SET(SP_FSCTRLREG0_FSLENGTH, 1);
-
- *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00000000; /* SP_VS_MEM_SIZE_REG */
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00000000; /* SP_FS_MEM_SIZE_REG */
-
- /* end oxili_generate_context_roll_packets */
-
- /*
- * Resolve using two draw calls with a dummy register
-	 * write in between. This is an HLM workaround
- * that should be removed later.
- */
- *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
- *cmds++ = 0x00000000; /* Viz query info */
- *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
- PC_DI_SRC_SEL_IMMEDIATE,
- PC_DI_INDEX_SIZE_32_BIT,
- PC_DI_IGNORE_VISIBILITY);
- *cmds++ = 0x00000003; /* Num indices */
- *cmds++ = 0x00000000; /* Index 0 */
- *cmds++ = 0x00000001; /* Index 1 */
- *cmds++ = 0x00000002; /* Index 2 */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
- *cmds++ = 0x00000000; /* Viz query info */
- *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
- PC_DI_SRC_SEL_IMMEDIATE,
- PC_DI_INDEX_SIZE_32_BIT,
- PC_DI_IGNORE_VISIBILITY);
- *cmds++ = 0x00000003; /* Num indices */
- *cmds++ = 0x00000002; /* Index 0 */
- *cmds++ = 0x00000001; /* Index 1 */
- *cmds++ = 0x00000003; /* Index 2 */
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- /* Create indirect buffer command for above command sequence */
- create_ib1(drawctxt, shadow->gmem_save, start, cmds);
-
- return cmds;
-}
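
For reference, every packet header in these IBs comes from cp_type0_packet()/cp_type3_packet(), whose definitions are not part of this hunk. The sketch below is a minimal standalone restatement of the PM4 header layout the kgsl helpers are generally assumed to use (packet type in bits [31:30], dword count minus one in [29:16], opcode or base register in the low bits); treat the field positions as an assumption, not a quote from this patch.

#include <stdint.h>

#define CP_TYPE0_PKT	(0u << 30)	/* burst write to consecutive registers */
#define CP_TYPE3_PKT	(3u << 30)	/* opcode packet with a payload */

/* Header for writing 'cnt' dwords starting at register 'regindx'. */
static inline uint32_t cp_type0_packet(uint32_t regindx, uint32_t cnt)
{
	return CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF);
}

/* Header for opcode 'opcode' followed by 'cnt' payload dwords. */
static inline uint32_t cp_type3_packet(uint32_t opcode, uint32_t cnt)
{
	return CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8);
}
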
-static void build_shader_save_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start;
-
- /* Reserve space for boolean values used for COND_EXEC packet */
- drawctxt->cond_execs[0].hostptr = cmd;
- drawctxt->cond_execs[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
- drawctxt->cond_execs[1].hostptr = cmd;
- drawctxt->cond_execs[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
-
- drawctxt->shader_save_commands[0].hostptr = cmd;
- drawctxt->shader_save_commands[0].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
- drawctxt->shader_save_commands[1].hostptr = cmd;
- drawctxt->shader_save_commands[1].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
-
- start = cmd;
-
- /* Save vertex shader */
-
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
- *cmd++ = 0x0000FFFF;
- *cmd++ = 3; /* EXEC_COUNT */
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- drawctxt->shader_save_commands[2].hostptr = cmd;
- drawctxt->shader_save_commands[2].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- dwords = SP_VS_CTRL_REG0.VS_LENGTH * 8
-
- From regspec:
- SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
- If bit31 is 1, it means overflow
- or any long shader.
-
- src = (HLSQ_SHADOW_BASE + 0x1000)/4
- */
- *cmd++ = 0; /*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
- *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
-
- /* Save fragment shader */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
- *cmd++ = 0x0000FFFF;
- *cmd++ = 3; /* EXEC_COUNT */
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- drawctxt->shader_save_commands[3].hostptr = cmd;
- drawctxt->shader_save_commands[3].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- dwords = SP_FS_CTRL_REG0.FS_LENGTH * 8
-
- From regspec:
- SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
- If bit31 is 1, it means overflow
- or any long shader.
-
- fs_offset = SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC * 32
- From regspec:
-
- SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC [31:25]:
- First instruction of the whole shader will be stored from
- the offset in instruction cache, unit = 256bits, a cache line.
- It can start from 0 if no VS available.
-
- src = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE + fs_offset)/4
- */
- *cmd++ = 0; /*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
- *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
- + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
-
- /* Create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->shader_save, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
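
The CP_REG_TO_MEM ordinals above are deliberately written as 0 and patched later by the save-fixup IB. Purely as an illustration of the arithmetic spelled out in the comments, here is a CPU-side sketch of the VS case; REG_TO_MEM_LOOP_COUNT_SHIFT and HLSQ_SHADOW_BASE are defined elsewhere in the driver, so the values below are placeholders only.

#include <stdint.h>

#define REG_TO_MEM_LOOP_COUNT_SHIFT	18	/* placeholder value */
#define HLSQ_SHADOW_BASE		0x10000	/* placeholder value */

/* dwords = SP_VS_CTRL_REG0.VS_LENGTH * 8; VS_LENGTH sits in bits
 * [30:24] here (bit 31 flags overflow/long shaders and is masked
 * off by the 0x7f000000 fixup mask). One 256-bit unit = 8 dwords. */
static uint32_t vs_save_ord1(uint32_t sp_vs_ctrl_reg0)
{
	uint32_t vs_length = (sp_vs_ctrl_reg0 >> 24) & 0x7f;
	uint32_t dwords = vs_length * 8;

	return (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) |
		((HLSQ_SHADOW_BASE + 0x1000) / 4);
}
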
-
-/*
- * Make an IB to modify context save IBs with the correct shader instruction
- * and constant sizes and offsets.
- */
-
-static void build_save_fixup_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start = cmd;
-
- /* Flush HLSQ lazy updates */
- *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
- *cmd++ = 0x7; /* HLSQ_FLUSH */
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
- *cmd++ = 0x00000000; /* No start addr for full invalidate */
- *cmd++ = (unsigned int)
- UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
- UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
- 0; /* No end addr for full invalidate */
-
- /* Make sure registers are flushed */
- *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
- *cmd++ = 0;
-
-#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
-
- /* Save shader sizes */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_VS_CTRL_REG0;
- *cmd++ = drawctxt->shader_save_commands[2].gpuaddr;
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_CTRL_REG0;
- *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
-
- /* Save shader offsets */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
- *cmd++ = drawctxt->shader_save_commands[1].gpuaddr;
-
- /* Save constant sizes */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_VS_CTRL_REG1;
- *cmd++ = drawctxt->constant_save_commands[1].gpuaddr;
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_CTRL_REG1;
- *cmd++ = drawctxt->constant_save_commands[2].gpuaddr;
-
- /* Save FS constant offset */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
- *cmd++ = drawctxt->constant_save_commands[0].gpuaddr;
-
-
- /* Save VS instruction store mode */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_VS_CTRL_REG0;
- *cmd++ = drawctxt->cond_execs[0].gpuaddr;
-
- /* Save FS instruction store mode */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_CTRL_REG0;
- *cmd++ = drawctxt->cond_execs[1].gpuaddr;
-#else
-
- /* Shader save */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
- 11+REG_TO_MEM_LOOP_COUNT_SHIFT,
- (HLSQ_SHADOW_BASE + 0x1000) / 4,
- drawctxt->shader_save_commands[2].gpuaddr);
-
- /* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | SP_FS_CTRL_REG0 */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
- *cmd++ = 0x00000000; /* AND value */
- *cmd++ = A3XX_SP_FS_CTRL_REG0; /* OR address */
- /* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & 0x7f000000) >> 21 )
- | ((HLSQ_SHADOW_BASE+0x1000+SSIZE)/4) */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = ((11 + REG_TO_MEM_LOOP_COUNT_SHIFT) << 24) |
- A3XX_CP_SCRATCH_REG2;
- *cmd++ = 0x7f000000; /* AND value */
- *cmd++ = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE) / 4; /* OR value */
-
- /*
- * CP_SCRATCH_REG3 = (CP_SCRATCH_REG3 & 0x00000000) |
- * SP_FS_OBJ_OFFSET_REG
- */
-
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG3;
- *cmd++ = 0x00000000; /* AND value */
- *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG; /* OR address */
- /*
- * CP_SCRATCH_REG3 = ( (CP_SCRATCH_REG3 & 0xfe000000) >> 25 ) |
- * 0x00000000
- */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = A3XX_CP_SCRATCH_REG3;
- *cmd++ = 0xfe000000; /* AND value */
- *cmd++ = 0x00000000; /* OR value */
- /*
- * CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0xffffffff) | CP_SCRATCH_REG3
- */
- *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
- *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
- *cmd++ = 0xffffffff; /* AND value */
- *cmd++ = A3XX_CP_SCRATCH_REG3; /* OR address */
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_CP_SCRATCH_REG2;
- *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
-
- /* Constant save */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
- 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
- (HLSQ_SHADOW_BASE + 0x2000) / 4,
- drawctxt->constant_save_commands[1].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
- 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
- (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4,
- drawctxt->constant_save_commands[2].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
- 18, drawctxt->gpustate.gpuaddr & 0xfffffe00,
- drawctxt->constant_save_commands[2].gpuaddr
- + sizeof(unsigned int));
-
- /* Modify constant save conditionals */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
- 0, 0, drawctxt->cond_execs[2].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
- 0, 0, drawctxt->cond_execs[3].gpuaddr);
-
- /* Save VS instruction store mode */
-
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x00000002,
- 31, 0, drawctxt->cond_execs[0].gpuaddr);
-
- /* Save FS instruction store mode */
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x00000002,
- 31, 0, drawctxt->cond_execs[1].gpuaddr);
-
-#endif
-
- create_ib1(drawctxt, drawctxt->save_fixup, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
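
rmw_regtomem() itself is outside this hunk, but the scratch-register sequence above shows its shape: CP_REG_RMW masks a live register and rotates the field into position, then CP_REG_TO_MEM stores the result into a command slot. The model below is inferred from that sequence (the shift argument, e.g. 11 + REG_TO_MEM_LOOP_COUNT_SHIFT, appears to be a left-rotate amount that moves VS_LENGTH from bits [30:24] down to the loop-count position); it illustrates the data flow, not the helper's real signature.

#include <stdint.h>

static uint32_t rotl32(uint32_t v, unsigned int r)
{
	r &= 31;
	return (v << r) | (v >> ((32 - r) & 31));
}

/* CPU-side model of the GPU-side read-modify-write chain: mask a
 * live register, rotate the field into place, OR in a constant;
 * the result is then written over the patched IB dword. */
static uint32_t rmw_result(uint32_t reg_val, uint32_t and_mask,
			   unsigned int rot, uint32_t or_val)
{
	return rotl32(reg_val & and_mask, rot) | or_val;
}
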
-
-/****************************************************************************/
-/* Functions to build context restore IBs */
-/****************************************************************************/
-
-static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt,
- struct gmem_shadow_t *shadow)
-{
- unsigned int *cmds = tmp_ctx.cmd;
- unsigned int *start = cmds;
-
- *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
- *cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
- /* HLSQ_CONTROL_0_REG */
- *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
- _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
- _SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
- _SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
- _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
- /* HLSQ_CONTROL_1_REG */
- *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
- _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
- /* HLSQ_CONTROL_2_REG */
- *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
- /* HLSQ_CONTROL3_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BUF_INFO0);
- /* RB_MRT_BUF_INFO0 */
- *cmds++ = _SET(RB_MRTBUFINFO_COLOR_FORMAT, RB_R8G8B8A8_UNORM) |
- _SET(RB_MRTBUFINFO_COLOR_TILE_MODE, RB_TILINGMODE_32X32) |
- _SET(RB_MRTBUFINFO_COLOR_BUF_PITCH,
- (shadow->gmem_pitch * 4 * 8) / 256);
- /* RB_MRT_BUF_BASE0 */
- *cmds++ = _SET(RB_MRTBUFBASE_COLOR_BUF_BASE, tmp_ctx.gmem_base >> 5);
-
- /* Texture samplers */
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
- *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_TP_TEX_SAMPLERS << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
- *cmds++ = 0x00000240;
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- /* Texture memobjs */
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
- *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_TP_TEX_MEMOBJ << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
- *cmds++ = 0x4cc06880;
- *cmds++ = shadow->height | (shadow->width << 14);
- *cmds++ = (shadow->pitch*4*8) << 9;
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- /* Mipmap bases */
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
- *cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_TP_MIPMAP << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (14 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_TP_MIPMAP_BASE << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
- *cmds++ = shadow->gmemshadow.gpuaddr;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
- /* HLSQ_VS_CONTROL_REG */
- *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
- /* HLSQ_FS_CONTROL_REG */
- *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
- _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
- _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 2);
- /* HLSQ_CONST_VSPRESV_RANGE_REG */
- *cmds++ = 0x00000000;
- /* HLSQ_CONST_FSPRESV_RANGE_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
- /* SP_FS_LENGTH_REG */
- *cmds++ = _SET(SP_SHADERLENGTH_LEN, 2);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
- *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
- /* SP_VS_CTRL_REG0 */
- *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
- _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
- _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
- _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
- _SET(SP_VSCTRLREG0_VSLENGTH, 1);
- /* SP_VS_CTRL_REG1 */
- *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 8);
- /* SP_VS_PARAM_REG */
- *cmds++ = _SET(SP_VSPARAMREG_POSREGID, 4) |
- _SET(SP_VSPARAMREG_PSIZEREGID, 252) |
- _SET(SP_VSPARAMREG_TOTALVSOUTVAR, 1);
- /* SP_VS_OUT_REG0 */
- *cmds++ = _SET(SP_VSOUTREG_COMPMASK0, 3);
- /* SP_VS_OUT_REG1 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG2 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG3 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG4 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG5 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG6 */
- *cmds++ = 0x00000000;
- /* SP_VS_OUT_REG7 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
- *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
- /* SP_VS_VPC_DST_REG0 */
- *cmds++ = _SET(SP_VSVPCDSTREG_OUTLOC0, 8);
- /* SP_VS_VPC_DST_REG1 */
- *cmds++ = 0x00000000;
- /* SP_VS_VPC_DST_REG2 */
- *cmds++ = 0x00000000;
- /* SP_VS_VPC_DST_REG3 */
- *cmds++ = 0x00000000;
- /* SP_VS_OBJ_OFFSET_REG */
- *cmds++ = 0x00000000;
- /* SP_VS_OBJ_START_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
- *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
- /* SP_VS_LENGTH_REG */
- *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
- /* SP_FS_CTRL_REG0 */
- *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
- _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
- _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
- _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
- _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
- _SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
- _SET(SP_FSCTRLREG0_FSLENGTH, 2);
- /* SP_FS_CTRL_REG1 */
- *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
- _SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
- _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
- /* SP_FS_OBJ_OFFSET_REG */
- *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
- _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 126);
- /* SP_FS_OBJ_START_REG */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
- /* SP_FS_FLAT_SHAD_MODE_REG0 */
- *cmds++ = 0x00000000;
- /* SP_FS_FLAT_SHAD_MODE_REG1 */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
- /* SP_FS_OUT_REG */
- *cmds++ = _SET(SP_FSOUTREG_PAD0, SP_PIXEL_BASED);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
- /* SP_FS_MRT_REG0 */
- *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
- /* SP_FS_MRT_REG1 */
- *cmds++ = 0;
- /* SP_FS_MRT_REG2 */
- *cmds++ = 0;
- /* SP_FS_MRT_REG3 */
- *cmds++ = 0;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
- *cmds++ = CP_REG(A3XX_VPC_ATTR);
- /* VPC_ATTR */
- *cmds++ = _SET(VPC_VPCATTR_TOTALATTR, 2) |
- _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
- _SET(VPC_VPCATTR_LMSIZE, 1);
- /* VPC_PACK */
- *cmds++ = _SET(VPC_VPCPACK_NUMFPNONPOSVAR, 2) |
- _SET(VPC_VPCPACK_NUMNONPOSVSVAR, 2);
-	/* VPC_VARYING_INTERP_MODE_0 */
-	*cmds++ = 0x00000000;
-	/* VPC_VARYING_INTERP_MODE_1 */
-	*cmds++ = 0x00000000;
-	/* VPC_VARYING_INTERP_MODE_2 */
-	*cmds++ = 0x00000000;
-	/* VPC_VARYING_INTERP_MODE_3 */
-	*cmds++ = 0x00000000;
-	/* VPC_VARYING_PS_REPL_MODE_0 */
- *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
-	/* VPC_VARYING_PS_REPL_MODE_1 */
- *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
-	/* VPC_VARYING_PS_REPL_MODE_2 */
- *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
-	/* VPC_VARYING_PS_REPL_MODE_3 */
- *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
- _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
- /* SP_SP_CTRL_REG */
- *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
- _SET(SP_SPCTRLREG_LOMODE, 1);
-
- /* Load vertex shader */
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
- *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
- /* (sy)end; */
- *cmds++ = 0x00000000; *cmds++ = 0x13001000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
-
- /* Load fragment shader */
- *cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
- *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
- | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
- | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
- | (2 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
- *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
- | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
- /* (sy)(rpt1)bary.f (ei)r0.z, (r)0, r0.x; */
- *cmds++ = 0x00002000; *cmds++ = 0x57309902;
- /* (rpt5)nop; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000500;
- /* sam (f32)r0.xyzw, r0.z, s#0, t#0; */
- *cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
- /* (sy)mov.f32f32 r1.x, r0.x; */
- *cmds++ = 0x00000000; *cmds++ = 0x30040b00;
- /* mov.f32f32 r1.y, r0.y; */
- *cmds++ = 0x00000000; *cmds++ = 0x03000000;
- /* mov.f32f32 r1.z, r0.z; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* mov.f32f32 r1.w, r0.w; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
- /* end; */
- *cmds++ = 0x00000000; *cmds++ = 0x00000000;
-
- *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
- /* VFD_CONTROL_0 */
- *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 8) |
- _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
- _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 2) |
- _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 2);
- /* VFD_CONTROL_1 */
- *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 2) |
- _SET(VFD_CTRLREG1_REGID4VTX, 252) |
- _SET(VFD_CTRLREG1_REGID4INST, 252);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
- /* VFD_FETCH_INSTR_0_0 */
- *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 7) |
- _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 8) |
- _SET(VFD_FETCHINSTRUCTIONS_SWITCHNEXT, 1) |
- _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
- /* VFD_FETCH_INSTR_1_0 */
- *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
- shadow->quad_vertices_restore.gpuaddr);
- /* VFD_FETCH_INSTR_0_1 */
- *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
- _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
- _SET(VFD_FETCHINSTRUCTIONS_INDEXDECODE, 1) |
- _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
- /* VFD_FETCH_INSTR_1_1 */
- *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
- shadow->quad_vertices_restore.gpuaddr + 16);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
- /* VFD_DECODE_INSTR_0 */
- *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
- _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
- _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 1) |
- _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 8) |
- _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1) |
- _SET(VFD_DECODEINSTRUCTIONS_SWITCHNEXT, 1);
- /* VFD_DECODE_INSTR_1 */
- *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
- _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
- _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
- _SET(VFD_DECODEINSTRUCTIONS_REGID, 4) |
- _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
- _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
- /* RB_DEPTH_CONTROL */
- *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_LESS);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
- /* RB_STENCIL_CONTROL */
- *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_ALWAYS) |
- _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_ALWAYS) |
- _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
- /* RB_MODE_CONTROL */
- *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RENDERING_PASS) |
- _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_RENDER_CONTROL);
- /* RB_RENDER_CONTROL */
- *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
- _SET(RB_RENDERCONTROL_ALPHA_TEST_FUNC, 7);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
- /* RB_MSAA_CONTROL */
- *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
- _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
- /* RB_MRT_CONTROL0 */
- *cmds++ = _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
- /* RB_MRT_BLENDCONTROL0 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL1 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
- /* RB_MRT_BLENDCONTROL1 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL2 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
- /* RB_MRT_BLENDCONTROL2 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
- /* RB_MRT_CONTROL3 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
- _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
- /* RB_MRT_BLENDCONTROL3 */
- *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
- _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
- _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
- /* VFD_INDEX_MIN */
- *cmds++ = 0x00000000;
- /* VFD_INDEX_MAX */
- *cmds++ = 340;
-	/* VFD_INSTANCEID_OFFSET */
-	*cmds++ = 0x00000000;
-	/* VFD_INDEX_OFFSET */
-	*cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
- /* VFD_VS_THREADING_THRESHOLD */
- *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
- _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
- /* TPL1_TP_VS_TEX_OFFSET */
- *cmds++ = 0x00000000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
- /* TPL1_TP_FS_TEX_OFFSET */
- *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
- _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
- _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
- /* GRAS_SC_CONTROL */
-	*cmds++ = 0x04001000;
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
- /* GRAS_SU_MODE_CONTROL */
- *cmds++ = _SET(GRAS_SU_CTRLMODE_LINEHALFWIDTH, 2);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
- /* GRAS_SC_WINDOW_SCISSOR_TL */
- *cmds++ = 0x00000000;
- /* GRAS_SC_WINDOW_SCISSOR_BR */
- *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
- _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
- /* GRAS_SC_SCREEN_SCISSOR_TL */
- *cmds++ = 0x00000000;
- /* GRAS_SC_SCREEN_SCISSOR_BR */
- *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
- _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
- *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
- /* GRAS_CL_VPORT_XOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_XSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3F800000);
- /* GRAS_CL_VPORT_YOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_YSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3F800000);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
- *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
- /* GRAS_CL_VPORT_ZOFFSET */
- *cmds++ = 0x00000000;
- /* GRAS_CL_VPORT_ZSCALE */
- *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3F800000);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
- /* GRAS_CL_CLIP_CNTL */
- *cmds++ = _SET(GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER, 1);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_SP_FS_IMAGE_OUTPUT_REG_0);
- /* SP_FS_IMAGE_OUTPUT_REG_0 */
- *cmds++ = _SET(SP_IMAGEOUTPUTREG_MRTFORMAT, SP_R8G8B8A8_UNORM);
-
- *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
- /* PC_PRIM_VTX_CONTROL */
- *cmds++ = _SET(PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC, 2) |
- _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
- PC_DRAW_TRIANGLES) |
- _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
- PC_DRAW_TRIANGLES) |
- _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
-
-
- /* oxili_generate_context_roll_packets */
- *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
- *cmds++ = 0x00000400;
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
- *cmds++ = 0x00000400;
-
- *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00008000; /* SP_VS_MEM_SIZE_REG */
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00008000; /* SP_FS_MEM_SIZE_REG */
-
- /* Clear cache invalidate bit when re-loading the shader control regs */
- *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
- *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
- _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
- _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
- _SET(SP_VSCTRLREG0_VSLENGTH, 1);
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
- *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
- _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
- _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
- _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
- _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
- _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
- _SET(SP_FSCTRLREG0_FSLENGTH, 2);
-
- *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00000000; /* SP_VS_MEM_SIZE_REG */
-
- *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
- *cmds++ = 0x00000000; /* SP_FS_MEM_SIZE_REG */
-
- /* end oxili_generate_context_roll_packets */
-
- *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
- *cmds++ = 0x00000000; /* Viz query info */
- *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
- PC_DI_SRC_SEL_AUTO_INDEX,
- PC_DI_INDEX_SIZE_16_BIT,
- PC_DI_IGNORE_VISIBILITY);
- *cmds++ = 0x00000002; /* Num indices */
-
- /* Create indirect buffer command for above command sequence */
- create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
-
- return cmds;
-}
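
A unit conversion recurs throughout the gmem save/restore setup: surface pitches are programmed in 256-bit units. For a 32bpp R8G8B8A8 surface that is (pitch_pixels * 4 bytes * 8 bits) / 256, which is exactly what expressions like (shadow->gmem_pitch * 4 * 8) / 256 above compute. Restated as a small helper for clarity:

#include <stdint.h>

/* Pitch given in pixels, converted to the 256-bit units the RB/TP
 * registers expect; bpp = 4 matches the R8G8B8A8 surfaces used by
 * these IBs. */
static uint32_t pitch_in_256bit_units(uint32_t pitch_pixels, uint32_t bpp)
{
	return (pitch_pixels * bpp * 8) / 256;
}
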
-
-
-static void build_regrestore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *start = tmp_ctx.cmd;
- unsigned int *cmd = start;
- unsigned int *lcc_start;
-
- int i;
-
- /* Flush HLSQ lazy updates */
- *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
- *cmd++ = 0x7; /* HLSQ_FLUSH */
- *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmd++ = 0;
-
- *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
- *cmd++ = 0x00000000; /* No start addr for full invalidate */
- *cmd++ = (unsigned int)
- UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
- UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
- 0; /* No end addr for full invalidate */
-
- lcc_start = cmd;
-
- /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
- cmd++;
-
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /* Force mismatch */
- *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
-#else
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
-#endif
-
- for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
- cmd = reg_range(cmd, context_register_ranges[i * 2],
- context_register_ranges[i * 2 + 1]);
- }
-
- lcc_start[0] = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
- (cmd - lcc_start) - 1);
-
-#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- lcc_start[2] |= (0 << 24) | (4 << 16); /* Disable shadowing. */
-#else
- lcc_start[2] |= (1 << 24) | (4 << 16);
-#endif
-
- for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
- *cmd++ = cp_type0_packet(global_registers[i], 1);
- tmp_ctx.reg_values[i] = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0x00000000;
- }
-
- create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
- tmp_ctx.cmd = cmd;
-}
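
build_regrestore_cmds() reserves the CP_LOAD_CONSTANT_CONTEXT header dword up front (the "deferred" slot above) and fills it in once the register ranges have been emitted and the payload size is known. The pattern, reduced to a standalone sketch with the header encoding assumed to match the cp_type3_packet() sketch earlier:

#include <stdint.h>

typedef uint32_t *(*emit_fn)(uint32_t *cmd);

/* Reserve a type-3 header slot, emit a payload of unknown length,
 * then backfill the header with the real dword count. */
static uint32_t *emit_with_backfill(uint32_t *cmd, uint32_t opcode,
				    emit_fn emit_payload)
{
	uint32_t *hdr = cmd++;			/* placeholder header */
	uint32_t cnt;

	cmd = emit_payload(cmd);		/* payload, size unknown upfront */
	cnt = (uint32_t)(cmd - hdr) - 1;	/* dwords after the header */
	*hdr = (3u << 30) | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8);
	return cmd;
}
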
-
-static void build_constantrestore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start = cmd;
- unsigned int mode = 4; /* Indirect mode */
- unsigned int stateblock;
- unsigned int numunits;
- unsigned int statetype;
-
- drawctxt->cond_execs[2].hostptr = cmd;
- drawctxt->cond_execs[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
- drawctxt->cond_execs[3].hostptr = cmd;
- drawctxt->cond_execs[3].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
-
-#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
- *cmd++ = 4 << 16;
- *cmd++ = 0x0;
-#endif
- /* HLSQ full update */
- *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
- *cmd++ = 0x68000240; /* A3XX_HLSQ_CONTROL_0_REG */
-
-#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
- /* Re-enable shadowing */
- *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
- *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
- *cmd++ = (4 << 16) | (1 << 24);
- *cmd++ = 0x0;
-#endif
-
- /* Load vertex shader constants */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
- *cmd++ = 0x0000ffff;
- *cmd++ = 3; /* EXEC_COUNT */
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- drawctxt->constant_load_commands[0].hostptr = cmd;
- drawctxt->constant_load_commands[0].gpuaddr = virt2gpu(cmd,
- &drawctxt->gpustate);
-
- /*
- From fixup:
-
- mode = 4 (indirect)
- stateblock = 4 (Vertex constants)
- numunits = SP_VS_CTRL_REG1.VSCONSTLENGTH * 2; (256bit units)
-
- From register spec:
- SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
-
- ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
- */
-
- *cmd++ = 0; /* ord1 */
- *cmd++ = ((drawctxt->gpustate.gpuaddr) & 0xfffffffc) | 1;
-
- /* Load fragment shader constants */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
- *cmd++ = 0x0000ffff;
- *cmd++ = 3; /* EXEC_COUNT */
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- drawctxt->constant_load_commands[1].hostptr = cmd;
- drawctxt->constant_load_commands[1].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- mode = 4 (indirect)
- stateblock = 6 (Fragment constants)
- numunits = SP_FS_CTRL_REG1.FSCONSTLENGTH * 2; (256bit units)
-
- From register spec:
- SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
-
- ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
- */
-
- *cmd++ = 0; /* ord1 */
- drawctxt->constant_load_commands[2].hostptr = cmd;
- drawctxt->constant_load_commands[2].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
- base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
- offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
-
- From register spec:
-	SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [24:16]: Constant object
- start offset in on chip RAM,
- 128bit aligned
-
- ord2 = base + offset | 1
- Because of the base alignment we can use
- ord2 = base | offset | 1
- */
- *cmd++ = 0; /* ord2 */
-
- /* Restore VS texture memory objects */
- stateblock = 0;
- statetype = 1;
- numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
-
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS)
- & 0xfffffffc) | statetype;
-
- /* Restore VS texture mipmap addresses */
- stateblock = 1;
- statetype = 1;
- numunits = TEX_SIZE_MIPMAP / 4;
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP)
- & 0xfffffffc) | statetype;
-
- /* Restore VS texture sampler objects */
- stateblock = 0;
- statetype = 0;
- numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ)
- & 0xfffffffc) | statetype;
-
- /* Restore FS texture memory objects */
- stateblock = 2;
- statetype = 1;
- numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS)
- & 0xfffffffc) | statetype;
-
- /* Restore FS texture mipmap addresses */
- stateblock = 3;
- statetype = 1;
- numunits = TEX_SIZE_MIPMAP / 4;
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP)
- & 0xfffffffc) | statetype;
-
- /* Restore FS texture sampler objects */
- stateblock = 2;
- statetype = 0;
- numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
- *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ)
- & 0xfffffffc) | statetype;
-
- create_ib1(drawctxt, drawctxt->constant_restore, start, cmd);
- tmp_ctx.cmd = cmd;
-}
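
The ord1 layout described repeatedly in the comments above can be restated as a helper. The field positions ([31:22] numunits, [21:19] state block, [18:16] source mode) are taken from those comments and from the direct CP_LOAD_STATE packets emitted in this function.

#include <stdint.h>

/* ord1 for CP_LOAD_STATE: mode 4 = indirect; state block 4 = vertex
 * constants, 6 = fragment constants (per the comments above). */
static uint32_t cp_load_state_ord1(uint32_t numunits, uint32_t stateblock,
				   uint32_t mode)
{
	return (numunits << 22) | (stateblock << 19) | (mode << 16);
}

/* e.g. VS constants: cp_load_state_ord1(vsconstlength * 2, 4, 4) */
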
-
-static void build_shader_restore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start = cmd;
-
- /* Vertex shader */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
- *cmd++ = 1;
- *cmd++ = 3; /* EXEC_COUNT */
-
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- drawctxt->shader_load_commands[0].hostptr = cmd;
- drawctxt->shader_load_commands[0].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- mode = 4 (indirect)
- stateblock = 4 (Vertex shader)
- numunits = SP_VS_CTRL_REG0.VS_LENGTH
-
- From regspec:
- SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
- If bit31 is 1, it means overflow
- or any long shader.
-
-	ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16)
- */
- *cmd++ = 0; /*ord1 */
- *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
-
- /* Fragment shader */
- *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
- *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
- *cmd++ = 1;
- *cmd++ = 3; /* EXEC_COUNT */
-
- *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
- drawctxt->shader_load_commands[1].hostptr = cmd;
- drawctxt->shader_load_commands[1].gpuaddr =
- virt2gpu(cmd, &drawctxt->gpustate);
- /*
- From fixup:
-
- mode = 4 (indirect)
- stateblock = 6 (Fragment shader)
- numunits = SP_FS_CTRL_REG0.FS_LENGTH
-
- From regspec:
- SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
- If bit31 is 1, it means overflow
- or any long shader.
-
-	ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16)
- */
- *cmd++ = 0; /*ord1 */
- *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
- + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
-
- create_ib1(drawctxt, drawctxt->shader_restore, start, cmd);
- tmp_ctx.cmd = cmd;
-}
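
The CP_COND_EXEC packets gate each restore on a boolean dword that the fixup IBs write into drawctxt memory: if the test fails, the CP skips the following EXEC_COUNT dwords. A conceptual model follows; the exact hardware comparison is not visible in this hunk, so a simple non-zero test stands in for it.

#include <stdint.h>

typedef void (*run_fn)(const uint32_t *cmds, uint32_t count);

/* Run or skip the next 'exec_count' dwords based on a boolean in
 * memory; 'run' stands in for the CP consuming the guarded packet. */
static const uint32_t *cond_exec(const uint32_t *bool_addr,
				 const uint32_t *ib, uint32_t exec_count,
				 run_fn run)
{
	if (*bool_addr)
		run(ib, exec_count);
	return ib + exec_count;	/* IB continues after the guarded region */
}
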
-
-static void build_hlsqcontrol_restore_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start = cmd;
-
- *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
- *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
- drawctxt->hlsqcontrol_restore_commands[0].hostptr = cmd;
- drawctxt->hlsqcontrol_restore_commands[0].gpuaddr
- = virt2gpu(cmd, &drawctxt->gpustate);
- *cmd++ = 0;
-
- /* Create indirect buffer command for above command sequence */
- create_ib1(drawctxt, drawctxt->hlsqcontrol_restore, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
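
Every hostptr/gpuaddr pair recorded above is the same bookkeeping: remember where a command dword lives in both the CPU mapping and the GPU address space so a fixup IB can overwrite it at runtime. A minimal sketch of the idiom; virt2gpu() is the driver's real translator, and the offset arithmetic below is an assumption about what it does.

#include <stdint.h>

struct patch_slot {
	uint32_t *hostptr;	/* CPU view, used when building/patching */
	uint32_t gpuaddr;	/* GPU view, target of CP_REG_TO_MEM */
};

/* Record both views of the current slot and emit a placeholder. */
static uint32_t *reserve_patch_slot(uint32_t *cmd, uint32_t *host_base,
				    uint32_t gpu_base, struct patch_slot *s)
{
	s->hostptr = cmd;
	s->gpuaddr = gpu_base + 4 * (uint32_t)(cmd - host_base);
	*cmd++ = 0;	/* patched at runtime by a fixup IB */
	return cmd;
}
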
-
-/* IB that modifies the shader and constant sizes and offsets in restore IBs. */
-static void build_restore_fixup_cmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- unsigned int *cmd = tmp_ctx.cmd;
- unsigned int *start = cmd;
-
-#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
- /* Save shader sizes */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_VS_CTRL_REG0;
- *cmd++ = drawctxt->shader_load_commands[0].gpuaddr;
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_CTRL_REG0;
- *cmd++ = drawctxt->shader_load_commands[1].gpuaddr;
-
- /* Save constant sizes */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_VS_CTRL_REG1;
- *cmd++ = drawctxt->constant_load_commands[0].gpuaddr;
-
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_CTRL_REG1;
- *cmd++ = drawctxt->constant_load_commands[1].gpuaddr;
-
- /* Save constant offsets */
- *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
- *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
- *cmd++ = drawctxt->constant_load_commands[2].gpuaddr;
-#else
- /* Save shader sizes */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
- 30, (4 << 19) | (4 << 16),
- drawctxt->shader_load_commands[0].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x7f000000,
- 30, (6 << 19) | (4 << 16),
- drawctxt->shader_load_commands[1].gpuaddr);
-
- /* Save constant sizes */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
- 23, (4 << 19) | (4 << 16),
- drawctxt->constant_load_commands[0].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
- 23, (6 << 19) | (4 << 16),
- drawctxt->constant_load_commands[1].gpuaddr);
-
- /* Modify constant restore conditionals */
- cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
- 0, 0, drawctxt->cond_execs[2].gpuaddr);
-
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
- 0, 0, drawctxt->cond_execs[3].gpuaddr);
-
- /* Save fragment constant shadow offset */
- cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
- 18, (drawctxt->gpustate.gpuaddr & 0xfffffe00) | 1,
- drawctxt->constant_load_commands[2].gpuaddr);
-#endif
-
-	/* Use a mask value to avoid flushing the HLSQ, which would cause
-	   the HW to discard all the shader data */
-
- cmd = rmw_regtomem(cmd, A3XX_HLSQ_CONTROL_0_REG, 0x9ffffdff,
- 0, 0, drawctxt->hlsqcontrol_restore_commands[0].gpuaddr);
-
- create_ib1(drawctxt, drawctxt->restore_fixup, start, cmd);
-
- tmp_ctx.cmd = cmd;
-}
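
The final fixup above captures HLSQ_CONTROL_0 through the 0x9ffffdff mask so that replaying the register on restore does not set the bits that trigger an HLSQ flush (which would throw away the shader state just loaded). Stated on the CPU; which bits those are is hardware-defined and only implied by this hunk.

#include <stdint.h>

/* Saved HLSQ_CONTROL_0 with the flush-triggering bits masked off. */
static uint32_t hlsq_restore_value(uint32_t saved_hlsq_control_0)
{
	return saved_hlsq_control_0 & 0x9ffffdff;
}
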
-
-static int a3xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
-
- build_regrestore_cmds(adreno_dev, drawctxt);
- build_constantrestore_cmds(adreno_dev, drawctxt);
- build_hlsqcontrol_restore_cmds(adreno_dev, drawctxt);
- build_regconstantsave_cmds(adreno_dev, drawctxt);
- build_shader_save_cmds(adreno_dev, drawctxt);
- build_shader_restore_cmds(adreno_dev, drawctxt);
- build_restore_fixup_cmds(adreno_dev, drawctxt);
- build_save_fixup_cmds(adreno_dev, drawctxt);
-
- return 0;
-}
-
-/* create buffers for saving/restoring registers, constants, & GMEM */
-static int a3xx_create_gmem_shadow(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- int result;
-
- calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
- tmp_ctx.gmem_base = adreno_dev->gmem_base;
-
- result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
- drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
-
- if (result)
- return result;
-
- build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
- &tmp_ctx.cmd);
-
- tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
- &drawctxt->context_gmem_shadow);
- tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
- &drawctxt->context_gmem_shadow);
-
- kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
- KGSL_CACHE_OP_FLUSH);
-
- drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
-
- return 0;
-}
-
-static int a3xx_drawctxt_create(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- int ret;
-
-	/*
-	 * Allocate memory for the GPU state and the context commands.
-	 * Despite the name, this is much more than just storage for
-	 * the gpustate: it also holds the command space for the gmem
-	 * save/restore IBs plus texture and vertex buffer storage.
-	 */
-
- ret = kgsl_allocate(&drawctxt->gpustate,
- drawctxt->pagetable, CONTEXT_SIZE);
-
- if (ret)
- return ret;
-
- kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
- tmp_ctx.cmd = drawctxt->gpustate.hostptr + CMD_OFFSET;
-
- if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
- ret = a3xx_create_gpustate_shadow(adreno_dev, drawctxt);
- if (ret)
- goto done;
-
- drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
- }
-
- if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC))
- ret = a3xx_create_gmem_shadow(adreno_dev, drawctxt);
-
-done:
- if (ret)
- kgsl_sharedmem_free(&drawctxt->gpustate);
-
- return ret;
-}
-
-static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
- struct adreno_context *context)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
-
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
-
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- /* Fixup self modifying IBs for save operations */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
-
- /* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->regconstant_save, 3);
-
- if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
- /* Save shader instructions */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
-
- context->flags |= CTXT_FLAGS_SHADER_RESTORE;
- }
- }
-
- if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
- (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
- /*
- * Save GMEM (note: changes shader. shader must
- * already be saved.)
- */
-
- kgsl_cffdump_syncmem(NULL,
- &context->gpustate,
- context->context_gmem_shadow.gmem_save[1],
- context->context_gmem_shadow.gmem_save[2] << 2, true);
-
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE,
- context->context_gmem_shadow.
- gmem_save, 3);
- context->flags |= CTXT_FLAGS_GMEM_RESTORE;
- }
-}
-
-static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
- struct adreno_context *context)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int cmds[5];
-
- if (context == NULL) {
- /* No context - set the default pagetable and that's it */
- kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
- adreno_dev->drawctxt_active->id);
- return;
- }
-
- KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
-
- cmds[0] = cp_nop_packet(1);
- cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
- cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
- cmds[3] = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
- cmds[4] = context->id;
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
- cmds, 5);
- kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
-
- /*
- * Restore GMEM. (note: changes shader.
- * Shader must not already be restored.)
- */
-
- if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
- kgsl_cffdump_syncmem(NULL,
- &context->gpustate,
- context->context_gmem_shadow.gmem_restore[1],
- context->context_gmem_shadow.gmem_restore[2] << 2,
- true);
-
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE,
- context->context_gmem_shadow.
- gmem_restore, 3);
- context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
- }
-
- if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
-
- /* Fixup self modifying IBs for restore operations */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->restore_fixup, 3);
-
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->constant_restore, 3);
-
- if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->shader_restore, 3);
-
- /* Restore HLSQ_CONTROL_0 register */
- adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE,
- context->hlsqcontrol_restore, 3);
- }
-}
-
-static int a3xx_rb_init(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- unsigned int *cmds, cmds_gpu;
- cmds = adreno_ringbuffer_allocspace(rb, NULL, 18);
- if (cmds == NULL)
- return -ENOMEM;
-
- cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
-
- GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
- GSL_RB_WRITE(cmds, cmds_gpu, 0x000003f7);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000080);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000100);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000180);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00006600);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000150);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x0000014e);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000154);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- /* Enable protected mode registers for A3XX */
- GSL_RB_WRITE(cmds, cmds_gpu, 0x20000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
- adreno_ringbuffer_submit(rb);
-
- return 0;
-}
-
-static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- const char *err = "";
-
- switch (bit) {
- case A3XX_INT_RBBM_AHB_ERROR: {
- unsigned int reg;
-
- adreno_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);
-
- /*
- * Return the word address of the erroring register so that it
- * matches the register specification
- */
-
- KGSL_DRV_CRIT(device,
- "RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
- reg & (1 << 28) ? "WRITE" : "READ",
- (reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
- (reg >> 24) & 0x3);
-
- /* Clear the error */
- adreno_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
- return;
- }
- case A3XX_INT_RBBM_REG_TIMEOUT:
- err = "RBBM: AHB register timeout";
- break;
- case A3XX_INT_RBBM_ME_MS_TIMEOUT:
- err = "RBBM: ME master split timeout";
- break;
- case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
- err = "RBBM: PFP master split timeout";
- break;
- case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
- err = "RBBM: ATB bus oveflow";
- break;
- case A3XX_INT_VFD_ERROR:
- err = "VFD: Out of bounds access";
- break;
- case A3XX_INT_CP_T0_PACKET_IN_IB:
- err = "ringbuffer TO packet in IB interrupt";
- break;
- case A3XX_INT_CP_OPCODE_ERROR:
- err = "ringbuffer opcode error interrupt";
- break;
- case A3XX_INT_CP_RESERVED_BIT_ERROR:
- err = "ringbuffer reserved bit error interrupt";
- break;
- case A3XX_INT_CP_HW_FAULT:
- err = "ringbuffer hardware fault";
- break;
- case A3XX_INT_CP_REG_PROTECT_FAULT: {
- unsigned int reg;
- kgsl_regread(device, A3XX_CP_PROTECT_STATUS, &reg);
-
- KGSL_DRV_CRIT(device,
- "CP | Protected mode error| %s | addr=%x\n",
- reg & (1 << 24) ? "WRITE" : "READ",
- (reg & 0x1FFFF) >> 2);
- return;
- }
- case A3XX_INT_CP_AHB_ERROR_HALT:
- err = "ringbuffer AHB error interrupt";
- break;
- case A3XX_INT_MISC_HANG_DETECT:
- err = "MISC: GPU hang detected";
- break;
- case A3XX_INT_UCHE_OOB_ACCESS:
- err = "UCHE: Out of bounds access";
- break;
- }
-
- KGSL_DRV_CRIT(device, "%s\n", err);
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-}
-
-static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- /* Wake up everybody waiting for the interrupt */
- wake_up_interruptible_all(&device->wait_queue);
-
- /* Schedule work to free mem and issue ibs */
- queue_work(device->work_queue, &device->ts_expired_ws);
-}
-
-/**
- * struct a3xx_perfcounter_register - Define a performance counter register
- * @load_bit: the bit to set in RBBM_LOAD_CMD0/RBBM_LOAD_CMD1 to force the RBBM
- * to load the reset value into the appropriate counter
- * @select: The dword offset of the register to write the selected
- * countable into
- */
-
-struct a3xx_perfcounter_register {
- unsigned int load_bit;
- unsigned int select;
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_cp[] = {
- { 0, A3XX_CP_PERFCOUNTER_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rbbm[] = {
- { 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
- { 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_pc[] = {
- { 3, A3XX_PC_PERFCOUNTER0_SELECT },
- { 4, A3XX_PC_PERFCOUNTER1_SELECT },
- { 5, A3XX_PC_PERFCOUNTER2_SELECT },
- { 6, A3XX_PC_PERFCOUNTER3_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vfd[] = {
- { 7, A3XX_VFD_PERFCOUNTER0_SELECT },
- { 8, A3XX_VFD_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_hlsq[] = {
- { 9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
- { 10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
- { 11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
- { 12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
- { 13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
- { 14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vpc[] = {
- { 15, A3XX_VPC_PERFCOUNTER0_SELECT },
- { 16, A3XX_VPC_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tse[] = {
- { 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
- { 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_ras[] = {
- { 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
- { 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_uche[] = {
- { 21, A3XX_UCHE_PERFCOUNTER0_SELECT },
- { 22, A3XX_UCHE_PERFCOUNTER1_SELECT },
- { 23, A3XX_UCHE_PERFCOUNTER2_SELECT },
- { 24, A3XX_UCHE_PERFCOUNTER3_SELECT },
- { 25, A3XX_UCHE_PERFCOUNTER4_SELECT },
- { 26, A3XX_UCHE_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tp[] = {
- { 27, A3XX_TP_PERFCOUNTER0_SELECT },
- { 28, A3XX_TP_PERFCOUNTER1_SELECT },
- { 29, A3XX_TP_PERFCOUNTER2_SELECT },
- { 30, A3XX_TP_PERFCOUNTER3_SELECT },
- { 31, A3XX_TP_PERFCOUNTER4_SELECT },
- { 32, A3XX_TP_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_sp[] = {
- { 33, A3XX_SP_PERFCOUNTER0_SELECT },
- { 34, A3XX_SP_PERFCOUNTER1_SELECT },
- { 35, A3XX_SP_PERFCOUNTER2_SELECT },
- { 36, A3XX_SP_PERFCOUNTER3_SELECT },
- { 37, A3XX_SP_PERFCOUNTER4_SELECT },
- { 38, A3XX_SP_PERFCOUNTER5_SELECT },
- { 39, A3XX_SP_PERFCOUNTER6_SELECT },
- { 40, A3XX_SP_PERFCOUNTER7_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rb[] = {
- { 41, A3XX_RB_PERFCOUNTER0_SELECT },
- { 42, A3XX_RB_PERFCOUNTER1_SELECT },
-};
-
-#define REGCOUNTER_GROUP(_x) { (_x), ARRAY_SIZE((_x)) }
-
-static struct {
- struct a3xx_perfcounter_register *regs;
- int count;
-} a3xx_perfcounter_reglist[] = {
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_cp),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_rbbm),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_pc),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_vfd),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_hlsq),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_vpc),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_tse),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_ras),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_uche),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_tp),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_sp),
- REGCOUNTER_GROUP(a3xx_perfcounter_reg_rb),
-};
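REGCOUNTER_GROUP() pairs each select-register array with its ARRAY_SIZE() so that lookups can be bounds-checked by table index alone. A minimal sketch of the lookup this table enables, mirroring the checks in a3xx_perfcounter_enable() below:

    struct a3xx_perfcounter_register *reg = NULL;

    if (group < ARRAY_SIZE(a3xx_perfcounter_reglist) &&
        counter < a3xx_perfcounter_reglist[group].count)
            reg = &a3xx_perfcounter_reglist[group].regs[counter];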
-
-static void a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
- unsigned int countable)
-{
- unsigned int in, out;
-
- adreno_regread(device, A3XX_RBBM_RBBM_CTL, &in);
-
- if (countable == 0)
- out = in | RBBM_RBBM_CTL_RESET_PWR_CTR0;
- else
- out = in | RBBM_RBBM_CTL_RESET_PWR_CTR1;
-
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
-
- if (countable == 0)
- out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
- else
- out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
-
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
-
- return;
-}
-
-static void a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
- unsigned int counter,
- unsigned int countable)
-{
- unsigned int in, out, bit, sel;
-
- if (counter > 1 || countable > 0x7f)
- return;
-
- adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
- adreno_regread(device, A3XX_VBIF_PERF_CNT_SEL, &sel);
-
- if (counter == 0) {
- bit = VBIF_PERF_CNT_0;
- sel = (sel & ~VBIF_PERF_CNT_0_SEL_MASK) | countable;
- } else {
- bit = VBIF_PERF_CNT_1;
- sel = (sel & ~VBIF_PERF_CNT_1_SEL_MASK)
- | (countable << VBIF_PERF_CNT_1_SEL);
- }
-
- out = in | bit;
-
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_SEL, sel);
-
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
-
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
-}
-
-static void a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
- unsigned int countable)
-{
- unsigned int in, out, bit;
-
- adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
- if (countable == 0)
- bit = VBIF_PERF_PWR_CNT_0;
- else if (countable == 1)
- bit = VBIF_PERF_PWR_CNT_1;
- else
- bit = VBIF_PERF_PWR_CNT_2;
-
- out = in | bit;
-
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
-
- adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
-}
-
-/*
- * a3xx_perfcounter_enable - Configure a performance counter for a countable
- * @adreno_dev - Adreno device to configure
- * @group - Desired performance counter group
- * @counter - Desired performance counter in the group
- * @countable - Desired countable
- *
- * Physically set up a counter within a group with the desired countable
- */
-
-static void a3xx_perfcounter_enable(struct adreno_device *adreno_dev,
- unsigned int group, unsigned int counter, unsigned int countable)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int val = 0;
- struct a3xx_perfcounter_register *reg;
-
- if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
- return;
-
- if (counter >= a3xx_perfcounter_reglist[group].count)
- return;
-
- /* Special cases */
- if (group == KGSL_PERFCOUNTER_GROUP_PWR)
- return a3xx_perfcounter_enable_pwr(device, countable);
- else if (group == KGSL_PERFCOUNTER_GROUP_VBIF)
- return a3xx_perfcounter_enable_vbif(device, counter, countable);
- else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
- return a3xx_perfcounter_enable_vbif_pwr(device, countable);
-
- reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
-
- /* Select the desired perfcounter */
- adreno_regwrite(device, reg->select, countable);
-
- if (reg->load_bit < 32) {
- val = 1 << reg->load_bit;
- adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
- } else {
- val = 1 << (reg->load_bit - 32);
- adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
- }
-}
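Since the RBBM splits the load commands across two 32-bit registers, a load_bit of 32 or above is rebased before the write. A worked example, using load_bit 41 from the RB group table above:

    /* RB counter 0 has load_bit 41: bit (41 - 32) = 9 of LOAD_CMD1 */
    adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, 1 << (41 - 32));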
-
-static uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev,
- unsigned int group, unsigned int counter,
- unsigned int offset)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct a3xx_perfcounter_register *reg = NULL;
- unsigned int lo = 0, hi = 0;
- unsigned int val;
-
- if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
- return 0;
-
- if (counter >= a3xx_perfcounter_reglist[group].count)
- return 0;
-
- reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
-
- /* Freeze the counter */
- adreno_regread(device, A3XX_RBBM_PERFCTR_CTL, &val);
- val &= ~reg->load_bit;
- adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);
-
- /* Read the values */
- adreno_regread(device, offset, &lo);
- adreno_regread(device, offset + 1, &hi);
-
- /* Re-Enable the counter */
- val |= reg->load_bit;
- adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);
-
- return (((uint64_t) hi) << 32) | lo;
-}
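The read above assumes each 64-bit counter is laid out as two consecutive dwords, with HI one register offset after LO. A minimal helper capturing just that access pattern, under the same layout assumption:

    static uint64_t read_counter64(struct kgsl_device *device,
                                   unsigned int lo_offset)
    {
            unsigned int lo, hi;

            adreno_regread(device, lo_offset, &lo);      /* low dword */
            adreno_regread(device, lo_offset + 1, &hi);  /* high dword */
            return (((uint64_t) hi) << 32) | lo;
    }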
-
-#define A3XX_IRQ_CALLBACK(_c) { .func = _c }
-
-#define A3XX_INT_MASK \
- ((1 << A3XX_INT_RBBM_AHB_ERROR) | \
- (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
- (1 << A3XX_INT_CP_T0_PACKET_IN_IB) | \
- (1 << A3XX_INT_CP_OPCODE_ERROR) | \
- (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
- (1 << A3XX_INT_CP_HW_FAULT) | \
- (1 << A3XX_INT_CP_IB1_INT) | \
- (1 << A3XX_INT_CP_IB2_INT) | \
- (1 << A3XX_INT_CP_RB_INT) | \
- (1 << A3XX_INT_CP_REG_PROTECT_FAULT) | \
- (1 << A3XX_INT_CP_AHB_ERROR_HALT) | \
- (1 << A3XX_INT_UCHE_OOB_ACCESS))
-
-static struct {
- void (*func)(struct adreno_device *, int);
-} a3xx_irq_funcs[] = {
- A3XX_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 1 - RBBM_AHB_ERROR */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 2 - RBBM_REG_TIMEOUT */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 3 - RBBM_ME_MS_TIMEOUT */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 4 - RBBM_PFP_MS_TIMEOUT */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 5 - RBBM_ATB_BUS_OVERFLOW */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 6 - RBBM_VFD_ERROR */
- A3XX_IRQ_CALLBACK(NULL), /* 7 - CP_SW */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 8 - CP_T0_PACKET_IN_IB */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 9 - CP_OPCODE_ERROR */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 10 - CP_RESERVED_BIT_ERROR */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 11 - CP_HW_FAULT */
- A3XX_IRQ_CALLBACK(NULL), /* 12 - CP_DMA */
- A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 13 - CP_IB2_INT */
- A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 14 - CP_IB1_INT */
- A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 15 - CP_RB_INT */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 16 - CP_REG_PROTECT_FAULT */
- A3XX_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
- A3XX_IRQ_CALLBACK(NULL), /* 18 - CP_VS_DONE_TS */
- A3XX_IRQ_CALLBACK(NULL), /* 19 - CP_PS_DONE_TS */
- A3XX_IRQ_CALLBACK(NULL), /* 20 - CP_CACHE_FLUSH_TS */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 21 - CP_AHB_ERROR_FAULT */
- A3XX_IRQ_CALLBACK(NULL), /* 22 - Unused */
- A3XX_IRQ_CALLBACK(NULL), /* 23 - Unused */
- A3XX_IRQ_CALLBACK(NULL), /* 24 - MISC_HANG_DETECT */
- A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 25 - UCHE_OOB_ACCESS */
- /* 26 to 31 - Unused */
-};
-
-static irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- irqreturn_t ret = IRQ_NONE;
- unsigned int status, tmp;
- int i;
-
- adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
-
- for (tmp = status, i = 0; tmp && i < ARRAY_SIZE(a3xx_irq_funcs); i++) {
- if (tmp & 1) {
- if (a3xx_irq_funcs[i].func != NULL) {
- a3xx_irq_funcs[i].func(adreno_dev, i);
- ret = IRQ_HANDLED;
- } else {
- KGSL_DRV_CRIT(device,
- "Unhandled interrupt bit %x\n", i);
- }
- }
-
- tmp >>= 1;
- }
-
- trace_kgsl_a3xx_irq_status(device, status);
-
- if (status)
- adreno_regwrite(&adreno_dev->dev, A3XX_RBBM_INT_CLEAR_CMD,
- status);
- return ret;
-}
-
-static void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- if (state)
- adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
- else
- adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
-}
-
-static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
-{
- unsigned int status;
-
- adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
-
- return (status & A3XX_INT_MASK) ? 1 : 0;
-}
-
-static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- unsigned int val;
- unsigned int ret = 0;
-
- /* Read the value */
- adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
-
- /* Return 0 for the first read */
- if (adreno_dev->gpu_cycles != 0) {
- if (val < adreno_dev->gpu_cycles)
- ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val;
- else
- ret = val - adreno_dev->gpu_cycles;
- }
-
- adreno_dev->gpu_cycles = val;
- return ret;
-}
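The delta computation above handles a single wraparound of the free-running 32-bit counter. Reduced to a standalone sketch (prev/cur are illustrative names); note that plain modular subtraction cur - prev would come out one higher per wrap than the explicit branch used here:

    static unsigned int busy_delta32(unsigned int prev, unsigned int cur)
    {
            if (cur < prev)  /* counter wrapped past 0xFFFFFFFF */
                    return (0xFFFFFFFF - prev) + cur;
            return cur - prev;
    }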
-
-struct a3xx_vbif_data {
- unsigned int reg;
- unsigned int val;
-};
-
-/* VBIF registers start after 0x3000, so use 0x0 as the end-of-list marker */
-static struct a3xx_vbif_data a305_vbif[] = {
- /* Set up 16 deep read/write request queues */
- { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
- { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
- { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
- /* Enable WR-REQ */
- { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
- /* Set up round robin arbitration between both AXI ports */
- { A3XX_VBIF_ARB_CTL, 0x00000030 },
- /* Set up AOOO */
- { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
- { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
- {0, 0},
-};
-
-static struct a3xx_vbif_data a320_vbif[] = {
- /* Set up 16 deep read/write request queues */
- { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
- { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
- { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
- { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
- /* Enable WR-REQ */
- { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
- /* Set up round robin arbitration between both AXI ports */
- { A3XX_VBIF_ARB_CTL, 0x00000030 },
- /* Set up AOOO */
- { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
- { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
- /* Enable 1K sort */
- { A3XX_VBIF_ABIT_SORT, 0x000000FF },
- { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
- {0, 0},
-};
-
-static struct a3xx_vbif_data a330_vbif[] = {
- /* Set up 16 deep read/write request queues */
- { A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
- { A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818 },
- { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818 },
- { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818 },
- { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
- { A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
- { A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818 },
- /* Enable WR-REQ */
- { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
- /* Set up round robin arbitration between both AXI ports */
- { A3XX_VBIF_ARB_CTL, 0x00000030 },
- /* Set up VBIF_ROUND_ROBIN_QOS_ARB */
- { A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001 },
- /* Set up AOOO */
- { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003F },
- { A3XX_VBIF_OUT_AXI_AOOO, 0x003F003F },
- /* Enable 1K sort */
- { A3XX_VBIF_ABIT_SORT, 0x0001003F },
- { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
- /* Disable VBIF clock gating. This allows AXI to run at a
- * higher frequency than the GPU.
- */
- { A3XX_VBIF_CLKON, 1 },
- {0, 0},
-};
-
-/*
- * Most of the VBIF registers on 8974v2 have the correct values at power on, so
- * we won't modify those if we don't need to
- */
-static struct a3xx_vbif_data a330v2_vbif[] = {
- /* Enable 1k sort */
- { A3XX_VBIF_ABIT_SORT, 0x0001003F },
- { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
- /* Enable WR-REQ */
- { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
- { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
- /* Set up VBIF_ROUND_ROBIN_QOS_ARB */
- { A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
- /* Disable VBIF clock gating. This allows AXI to run at a
- * higher frequency than the GPU.
- */
- { A3XX_VBIF_CLKON, 1 },
- {0, 0},
-};
-
-static struct {
- int(*devfunc)(struct adreno_device *);
- struct a3xx_vbif_data *vbif;
-} a3xx_vbif_platforms[] = {
- { adreno_is_a305, a305_vbif },
- { adreno_is_a320, a320_vbif },
- /* A330v2 needs to be ahead of A330 so the right device matches */
- { adreno_is_a330v2, a330v2_vbif },
- { adreno_is_a330, a330_vbif },
-};
-
-static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
-{
- /*
- * Set SP to count SP_ALU_ACTIVE_CYCLES; it includes
- * all ALU instruction execution regardless of precision or shader ID.
- * Set SP to count SP0_ICL1_MISSES; it counts
- * USP L1 instruction miss requests.
- * Set SP to count SP_FS_CFLOW_INSTRUCTIONS; it
- * counts USP flow control instruction execution.
- * We will use these to augment our hang detection.
- */
- if (adreno_dev->fast_hang_detect) {
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
- PERFCOUNTER_FLAG_KERNEL);
- ft_detect_regs[7] = ft_detect_regs[6] + 1;
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP0_ICL1_MISSES, &ft_detect_regs[8],
- PERFCOUNTER_FLAG_KERNEL);
- ft_detect_regs[9] = ft_detect_regs[8] + 1;
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
- PERFCOUNTER_FLAG_KERNEL);
- ft_detect_regs[11] = ft_detect_regs[10] + 1;
- }
-
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
- SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);
-
- /* Reserve and start countable 1 in the PWR perfcounter group */
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
- NULL, PERFCOUNTER_FLAG_KERNEL);
-}
-
-/**
- * a3xx_protect_init() - Initializes register protection on a3xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
-static void a3xx_protect_init(struct kgsl_device *device)
-{
- int index = 0;
-
- /* enable access protection to privileged registers */
- kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
-
- /* RBBM registers */
- adreno_set_protected_registers(device, &index, 0x18, 0);
- adreno_set_protected_registers(device, &index, 0x20, 2);
- adreno_set_protected_registers(device, &index, 0x33, 0);
- adreno_set_protected_registers(device, &index, 0x42, 0);
- adreno_set_protected_registers(device, &index, 0x50, 4);
- adreno_set_protected_registers(device, &index, 0x63, 0);
- adreno_set_protected_registers(device, &index, 0x100, 4);
-
- /* CP registers */
- adreno_set_protected_registers(device, &index, 0x1C0, 5);
- adreno_set_protected_registers(device, &index, 0x1EC, 1);
- adreno_set_protected_registers(device, &index, 0x1F6, 1);
- adreno_set_protected_registers(device, &index, 0x1F8, 2);
- adreno_set_protected_registers(device, &index, 0x45E, 2);
- adreno_set_protected_registers(device, &index, 0x460, 4);
-
- /* RB registers */
- adreno_set_protected_registers(device, &index, 0xCC0, 0);
-
- /* VBIF registers */
- adreno_set_protected_registers(device, &index, 0x3000, 6);
-
- /* SMMU registers */
- adreno_set_protected_registers(device, &index, 0x4000, 14);
-}
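Judging from these call sites, the last argument to adreno_set_protected_registers() reads as a log2 range: each call appears to protect a power-of-two block of registers starting at the given offset (an inference from the call sites, not a documented contract). For example:

    /* Assumed semantics: protect 2^4 = 16 registers starting at 0x460 */
    adreno_set_protected_registers(device, &index, 0x460, 4);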
-
-static void a3xx_start(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct a3xx_vbif_data *vbif = NULL;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(a3xx_vbif_platforms); i++) {
- if (a3xx_vbif_platforms[i].devfunc(adreno_dev)) {
- vbif = a3xx_vbif_platforms[i].vbif;
- break;
- }
- }
-
- BUG_ON(vbif == NULL);
-
- while (vbif->reg != 0) {
- adreno_regwrite(device, vbif->reg, vbif->val);
- vbif++;
- }
-
- /* Make all blocks contribute to the GPU BUSY perf counter */
- adreno_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
-
- /* Tune the hysteresis counters for SP and CP idle detection */
- adreno_regwrite(device, A3XX_RBBM_SP_HYST_CNT, 0x10);
- adreno_regwrite(device, A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
-
- /* Enable the RBBM error reporting bits. This lets us get
- useful information on failure */
-
- adreno_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);
-
- /* Enable AHB error reporting */
- adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
-
- /* Turn on the power counters */
- adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00030000);
-
- /* Turn on hang detection - this spews a lot of useful information
- * into the RBBM registers on a hang */
-
- adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
- (1 << 16) | 0xFFF);
-
- /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
- adreno_regwrite(device, A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
-
- /* Enable Clock gating */
- adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
- adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
-
- if (adreno_is_a330v2(adreno_dev))
- adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
- A330v2_RBBM_GPR0_CTL_DEFAULT);
- else if (adreno_is_a330(adreno_dev))
- adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
- A330_RBBM_GPR0_CTL_DEFAULT);
-
- /* Set the OCMEM base address for A330 */
- if (adreno_is_a330(adreno_dev)) {
- adreno_regwrite(device, A3XX_RB_GMEM_BASE_ADDR,
- (unsigned int)(adreno_dev->ocmem_base >> 14));
- }
- /* Turn on protection */
- a3xx_protect_init(device);
-
- /* Turn on performance counters */
- adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
-
- /* Turn on the GPU busy counter and let it run free */
-
- adreno_dev->gpu_cycles = 0;
-}
-
-/*
- * Define the available perfcounter groups - these get used by
- * adreno_perfcounter_get and adreno_perfcounter_put
- */
-
-static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_CP_0_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_3_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_5_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_1_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_2_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_3_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_4_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_5_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_6_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_7_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_0_LO, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
-};
-
-static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT0_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT1_LO },
-};
-static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
- { KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
-};
-
-static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
- { a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
- { a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
- { a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
- { a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
- { a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
- { a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
- { a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
- { a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
- { a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
- { a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
- { a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
- { a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
- { a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
- { a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
- { a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
-};
-
-static struct adreno_perfcounters a3xx_perfcounters = {
- a3xx_perfcounter_groups,
- ARRAY_SIZE(a3xx_perfcounter_groups),
-};
-
-/* Defined in adreno_a3xx_snapshot.c */
-void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
- int *remain, int hang);
-
-struct adreno_gpudev adreno_a3xx_gpudev = {
- .reg_rbbm_status = A3XX_RBBM_STATUS,
- .reg_cp_pfp_ucode_addr = A3XX_CP_PFP_UCODE_ADDR,
- .reg_cp_pfp_ucode_data = A3XX_CP_PFP_UCODE_DATA,
- .perfcounters = &a3xx_perfcounters,
-
- .ctxt_create = a3xx_drawctxt_create,
- .ctxt_save = a3xx_drawctxt_save,
- .ctxt_restore = a3xx_drawctxt_restore,
- .ctxt_draw_workaround = NULL,
- .rb_init = a3xx_rb_init,
- .perfcounter_init = a3xx_perfcounter_init,
- .irq_control = a3xx_irq_control,
- .irq_handler = a3xx_irq_handler,
- .irq_pending = a3xx_irq_pending,
- .busy_cycles = a3xx_busy_cycles,
- .start = a3xx_start,
- .snapshot = a3xx_snapshot,
- .perfcounter_enable = a3xx_perfcounter_enable,
- .perfcounter_read = a3xx_perfcounter_read,
-};
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
deleted file mode 100755
index b939fa0..0000000
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/* Copyright (c) 2012,2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "kgsl.h"
-#include "adreno.h"
-#include "kgsl_snapshot.h"
-#include "a3xx_reg.h"
-
-#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
- + sizeof(struct kgsl_snapshot_debug))
-
-#define SHADER_MEMORY_SIZE 0x4000
-
-/**
- * _rbbm_debug_bus_read - Helper function to read data from the RBBM
- * debug bus.
- * @device - GPU device to read/write registers
- * @block_id - Debug bus block to read from
- * @index - Index in the debug bus block to read
- * @val - Pointer to be filled with the value read
- */
-static void _rbbm_debug_bus_read(struct kgsl_device *device,
- unsigned int block_id, unsigned int index, unsigned int *val)
-{
- unsigned int block = (block_id << 8) | 1 << 16;
- adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, block | index);
- adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS, val);
-}
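The control word packs the block select into bits [15:8], an enable bit at bit 16, and the index into the low bits. For instance, sampling index 49 of the HLSQ block (the same read _snapshot_hlsq_regs() performs later) comes out to:

    unsigned int val;

    adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL,
            (RBBM_BLOCK_ID_HLSQ << 8) | (1 << 16) | 49);
    adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS, &val);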
-
-static int a3xx_snapshot_shader_memory(struct kgsl_device *device,
- void *snapshot, int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i;
-
- if (remain < DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE)) {
- SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_SHADER_MEMORY;
- header->size = SHADER_MEMORY_SIZE;
-
- for (i = 0; i < SHADER_MEMORY_SIZE; i++)
- adreno_regread(device, 0x4000 + i, &data[i]);
-
- return DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE);
-}
-
-#define VPC_MEMORY_BANKS 4
-#define VPC_MEMORY_SIZE 512
-
-static int a3xx_snapshot_vpc_memory(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int size = VPC_MEMORY_BANKS * VPC_MEMORY_SIZE;
- int bank, addr, i = 0;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "VPC MEMORY");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_VPC_MEMORY;
- header->size = size;
-
- for (bank = 0; bank < VPC_MEMORY_BANKS; bank++) {
- for (addr = 0; addr < VPC_MEMORY_SIZE; addr++) {
- unsigned int val = bank | (addr << 4);
- adreno_regwrite(device,
- A3XX_VPC_VPC_DEBUG_RAM_SEL, val);
- adreno_regread(device,
- A3XX_VPC_VPC_DEBUG_RAM_READ, &data[i++]);
- }
- }
-
- return DEBUG_SECTION_SZ(size);
-}
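The RAM select value encodes the bank in the low four bits and the word address shifted up by four, so bank 2, word 0x1f selects 0x1f2:

    /* Sketch: read bank 2, word 0x1f of the VPC debug RAM */
    adreno_regwrite(device, A3XX_VPC_VPC_DEBUG_RAM_SEL, 2 | (0x1f << 4));
    adreno_regread(device, A3XX_VPC_VPC_DEBUG_RAM_READ, &val);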
-
-#define CP_MEQ_SIZE 16
-static int a3xx_snapshot_cp_meq(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i;
-
- if (remain < DEBUG_SECTION_SZ(CP_MEQ_SIZE)) {
- SNAPSHOT_ERR_NOMEM(device, "CP MEQ DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP_MEQ;
- header->size = CP_MEQ_SIZE;
-
- adreno_regwrite(device, A3XX_CP_MEQ_ADDR, 0x0);
- for (i = 0; i < CP_MEQ_SIZE; i++)
- adreno_regread(device, A3XX_CP_MEQ_DATA, &data[i]);
-
- return DEBUG_SECTION_SZ(CP_MEQ_SIZE);
-}
-
-static int a3xx_snapshot_cp_pm4_ram(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, size = adreno_dev->pm4_fw_size - 1;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
- header->size = size;
-
- /*
- * Read the firmware from the GPU rather than use our cache in order to
- * try to catch mis-programming or corruption in the hardware. We do
- * use the cached version of the size, however, instead of trying to
- * maintain ever-changing hardcoded constants.
- */
-
- adreno_regwrite(device, REG_CP_ME_RAM_RADDR, 0x0);
- for (i = 0; i < size; i++)
- adreno_regread(device, REG_CP_ME_RAM_DATA, &data[i]);
-
- return DEBUG_SECTION_SZ(size);
-}
-
-static int a3xx_snapshot_cp_pfp_ram(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, size = adreno_dev->pfp_fw_size - 1;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "CP PFP RAM DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP_PFP_RAM;
- header->size = size;
-
- /*
- * Read the firmware from the GPU rather than use our cache in order to
- * try to catch mis-programming or corruption in the hardware. We do
- * use the cached version of the size, however, instead of trying to
- * maintain ever-changing hardcoded constants.
- */
- kgsl_regwrite(device, A3XX_CP_PFP_UCODE_ADDR, 0x0);
- for (i = 0; i < size; i++)
- adreno_regread(device, A3XX_CP_PFP_UCODE_DATA, &data[i]);
-
- return DEBUG_SECTION_SZ(size);
-}
-
-/* This is the ROQ buffer size on both the A305 and A320 */
-#define A320_CP_ROQ_SIZE 128
-/* This is the ROQ buffer size on the A330 */
-#define A330_CP_ROQ_SIZE 512
-
-static int a3xx_snapshot_cp_roq(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, size;
-
- /* The size of the ROQ buffer is core dependent */
- size = adreno_is_a330(adreno_dev) ?
- A330_CP_ROQ_SIZE : A320_CP_ROQ_SIZE;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "CP ROQ DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP_ROQ;
- header->size = size;
-
- adreno_regwrite(device, A3XX_CP_ROQ_ADDR, 0x0);
- for (i = 0; i < size; i++)
- adreno_regread(device, A3XX_CP_ROQ_DATA, &data[i]);
-
- return DEBUG_SECTION_SZ(size);
-}
-
-#define A330_CP_MERCIU_QUEUE_SIZE 32
-
-static int a330_snapshot_cp_merciu(struct kgsl_device *device, void *snapshot,
- int remain, void *priv)
-{
- struct kgsl_snapshot_debug *header = snapshot;
- unsigned int *data = snapshot + sizeof(*header);
- int i, size;
-
- /* The MERCIU data is two dwords per entry */
- size = A330_CP_MERCIU_QUEUE_SIZE << 1;
-
- if (remain < DEBUG_SECTION_SZ(size)) {
- SNAPSHOT_ERR_NOMEM(device, "CP MERCIU DEBUG");
- return 0;
- }
-
- header->type = SNAPSHOT_DEBUG_CP_MERCIU;
- header->size = size;
-
- adreno_regwrite(device, A3XX_CP_MERCIU_ADDR, 0x0);
-
- for (i = 0; i < A330_CP_MERCIU_QUEUE_SIZE; i++) {
- adreno_regread(device, A3XX_CP_MERCIU_DATA,
- &data[(i * 2)]);
- adreno_regread(device, A3XX_CP_MERCIU_DATA2,
- &data[(i * 2) + 1]);
- }
-
- return DEBUG_SECTION_SZ(size);
-}
-
-#define DEBUGFS_BLOCK_SIZE 0x40
-
-static int a3xx_snapshot_debugbus_block(struct kgsl_device *device,
- void *snapshot, int remain, void *priv)
-{
- struct kgsl_snapshot_debugbus *header = snapshot;
- unsigned int id = (unsigned int) priv;
- unsigned int val;
- int i;
- unsigned int *data = snapshot + sizeof(*header);
- int size =
- (DEBUGFS_BLOCK_SIZE * sizeof(unsigned int)) + sizeof(*header);
-
- if (remain < size) {
- SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
- return 0;
- }
-
- val = (id << 8) | (1 << 16);
-
- header->id = id;
- header->count = DEBUGFS_BLOCK_SIZE;
-
- for (i = 0; i < DEBUGFS_BLOCK_SIZE; i++)
- _rbbm_debug_bus_read(device, id, i, &data[i]);
-
- return size;
-}
-
-static unsigned int debugbus_blocks[] = {
- RBBM_BLOCK_ID_CP,
- RBBM_BLOCK_ID_RBBM,
- RBBM_BLOCK_ID_VBIF,
- RBBM_BLOCK_ID_HLSQ,
- RBBM_BLOCK_ID_UCHE,
- RBBM_BLOCK_ID_PC,
- RBBM_BLOCK_ID_VFD,
- RBBM_BLOCK_ID_VPC,
- RBBM_BLOCK_ID_TSE,
- RBBM_BLOCK_ID_RAS,
- RBBM_BLOCK_ID_VSC,
- RBBM_BLOCK_ID_SP_0,
- RBBM_BLOCK_ID_SP_1,
- RBBM_BLOCK_ID_SP_2,
- RBBM_BLOCK_ID_SP_3,
- RBBM_BLOCK_ID_TPL1_0,
- RBBM_BLOCK_ID_TPL1_1,
- RBBM_BLOCK_ID_TPL1_2,
- RBBM_BLOCK_ID_TPL1_3,
- RBBM_BLOCK_ID_RB_0,
- RBBM_BLOCK_ID_RB_1,
- RBBM_BLOCK_ID_RB_2,
- RBBM_BLOCK_ID_RB_3,
- RBBM_BLOCK_ID_MARB_0,
- RBBM_BLOCK_ID_MARB_1,
- RBBM_BLOCK_ID_MARB_2,
- RBBM_BLOCK_ID_MARB_3,
-};
-
-static void *a3xx_snapshot_debugbus(struct kgsl_device *device,
- void *snapshot, int *remain)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(debugbus_blocks); i++) {
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUGBUS, snapshot, remain,
- a3xx_snapshot_debugbus_block,
- (void *) debugbus_blocks[i]);
- }
-
- return snapshot;
-}
-
-static void _snapshot_a3xx_regs(struct kgsl_snapshot_registers *regs,
- struct kgsl_snapshot_registers_list *list)
-{
- regs[list->count].regs = (unsigned int *) a3xx_registers;
- regs[list->count].count = a3xx_registers_count;
- list->count++;
-}
-
-static void _snapshot_hlsq_regs(struct kgsl_snapshot_registers *regs,
- struct kgsl_snapshot_registers_list *list,
- struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
-
- /*
- * Trying to read HLSQ registers when the HLSQ block is busy
- * will cause the device to hang. The RBBM_DEBUG_BUS has information
- * that will tell us if the HLSQ block is busy or not. Read values
- * from the debug bus to ensure the HLSQ block is not busy (this
- * is hardware dependent). If the HLSQ block is busy do not
- * dump the registers, otherwise dump the HLSQ registers.
- */
-
- if (adreno_is_a330(adreno_dev)) {
- /*
- * stall_ctxt_full status bit: RBBM_BLOCK_ID_HLSQ index 49 [27]
- *
- * if (!stall_context_full)
- * then dump HLSQ registers
- */
- unsigned int stall_context_full = 0;
-
- _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 49,
- &stall_context_full);
- stall_context_full &= 0x08000000;
-
- if (stall_context_full)
- return;
- } else {
- /*
- * tpif status bits: RBBM_BLOCK_ID_HLSQ index 4 [4:0]
- * spif status bits: RBBM_BLOCK_ID_HLSQ index 7 [5:0]
- *
- * if ((tpif == 0, 1, 28) && (spif == 0, 1, 10))
- * then dump HLSQ registers
- */
- unsigned int next_pif = 0;
-
- /* check tpif */
- _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 4, &next_pif);
- next_pif &= 0x1f;
- if (next_pif != 0 && next_pif != 1 && next_pif != 28)
- return;
-
- /* check spif */
- _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 7, &next_pif);
- next_pif &= 0x3f;
- if (next_pif != 0 && next_pif != 1 && next_pif != 10)
- return;
- }
-
- regs[list->count].regs = (unsigned int *) a3xx_hlsq_registers;
- regs[list->count].count = a3xx_hlsq_registers_count;
- list->count++;
-}
-
-static void _snapshot_a330_regs(struct kgsl_snapshot_registers *regs,
- struct kgsl_snapshot_registers_list *list)
-{
- /* For A330, append the additional list of new registers to grab */
- regs[list->count].regs = (unsigned int *) a330_registers;
- regs[list->count].count = a330_registers_count;
- list->count++;
-}
-
-/* A3XX GPU snapshot function - this is where all of the A3XX specific
- * bits and pieces are grabbed into the snapshot memory
- */
-
-void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
- int *remain, int hang)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct kgsl_snapshot_registers_list list;
- struct kgsl_snapshot_registers regs[5];
-
- list.registers = regs;
- list.count = 0;
-
- /* Disable Clock gating temporarily for the debug bus to work */
- adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
-
- /* Store relevant registers in list to snapshot */
- _snapshot_a3xx_regs(regs, &list);
- _snapshot_hlsq_regs(regs, &list, adreno_dev);
- if (adreno_is_a330(adreno_dev))
- _snapshot_a330_regs(regs, &list);
-
- /* Master set of (non debug) registers */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
- kgsl_snapshot_dump_regs, &list);
-
- /* CP_STATE_DEBUG indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_CP_STATE_DEBUG_INDEX,
- REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
-
- /* CP_ME indexed registers */
- snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
- remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
- 64, 44);
-
- /* VPC memory */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_vpc_memory, NULL);
-
- /* CP MEQ */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_cp_meq, NULL);
-
- /* Shader working/shadow memory */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_shader_memory, NULL);
-
- /* CP PFP and PM4 */
- /* Reading these will hang the GPU if it isn't already hung */
-
- if (hang) {
- unsigned int reg;
-
- /*
- * Reading the microcode while the CP is running will
- * basically move the CP instruction pointer to
- * whatever address we read. Big badaboom ensues. Stop the CP
- * (if it isn't already stopped) to ensure that we are safe.
- * We do this here and not earlier to avoid corrupting the RBBM
- * status and CP registers - by the time we get here we don't
- * care about the contents of the CP anymore.
- */
-
- adreno_regread(device, REG_CP_ME_CNTL, &reg);
- reg |= (1 << 27) | (1 << 28);
- adreno_regwrite(device, REG_CP_ME_CNTL, reg);
-
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_cp_pfp_ram, NULL);
-
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_cp_pm4_ram, NULL);
- }
-
- /* CP ROQ */
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a3xx_snapshot_cp_roq, NULL);
-
- if (adreno_is_a330(adreno_dev)) {
- snapshot = kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
- a330_snapshot_cp_merciu, NULL);
- }
-
- snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
-
- /* Enable Clock gating */
- adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
- adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
-
- return snapshot;
-}
diff --git a/drivers/gpu/msm/adreno_a3xx_trace.c b/drivers/gpu/msm/adreno_a3xx_trace.c
deleted file mode 100644
index 325b068..0000000
--- a/drivers/gpu/msm/adreno_a3xx_trace.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "kgsl.h"
-#include "adreno.h"
-
-/* Instantiate tracepoints */
-#define CREATE_TRACE_POINTS
-#include "a3xx_reg.h"
-#include "adreno_a3xx_trace.h"
diff --git a/drivers/gpu/msm/adreno_a3xx_trace.h b/drivers/gpu/msm/adreno_a3xx_trace.h
deleted file mode 100644
index d48faf4..0000000
--- a/drivers/gpu/msm/adreno_a3xx_trace.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#if !defined(_ADRENO_A3XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _ADRENO_A3XX_TRACE_H
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kgsl
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE adreno_a3xx_trace
-
-#include <linux/tracepoint.h>
-
-struct kgsl_device;
-
-/*
- * Tracepoint for a3xx irq. Includes status info
- */
-TRACE_EVENT(kgsl_a3xx_irq_status,
-
- TP_PROTO(struct kgsl_device *device, unsigned int status),
-
- TP_ARGS(device, status),
-
- TP_STRUCT__entry(
- __string(device_name, device->name)
- __field(unsigned int, status)
- ),
-
- TP_fast_assign(
- __assign_str(device_name, device->name);
- __entry->status = status;
- ),
-
- TP_printk(
- "d_name=%s status=%s",
- __get_str(device_name),
- __entry->status ? __print_flags(__entry->status, "|",
- { 1 << A3XX_INT_RBBM_AHB_ERROR, "RBBM_GPU_IDLE" },
- { 1 << A3XX_INT_RBBM_AHB_ERROR, "RBBM_AHB_ERR" },
- { 1 << A3XX_INT_RBBM_REG_TIMEOUT, "RBBM_REG_TIMEOUT" },
- { 1 << A3XX_INT_RBBM_ME_MS_TIMEOUT,
- "RBBM_ME_MS_TIMEOUT" },
- { 1 << A3XX_INT_RBBM_PFP_MS_TIMEOUT,
- "RBBM_PFP_MS_TIMEOUT" },
- { 1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW,
- "RBBM_ATB_BUS_OVERFLOW" },
- { 1 << A3XX_INT_VFD_ERROR, "RBBM_VFD_ERROR" },
- { 1 << A3XX_INT_CP_SW_INT, "CP_SW" },
- { 1 << A3XX_INT_CP_T0_PACKET_IN_IB,
- "CP_T0_PACKET_IN_IB" },
- { 1 << A3XX_INT_CP_OPCODE_ERROR, "CP_OPCODE_ERROR" },
- { 1 << A3XX_INT_CP_RESERVED_BIT_ERROR,
- "CP_RESERVED_BIT_ERROR" },
- { 1 << A3XX_INT_CP_HW_FAULT, "CP_HW_FAULT" },
- { 1 << A3XX_INT_CP_DMA, "CP_DMA" },
- { 1 << A3XX_INT_CP_IB2_INT, "CP_IB2_INT" },
- { 1 << A3XX_INT_CP_IB1_INT, "CP_IB1_INT" },
- { 1 << A3XX_INT_CP_RB_INT, "CP_RB_INT" },
- { 1 << A3XX_INT_CP_REG_PROTECT_FAULT,
- "CP_REG_PROTECT_FAULT" },
- { 1 << A3XX_INT_CP_RB_DONE_TS, "CP_RB_DONE_TS" },
- { 1 << A3XX_INT_CP_VS_DONE_TS, "CP_VS_DONE_TS" },
- { 1 << A3XX_INT_CP_PS_DONE_TS, "CP_PS_DONE_TS" },
- { 1 << A3XX_INT_CACHE_FLUSH_TS, "CACHE_FLUSH_TS" },
- { 1 << A3XX_INT_CP_AHB_ERROR_HALT,
- "CP_AHB_ERROR_HALT" },
- { 1 << A3XX_INT_MISC_HANG_DETECT, "MISC_HANG_DETECT" },
- { 1 << A3XX_INT_UCHE_OOB_ACCESS, "UCHE_OOB_ACCESS" })
- : "None"
- )
-);
-
-#endif /* _ADRENO_A3XX_TRACE_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
deleted file mode 100755
index 86dcc43..0000000
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/export.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-
-#include "kgsl.h"
-#include "adreno.h"
-
-#include "a2xx_reg.h"
-
-unsigned int kgsl_cff_dump_enable;
-
-static int kgsl_cff_dump_enable_set(void *data, u64 val)
-{
-#ifdef CONFIG_MSM_KGSL_CFF_DUMP
- kgsl_cff_dump_enable = (val != 0);
- return 0;
-#else
- return -EINVAL;
-#endif
-}
-
-static int kgsl_cff_dump_enable_get(void *data, u64 *val)
-{
- *val = kgsl_cff_dump_enable;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
- kgsl_cff_dump_enable_set, "%llu\n");
-
-typedef void (*reg_read_init_t)(struct kgsl_device *device);
-typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
- unsigned int *vals, int linec);
-
-void adreno_debugfs_init(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- if (!device->d_debugfs || IS_ERR(device->d_debugfs))
- return;
-
- debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
- &kgsl_cff_dump_enable_fops);
- debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
- &adreno_dev->wait_timeout);
- debugfs_create_u32("ib_check", 0644, device->d_debugfs,
- &adreno_dev->ib_check_level);
- debugfs_create_u32("active_cnt", 0444, device->d_debugfs,
- &device->active_cnt);
-}
diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h
deleted file mode 100644
index af7e797..0000000
--- a/drivers/gpu/msm/adreno_debugfs.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __ADRENO_DEBUGFS_H
-#define __ADRENO_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-
-void adreno_debugfs_init(struct kgsl_device *device);
-
-extern int adreno_pm_regs_enabled;
-extern int adreno_pm_ib_enabled;
-
-static inline int is_adreno_pm_regs_enabled(void)
-{
- return adreno_pm_regs_enabled;
-}
-
-static inline int is_adreno_pm_ib_enabled(void)
-{
- return adreno_pm_ib_enabled;
-}
-
-#else
-static inline void adreno_debugfs_init(struct kgsl_device *device)
-{
-}
-
-static inline int kgsl_pmregs_enabled(void)
-{
- /* If debugfs is turned off, then always print registers */
- return 1;
-}
-#endif
-
-#endif /* __ADRENO_DEBUGFS_H */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
deleted file mode 100644
index 72b73b6..0000000
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ /dev/null
@@ -1,1415 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/jiffies.h>
-#include <linux/err.h>
-
-#include "kgsl.h"
-#include "adreno.h"
-#include "adreno_ringbuffer.h"
-#include "adreno_trace.h"
-
-#define ADRENO_DISPATCHER_ACTIVE 0
-#define ADRENO_DISPATCHER_PAUSE 1
-
-#define ADRENO_DISPATCHER_SOFT_FAULT 1
-#define ADRENO_DISPATCHER_HARD_FAULT 2
-#define ADRENO_DISPATCHER_TIMEOUT_FAULT 3
-
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
-
-/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
-
-/* Number of milliseconds to wait for the context queue to clear */
-static unsigned int _context_queue_wait = 10000;
-
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
-
-/* Number of command batches inflight in the ringbuffer at any time */
-static unsigned int _dispatcher_inflight = 15;
-
-/* Command batch timeout (in milliseconds) */
-static unsigned int _cmdbatch_timeout = 2000;
-
-/* Interval for reading and comparing fault detection registers */
-static unsigned int _fault_timer_interval = 100;
-
-/* Local array for the current set of fault detect registers */
-static unsigned int *fault_detect_regs;
-
-/**
- * fault_detect_read() - Read the set of fault detect registers
- * @device: Pointer to the KGSL device struct
- *
- * Read the set of fault detect registers and store them in the local array.
- * These are the baseline values that are later compared by
- * fault_detect_read_compare().
- */
-static void fault_detect_read(struct kgsl_device *device)
-{
- int i;
-
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (ft_detect_regs[i] == 0)
- continue;
- adreno_regread(device, ft_detect_regs[i],
- &fault_detect_regs[i]);
- }
-}
-
-/**
- * fault_detect_read_compare() - Read the fault detect registers and compare
- * them to the current value
- * @device: Pointer to the KGSL device struct
- *
- * Read the set of fault detect registers and compare them to the current set
- * of registers. Return 1 if any of the register values changed
- */
-static int fault_detect_read_compare(struct kgsl_device *device)
-{
- int i, ret = 0;
-
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- unsigned int val;
-
- if (ft_detect_regs[i] == 0)
- continue;
- adreno_regread(device, ft_detect_regs[i], &val);
- if (val != fault_detect_regs[i])
- ret = 1;
- fault_detect_regs[i] = val;
- }
-
- return ret;
-}
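
Together these two helpers form a snapshot-and-compare progress check: capture a baseline once, then on each tick report whether any value moved while refreshing the baseline. Stripped of the register I/O, the core logic reduces to this sketch (hypothetical names, plain C):

#include <string.h>

#define NREGS 8

static unsigned int baseline[NREGS];

/* Return 1 if anything changed since the last call, and re-baseline.
 * The driver version additionally skips slots whose register address is 0. */
static int changed_since_last(const unsigned int cur[NREGS])
{
	int dirty = memcmp(baseline, cur, sizeof(baseline)) != 0;

	memcpy(baseline, cur, sizeof(baseline));
	return dirty;
}
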
-
-/**
- * adreno_context_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
- */
-static inline struct kgsl_cmdbatch *adreno_context_get_cmdbatch(
- struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- mutex_lock(&drawctxt->mutex);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- /*
- * Don't dequeue a cmdbatch that is still waiting for other
- * events
- */
- if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
- cmdbatch = ERR_PTR(-EAGAIN);
- goto done;
- }
-
- drawctxt->cmdqueue_head =
- CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
- }
-
-done:
- mutex_unlock(&drawctxt->mutex);
-
- return cmdbatch;
-}
-
-/**
- * adreno_context_requeue_cmdbatch() - Put a command back on the context queue
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
- *
- * Failure to submit a command to the ringbuffer isn't the fault of the command
- * being submitted, so if a failure happens, push it back on the head of the
- * context queue to be reconsidered again
- */
-static inline void adreno_context_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
-{
- int prev;	/* signed: the wraparound test below needs prev < 0 */
- mutex_lock(&drawctxt->mutex);
-
- prev = drawctxt->cmdqueue_head - 1;
-
- if (prev < 0)
- prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
-
- /*
- * The maximum queue size always needs to be one less than the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
- */
-
- BUG_ON(prev == drawctxt->cmdqueue_tail);
-
- drawctxt->cmdqueue[prev] = cmdbatch;
- drawctxt->queued++;
-
- /* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
- mutex_unlock(&drawctxt->mutex);
-}
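
Both queue helpers rely on the classic one-slot-free circular buffer: head == tail means empty, so the queue may never be filled completely or full would alias empty (hence the BUG_ON and the "one less than the size" comment). A runnable sketch of the arithmetic, including the signed step-back used by the requeue path (hypothetical QSIZE):

#include <assert.h>
#include <stdio.h>

#define QSIZE 4			/* stands in for ADRENO_CONTEXT_CMDQUEUE_SIZE */

static int q[QSIZE];
static int head, tail;		/* empty when head == tail */

static void push_tail(int v)	/* the normal enqueue path */
{
	q[tail] = v;
	tail = (tail + 1) % QSIZE;
	assert(tail != head);	/* a full queue would alias empty */
}

static void push_head(int v)	/* the "requeue" path: step head back */
{
	int prev = head - 1;	/* must be signed for the wrap test */

	if (prev < 0)
		prev = QSIZE - 1;
	assert(prev != tail);	/* same one-slot-free invariant */
	q[prev] = v;
	head = prev;
}

int main(void)
{
	push_tail(1);
	push_tail(2);
	push_head(0);
	printf("head item: %d\n", q[head]);	/* prints 0 */
	return 0;
}
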
-
-/**
- * dispatcher_queue_context() - Queue a context in the dispatcher pending list
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- *
- * Add a context to the dispatcher pending list.
- */
-static void dispatcher_queue_context(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- spin_lock(&dispatcher->plist_lock);
-
-
- if (plist_node_empty(&drawctxt->pending)) {
- /* Get a reference to the context while it sits on the list */
- _kgsl_context_get(&drawctxt->base);
- trace_dispatch_queue_context(drawctxt);
- plist_add(&drawctxt->pending, &dispatcher->pending);
- }
-
- spin_unlock(&dispatcher->plist_lock);
-}
-
-/**
- * sendcmd() - Send a command batch to the GPU hardware
- * @adreno_dev: Pointer to the adreno device struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
- *
- * Send a KGSL command batch to the GPU hardware
- */
-static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int ret;
-
- dispatcher->inflight++;
-
- mutex_lock(&device->mutex);
-
- if (dispatcher->inflight == 1) {
- /* Time to make the donuts. Turn on the GPU */
- ret = kgsl_active_count_get(device);
- if (ret) {
- dispatcher->inflight--;
- mutex_unlock(&device->mutex);
- return ret;
- }
- }
-
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
-
- /*
- * On the first command, if the submission was successful, then read the
- * fault registers. If it failed then turn off the GPU. Sad face.
- */
-
- if (dispatcher->inflight == 1) {
- if (ret == 0)
- fault_detect_read(device);
- else
- kgsl_active_count_put(device);
- }
-
- mutex_unlock(&device->mutex);
-
- if (ret) {
- dispatcher->inflight--;
- KGSL_DRV_ERR(device,
- "Unable to submit command to the ringbuffer\n");
- return ret;
- }
-
- trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
-
- dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
- dispatcher->tail = (dispatcher->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
-
- /*
- * If this is the first command in the pipe then the GPU will
- * immediately start executing it so we can start the expiry timeout on
- * the command batch here. Subsequent command batches will have their
- * timer started when the previous command batch is retired
- */
- if (dispatcher->inflight == 1) {
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(_cmdbatch_timeout);
- mod_timer(&dispatcher->timer, cmdbatch->expires);
-
- /* Start the fault detection timer */
- if (adreno_dev->fast_hang_detect)
- mod_timer(&dispatcher->fault_timer,
- jiffies +
- msecs_to_jiffies(_fault_timer_interval));
- }
-
- return 0;
-}
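
Only the batch at the head of the pipeline carries a live deadline; it is computed with the usual jiffies idiom and armed with mod_timer(), which (re)schedules the timer whether or not it is already pending. A sketch of just that step, assuming a timer initialized elsewhere:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical: arm a per-batch deadline the way sendcmd() does */
static void arm_deadline(struct timer_list *t, unsigned long *expires_out,
			 unsigned int timeout_ms)
{
	unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);

	*expires_out = expires;
	/* mod_timer() re-arms whether or not the timer is pending */
	mod_timer(t, expires);
}
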
-
-/**
- * dispatcher_context_sendcmds() - Send commands from a context to the GPU
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno context to dispatch commands from
- *
- * Dequeue and send a burst of commands from the specified context to the GPU
- */
-static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int count = 0;
-
- /*
- * Each context can send a specific number of command batches per cycle
- */
- for ( ; count < _context_cmdbatch_burst &&
- dispatcher->inflight < _dispatcher_inflight; count++) {
- int ret;
- struct kgsl_cmdbatch *cmdbatch =
- adreno_context_get_cmdbatch(drawctxt);
-
- if (cmdbatch == NULL)
- break;
-
- /*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
- * When the sync points are satisfied then the context will get
- * requeued
- */
-
- if (IS_ERR(cmdbatch))
- return count;
-
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
- count--;
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- ret = sendcmd(adreno_dev, cmdbatch);
-
- /*
- * There are various reasons why we can't submit a command (no
- * memory for the commands, full ringbuffer, etc) but none of
- * these are actually the current command's fault. Requeue it
- * back on the context and let it come back around again if
- * conditions improve
- */
- if (ret) {
- adreno_context_requeue_cmdbatch(drawctxt, cmdbatch);
- break;
- }
- }
-
- /*
- * If the context successfully submitted commands, then
- * unconditionally put it back on the queue to be considered the
- * next time around. This might seem a little wasteful but it is
- * reasonable to think that a busy context will stay busy.
- */
-
- if (count) {
- dispatcher_queue_context(adreno_dev, drawctxt);
-
- /*
- * If we submitted something there will be room in the
- * context queue so ping the context wait queue on the
- * chance that the context is snoozing
- */
-
- wake_up_interruptible_all(&drawctxt->wq);
- }
-
- return count;
-}
-
-/**
- * _adreno_dispatcher_issuecmds() - Issue commands from pending contexts
- * @adreno_dev: Pointer to the adreno device struct
- *
- * Issue as many commands as possible (up to inflight) from the pending
- * contexts. This function assumes the dispatcher mutex has been locked.
- */
-static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /* Don't do anything if the dispatcher is paused */
- if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
- return 0;
-
- while (dispatcher->inflight < _dispatcher_inflight) {
- struct adreno_context *drawctxt = NULL;
-
- spin_lock(&dispatcher->plist_lock);
-
- if (!plist_head_empty(&dispatcher->pending)) {
- drawctxt = plist_first_entry(&dispatcher->pending,
- struct adreno_context, pending);
-
- plist_del(&drawctxt->pending, &dispatcher->pending);
- }
-
- spin_unlock(&dispatcher->plist_lock);
-
- if (drawctxt == NULL)
- break;
-
- if (kgsl_context_detached(&drawctxt->base) ||
- drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- kgsl_context_put(&drawctxt->base);
- continue;
- }
-
- dispatcher_context_sendcmds(adreno_dev, drawctxt);
- kgsl_context_put(&drawctxt->base);
- }
-
- return 0;
-}
-
-/**
- * adreno_dispatcher_issuecmds() - Issue commands from pending contexts
- * @adreno_dev: Pointer to the adreno device struct
- *
- * Lock the dispatcher and call _adreno_dispatcher_issuecmds()
- */
-int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int ret;
-
- mutex_lock(&dispatcher->mutex);
- ret = _adreno_dispatcher_issuecmds(adreno_dev);
- mutex_unlock(&dispatcher->mutex);
-
- return ret;
-}
-
-static int _check_context_queue(struct adreno_context *drawctxt)
-{
- int ret;
-
- mutex_lock(&drawctxt->mutex);
-
- /*
- * Wake up if there is room in the context or if the whole thing got
- * invalidated while we were asleep
- */
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- ret = 1;
- else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
-
- mutex_unlock(&drawctxt->mutex);
-
- return ret;
-}
-
-/**
- * get_timestamp() - Return the next timestamp for the context
- * @drawctxt: Pointer to an adreno draw context struct
- * @cmdbatch: Pointer to a command batch
- * @timestamp: Pointer to a timestamp value possibly passed from the user
- *
- * Assign a timestamp based on the settings of the draw context and the command
- * batch.
- */
-static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
-{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
- *timestamp = 0;
- return 0;
- }
-
- if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
- /*
- * User specified timestamps need to be greater than the last
- * issued timestamp in the context
- */
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
- return -ERANGE;
-
- drawctxt->timestamp = *timestamp;
- } else
- drawctxt->timestamp++;
-
- *timestamp = drawctxt->timestamp;
- return 0;
-}
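
timestamp_cmp() is defined elsewhere in the driver; the property relied on here is a wraparound-safe ordering, so user timestamps stay monotonically increasing even across 32-bit rollover. The usual way to get that from unsigned arithmetic, as a sketch (assumed semantics - the real helper may differ in detail):

/* >0 if a is newer than b, 0 if equal, <0 if older; valid while the two
 * timestamps are within 2^31 of each other. */
static int ts_cmp(unsigned int a, unsigned int b)
{
	return (int)(a - b);	/* two's-complement difference absorbs rollover */
}

/* Example: ts_cmp(5, 0xfffffffeu) == 7 > 0, so 5 is "newer" across the wrap */
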
-
-/**
- * adreno_context_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_context_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- int ret;
-
- mutex_lock(&drawctxt->mutex);
-
- if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
- mutex_unlock(&drawctxt->mutex);
- return -EINVAL;
- }
-
- /*
- * After skipping to the end of the frame we need to force the preamble
- * to run (if it exists) regardless of the context state.
- */
-
- if (drawctxt->flags & CTXT_FLAGS_FORCE_PREAMBLE) {
- cmdbatch->priv |= CMDBATCH_FLAG_FORCE_PREAMBLE;
- drawctxt->flags &= ~CTXT_FLAGS_FORCE_PREAMBLE;
- }
-
- /*
- * If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
- * through the pipeline but it won't actually send any commands
- */
-
- if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
- cmdbatch->priv |= CMDBATCH_FLAG_SKIP;
-
- /*
- * If this command batch represents the EOF then clear the way
- * for the dispatcher to continue submitting
- */
-
- if (cmdbatch->flags & KGSL_CONTEXT_END_OF_FRAME) {
- drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
-
- /*
- * Force the preamble on the next command to ensure that
- * the state is correct
- */
-
- drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
- }
- }
-
- /* Wait for room in the context queue */
-
- while (drawctxt->queued >= _context_cmdqueue_size) {
- trace_adreno_context_sleep(drawctxt);
- mutex_unlock(&drawctxt->mutex);
-
- ret = wait_event_interruptible_timeout(drawctxt->wq,
- _check_context_queue(drawctxt),
- msecs_to_jiffies(_context_queue_wait));
-
- mutex_lock(&drawctxt->mutex);
- trace_adreno_context_wake(drawctxt);
-
- if (ret <= 0) {
- mutex_unlock(&drawctxt->mutex);
- return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
-
- /*
- * Account for the possibility that the context got invalidated
- * while we were sleeping
- */
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- mutex_unlock(&drawctxt->mutex);
- return -EDEADLK;
- }
- }
-
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- mutex_unlock(&drawctxt->mutex);
- return ret;
- }
-
- cmdbatch->timestamp = *timestamp;
-
- /* The batch fault policy is the current system fault policy */
- cmdbatch->fault_policy = adreno_dev->ft_policy;
-
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
-
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
-
-
- mutex_unlock(&drawctxt->mutex);
-
- /* Add the context to the dispatcher pending list */
- dispatcher_queue_context(adreno_dev, drawctxt);
-
- /*
- * Only issue commands if inflight is less than burst - this prevents us
- * from sitting around waiting for the mutex on a busy system - the work
- * loop will schedule it for us. Inflight is mutex protected, but the
- * worst that can happen is that it will go to 0 after we check, and if
- * it goes to 0 it is because the work loop decremented it and the work
- * queue will try to schedule new commands anyway.
- */
-
- if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
- adreno_dispatcher_issuecmds(adreno_dev);
-
- return 0;
-}
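
The flow-control loop above is the bounded-producer pattern around wait_event_interruptible_timeout(): drop the context lock, sleep until the consumer makes room (or the wait is interrupted or times out), re-take the lock, and re-check state. Condensed to a helper with hypothetical names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/wait.h>

/* Wait for room in a mutex-guarded queue; called with *lock held, returns
 * with *lock held. 0 on success, -ETIMEDOUT or -ERESTARTSYS on failure. */
static int wait_for_room(struct mutex *lock, wait_queue_head_t *wq,
			 int (*has_room)(void *), void *priv,
			 unsigned int timeout_ms)
{
	long ret;

	mutex_unlock(lock);	/* never sleep holding the queue lock */
	ret = wait_event_interruptible_timeout(*wq, has_room(priv),
					       msecs_to_jiffies(timeout_ms));
	mutex_lock(lock);

	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? (int)ret : 0;
}
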
-
-/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
- * passed in then zero the size which effectively skips it when it is submitted
- * in the ringbuffer.
- */
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, unsigned int base)
-{
- int i;
-
- for (i = 0; i < cmdbatch->ibcount; i++) {
- if (cmdbatch->ibdesc[i].gpuaddr == base) {
- cmdbatch->ibdesc[i].sizedwords = 0;
- return;
- }
- }
-}
-
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
-{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- int skip = 1;
- int i;
-
- for (i = 0; i < count; i++) {
-
- /*
- * Only operate on command batches that belong to the
- * faulting context
- */
-
- if (replay[i]->context->id != cmdbatch->context->id)
- continue;
-
- /*
- * Skip all the command batches in this context until
- * the EOF flag is seen. If the EOF flag is seen then
- * force the preamble for the next command.
- */
-
- if (skip) {
- replay[i]->priv |= CMDBATCH_FLAG_SKIP;
-
- if (replay[i]->flags & KGSL_CONTEXT_END_OF_FRAME)
- skip = 0;
- } else {
- replay[i]->priv |= CMDBATCH_FLAG_FORCE_PREAMBLE;
- return;
- }
- }
-
- /*
- * If the EOF flag hasn't been seen yet then set the flag in the
- * drawctxt to keep looking for it
- */
-
- if (skip && drawctxt)
- drawctxt->flags |= CTXT_FLAGS_SKIP_EOF;
-
- /*
- * If we did see the EOF flag then force the preamble on for the
- * next command issued on this context
- */
-
- if (!skip && drawctxt)
- drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
-}
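
cmdbatch_skip_frame() is a two-phase scan over the replay list: mark every batch from the faulting context as skipped until the end-of-frame marker is seen, then force a preamble on the first batch after it; if no EOF turns up, the context itself carries the "still skipping" state forward. The shape of that scan, reduced to a runnable sketch (hypothetical types):

struct item {
	int ctx_id;
	int is_eof;		/* KGSL_CONTEXT_END_OF_FRAME analogue */
	int skip, force_preamble;
};

static int skip_frame(struct item *list, int count, int faulting_ctx)
{
	int skipping = 1;
	int i;

	for (i = 0; i < count; i++) {
		if (list[i].ctx_id != faulting_ctx)
			continue;
		if (skipping) {
			list[i].skip = 1;
			if (list[i].is_eof)
				skipping = 0;	/* next batch gets a preamble */
		} else {
			list[i].force_preamble = 1;
			break;
		}
	}
	return skipping;	/* 1: caller must keep skipping future batches */
}
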
-
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- struct adreno_context *drawctxt;
-
- if (cmd == NULL)
- continue;
-
- drawctxt = ADRENO_CONTEXT(cmd->context);
-
- if (kgsl_context_detached(cmd->context) ||
- drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- replay[i] = NULL;
-
- mutex_lock(&device->mutex);
- kgsl_cancel_events_timestamp(device, cmd->context,
- cmd->timestamp);
- mutex_unlock(&device->mutex);
-
- kgsl_cmdbatch_destroy(cmd);
- }
- }
-}
-
-static void dispatcher_do_fault(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- unsigned int ptr;
- unsigned int reg, base;
- struct kgsl_cmdbatch **replay = NULL;
- struct kgsl_cmdbatch *cmdbatch;
- int ret, i, count = 0;
-
- BUG_ON(dispatcher->inflight == 0);
-
- /* Turn off all the timers */
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-
- mutex_lock(&device->mutex);
-
- cmdbatch = dispatcher->cmdqueue[dispatcher->head];
-
- /*
- * If the fault was due to a timeout then stop the CP to ensure we don't
- * get activity while we are trying to dump the state of the system
- */
-
- if (dispatcher->fault == ADRENO_DISPATCHER_TIMEOUT_FAULT) {
- kgsl_regread(device, REG_CP_ME_CNTL, &reg);
- reg |= (1 << 27) | (1 << 28);
- kgsl_regwrite(device, REG_CP_ME_CNTL, reg);
-
- /* Skip the PM dump for a timeout because it confuses people */
- cmdbatch->fault_policy |= KGSL_FT_SKIP_PMDUMP;
- }
-
- kgsl_regread(device, REG_CP_IB1_BASE, &base);
-
- /*
- * Dump the postmortem and snapshot information if this is the first
- * detected fault for the oldest active command batch
- */
-
- if (!(cmdbatch->fault_policy & KGSL_FT_SKIP_PMDUMP)) {
- kgsl_postmortem_dump(device, 0);
- kgsl_device_snapshot(device, 1);
- }
-
- mutex_unlock(&device->mutex);
-
- /* Allocate memory to store the inflight commands */
- replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
-
- if (replay == NULL) {
- unsigned int ptr = dispatcher->head;
-
- while (ptr != dispatcher->tail) {
- struct kgsl_context *context =
- dispatcher->cmdqueue[ptr]->context;
-
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatcher->cmdqueue[ptr]);
-
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
- }
-
- /*
- * Set the replay count to zero - this will ensure that the
- * hardware gets reset but nothing else gets replayed
- */
-
- count = 0;
- goto replay;
- }
-
- /* Copy the inflight command batches into the temporary storage */
- ptr = dispatcher->head;
-
- while (ptr != dispatcher->tail) {
- replay[count++] = dispatcher->cmdqueue[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
- }
-
- /*
- * For the purposes of replay, we assume that the oldest command batch
- * that hasn't retired a timestamp is "hung".
- */
-
- cmdbatch = replay[0];
-
- /*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
- * again on replay
- */
-
- cmdbatch->fault_policy |= KGSL_FT_SKIP_PMDUMP;
-
- /*
- * A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it.
- * Clear the replay bit and move on to the next policy level.
- */
-
- if (dispatcher->fault == ADRENO_DISPATCHER_HARD_FAULT)
- cmdbatch->fault_policy &= ~KGSL_FT_REPLAY;
-
- /*
- * A timeout fault means the IB timed out - don't be silly and replay
- * it, because it will probably timeout again
- */
-
- if (dispatcher->fault == ADRENO_DISPATCHER_TIMEOUT_FAULT)
- cmdbatch->fault_policy &= ~KGSL_FT_REPLAY;
-
- /*
- * Execute the fault tolerance policy. Each command batch stores the
- * current fault policy that was set when it was queued.
- * As the options are tried in descending priority
- * (REPLAY -> SKIPIB -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next option can be tried if the
- * fault comes around again
- */
-
- /* Replay the hanging command batch again */
- if (cmdbatch->fault_policy & KGSL_FT_REPLAY) {
- cmdbatch->fault_policy &= ~KGSL_FT_REPLAY;
- goto replay;
- }
-
- /*
- * Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
- * because the CP may have caused a page-fault while it was prefetching
- * the next IB1/IB2. Walk all outstanding commands and zap the
- * supposedly bad IB1 wherever it lurks.
- */
-
- if (cmdbatch->fault_policy & KGSL_FT_SKIPIB) {
- cmdbatch->fault_policy &= ~KGSL_FT_SKIPIB;
-
- for (i = 0; i < count; i++) {
- if (replay[i] != NULL)
- cmdbatch_skip_ib(replay[i], base);
- }
-
- goto replay;
- }
-
- if (cmdbatch->fault_policy & KGSL_FT_SKIPFRAME) {
-
- cmdbatch->fault_policy &= ~KGSL_FT_SKIPFRAME;
-
- /*
- * Skip all the pending command batches for this context until
- * the EOF frame is seen
- */
- cmdbatch_skip_frame(cmdbatch, replay, count);
- goto replay;
- }
-
- /* If we get here then all the policies failed or FT is disabled */
-
- /* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
-
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
-
-replay:
- /* Reset the dispatcher queue */
- dispatcher->inflight = 0;
- dispatcher->head = dispatcher->tail = 0;
-
- /* Reset the GPU */
- mutex_lock(&device->mutex);
- ret = adreno_reset(device);
- mutex_unlock(&device->mutex);
-
- /* If adreno_reset() fails then what hope do we have for the future? */
- BUG_ON(ret);
-
- /*
- * Force the preamble on the first command (if applicable) to avoid any
- * strange stage issues
- */
-
- if (replay && replay[0])	/* replay is NULL if the allocation failed */
- replay[0]->priv |= CMDBATCH_FLAG_FORCE_PREAMBLE;
-
- /* Replay the pending command buffers */
- for (i = 0; i < count; i++) {
-
- int ret;
-
- if (replay[i] == NULL)
- continue;
-
- /*
- * Force each command batch to wait for idle - this avoids weird
- * CP parse issues
- */
-
- replay[i]->flags |= KGSL_CMD_FLAGS_WFI;
-
- ret = sendcmd(adreno_dev, replay[i]);
-
- /*
- * If sending the command fails, then try to recover by
- * invalidating the context
- */
-
- if (ret) {
- adreno_drawctxt_invalidate(device, cmdbatch->context);
-
- remove_invalidated_cmdbatches(device, &replay[i],
- count - i);
- }
- }
-
- mutex_lock(&device->mutex);
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
-
- kfree(replay);
-}
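
The recovery ladder in dispatcher_do_fault() is a check-and-clear bitmask: each attempt consumes its bit from the batch's fault_policy, so if the same batch faults again the next rung is tried (replay -> skip IB -> skip frame -> invalidate context). The control shape as a compact sketch (hypothetical bit names):

#define FT_REPLAY	(1 << 0)
#define FT_SKIPIB	(1 << 1)
#define FT_SKIPFRAME	(1 << 2)

enum action { REPLAY, SKIP_IB, SKIP_FRAME, INVALIDATE };

static enum action next_action(unsigned int *policy)
{
	if (*policy & FT_REPLAY) {
		*policy &= ~FT_REPLAY;	/* consumed: never retried twice */
		return REPLAY;
	}
	if (*policy & FT_SKIPIB) {
		*policy &= ~FT_SKIPIB;
		return SKIP_IB;
	}
	if (*policy & FT_SKIPFRAME) {
		*policy &= ~FT_SKIPFRAME;
		return SKIP_FRAME;
	}
	return INVALIDATE;	/* every rung exhausted */
}
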
-
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
- unsigned int consumed, unsigned int retired)
-{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
-}
-
-/**
- * adreno_dispatcher_work() - Master work handler for the dispatcher
- * @work: Pointer to the work struct for the current work queue
- *
- * Process expired commands and send new ones.
- */
-static void adreno_dispatcher_work(struct work_struct *work)
-{
- struct adreno_dispatcher *dispatcher =
- container_of(work, struct adreno_dispatcher, work);
- struct adreno_device *adreno_dev =
- container_of(dispatcher, struct adreno_device, dispatcher);
- struct kgsl_device *device = &adreno_dev->dev;
- int count = 0;
-
- mutex_lock(&dispatcher->mutex);
-
- while (dispatcher->head != dispatcher->tail) {
- uint32_t consumed, retired = 0;
- struct kgsl_cmdbatch *cmdbatch =
- dispatcher->cmdqueue[dispatcher->head];
- struct adreno_context *drawctxt;
- BUG_ON(cmdbatch == NULL);
-
- drawctxt = ADRENO_CONTEXT(cmdbatch->context);
-
- /*
- * First try to expire the timestamp. This happens if the
- * context is valid and the timestamp expired normally or if the
- * context was destroyed before the command batch was finished
- * in the GPU. Either way, retire the command batch, advance the
- * pointers, and continue processing the queue.
- */
-
- if (!kgsl_context_detached(cmdbatch->context))
- retired = kgsl_readtimestamp(device, cmdbatch->context,
- KGSL_TIMESTAMP_RETIRED);
-
- if (kgsl_context_detached(cmdbatch->context) ||
- (timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
-
- trace_adreno_cmdbatch_retired(cmdbatch,
- dispatcher->inflight - 1);
-
- /* Reduce the number of inflight command batches */
- dispatcher->inflight--;
-
- /* Zero the old entry */
- dispatcher->cmdqueue[dispatcher->head] = NULL;
-
- /* Advance the buffer head */
- dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
-
- /* Destroy the retired command batch */
- kgsl_cmdbatch_destroy(cmdbatch);
-
- /* Update the expire time for the next command batch */
-
- if (dispatcher->inflight > 0) {
- cmdbatch =
- dispatcher->cmdqueue[dispatcher->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(_cmdbatch_timeout);
- }
-
- count++;
- continue;
- }
-
- /*
- * If we got a fault from the interrupt handler, this command
- * is to blame. Invalidate it, reset and replay
- */
-
- if (dispatcher->fault) {
- dispatcher_do_fault(device);
- dispatcher->fault = 0;
- goto done;
- }
-
- /* Get the last consumed timestamp */
- consumed = kgsl_readtimestamp(device, cmdbatch->context,
- KGSL_TIMESTAMP_CONSUMED);
-
- /*
- * Break here if fault detection is disabled for the context or
- * if long running IB detection is disabled device-wide.
- * Long running command buffers will be allowed to run to
- * completion - but badly behaving command buffers (infinite
- * shaders etc) can end up running forever.
- */
-
- if (!adreno_dev->long_ib_detect ||
- drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
- break;
-
- /*
- * The last line of defense is to check if the command batch has
- * timed out. If we get this far but the timeout hasn't expired
- * yet then the GPU is still ticking away
- */
-
- if (time_is_after_jiffies(cmdbatch->expires))
- break;
-
- /* Boom goes the dynamite */
-
- KGSL_DRV_ERR(device,
- "Context %d, timestamp %d ran too long\n",
- drawctxt->base.id, drawctxt->timestamp);
-
- dispatcher->fault = ADRENO_DISPATCHER_TIMEOUT_FAULT;
-
- dispatcher_do_fault(device);
- break;
- }
-
- /*
- * Decrement the active count to 0 - this will allow the system to go
- * into suspend even if there are queued command batches
- */
-
- if (count && dispatcher->inflight == 0) {
- mutex_lock(&device->mutex);
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
- }
-
- /* Dispatch new commands if we have the room */
- if (dispatcher->inflight < _dispatcher_inflight)
- _adreno_dispatcher_issuecmds(adreno_dev);
-
-done:
- /* Either update the timer for the next command batch or disable it */
- if (dispatcher->inflight) {
- struct kgsl_cmdbatch *cmdbatch
- = dispatcher->cmdqueue[dispatcher->head];
-
- /* Update the timeout timer for the next command batch */
- mod_timer(&dispatcher->timer, cmdbatch->expires);
- } else {
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
- }
-
- /* Before leaving update the pwrscale information */
- mutex_lock(&device->mutex);
- kgsl_pwrscale_idle(device);
- mutex_unlock(&device->mutex);
-
- mutex_unlock(&dispatcher->mutex);
-}
-
-void adreno_dispatcher_schedule(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- queue_work(device->work_queue, &dispatcher->work);
-}
-
-/**
- * adreno_dispatcher_queue_context() - schedule a drawctxt in the dispatcher
- * @device: pointer to the KGSL device
- * @drawctxt: pointer to the drawctxt to schedule
- *
- * Put a draw context on the dispatcher pending queue and schedule the
- * dispatcher. This is used to reschedule changes that might have been blocked
- * for sync points or other concerns
- */
-void adreno_dispatcher_queue_context(struct kgsl_device *device,
- struct adreno_context *drawctxt)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- dispatcher_queue_context(adreno_dev, drawctxt);
- adreno_dispatcher_schedule(device);
-}
-
-/*
- * This is called on a regular basis while command batches are inflight. Fault
- * detection registers are read and compared to the existing values - if they
- * changed then the GPU is still running. If they are the same between
- * subsequent calls then the GPU may have faulted
- */
-
-void adreno_dispatcher_fault_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /* Leave if the user decided to turn off fast hang detection */
- if (adreno_dev->fast_hang_detect == 0)
- return;
-
- /* Don't do anything if the dispatcher is idle or faulted */
- if (dispatcher->inflight == 0 || dispatcher->fault)
- return;
-
- /* Make sure the device is active before trying a read */
- if (device->state != KGSL_STATE_ACTIVE)
- return;
-
- /*
- * Read the fault registers - if it returns 0 then they haven't changed
- * so mark the dispatcher as faulted and schedule the work loop.
- */
-
- if (!fault_detect_read_compare(device)) {
- dispatcher->fault = ADRENO_DISPATCHER_SOFT_FAULT;
- adreno_dispatcher_schedule(device);
- } else {
- mod_timer(&dispatcher->fault_timer,
- jiffies + msecs_to_jiffies(_fault_timer_interval));
- }
-}
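
The fault timer is a self-rearming watchdog: each tick either escalates (no register movement, so flag a soft fault and kick the worker) or re-arms itself for another interval; escalation simply stops the rearming. The skeleton of the pattern, using the 3.x-era timer callback signature this driver targets:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define WATCH_MS 100		/* stands in for _fault_timer_interval */

static struct timer_list watchdog;

static int progress_seen(void)	/* hypothetical probe, e.g. the register compare */
{
	return 1;
}

static void escalate(void)	/* hypothetical fault path */
{
}

static void watchdog_fn(unsigned long data)
{
	if (!progress_seen()) {
		escalate();	/* do not re-arm: recovery takes over */
		return;
	}
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(WATCH_MS));
}

/* Setup, e.g. at init: setup_timer(&watchdog, watchdog_fn, 0);
 * first arm: mod_timer(&watchdog, jiffies + msecs_to_jiffies(WATCH_MS)); */
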
-
-/*
- * This is called when the timer expires - it either means the GPU is hung or
- * the IB is taking too long to execute
- */
-void adreno_dispatcher_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct kgsl_device *device = &adreno_dev->dev;
-
- adreno_dispatcher_schedule(device);
-}
-/**
- * adreno_dispatcher_irq_fault() - Trigger a fault in the dispatcher
- * @device: Pointer to the KGSL device
- *
- * Called from an interrupt context this will trigger a fault in the
- * dispatcher for the oldest pending command batch
- */
-void adreno_dispatcher_irq_fault(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- dispatcher->fault = ADRENO_DISPATCHER_HARD_FAULT;
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * adreno_dispatcher_pause() - stop the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Pause the dispatcher so it doesn't accept any new commands
- */
-void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /*
- * This will probably get called while holding other mutexes so don't
- * take the dispatcher mutex. The biggest penalty is that another
- * command might be submitted while we are in here, but that's okay
- * because whoever is waiting for the drain will just have another
- * command batch to wait for
- */
-
- dispatcher->state = ADRENO_DISPATCHER_PAUSE;
-}
-
-/**
- * adreno_dispatcher_start() - activate the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Set the dispatcher active and start the loop once to get things going
- */
-void adreno_dispatcher_start(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
-
- /* Schedule the work loop to get things going */
- adreno_dispatcher_schedule(&adreno_dev->dev);
-}
-
-/**
- * adreno_dispatcher_stop() - stop the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Stop the dispatcher and close all the timers
- */
-void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-}
-
-/**
- * adreno_dispatcher_close() - close the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Close the dispatcher and free all the outstanding commands and memory
- */
-void adreno_dispatcher_close(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- mutex_lock(&dispatcher->mutex);
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-
- while (dispatcher->head != dispatcher->tail) {
- kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
- dispatcher->head = (dispatcher->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
- }
-
- kfree(fault_detect_regs);
- fault_detect_regs = NULL;
-
- mutex_unlock(&dispatcher->mutex);
-
- kobject_put(&dispatcher->kobj);
-}
-
-struct dispatcher_attribute {
- struct attribute attr;
- ssize_t (*show)(struct adreno_dispatcher *,
- struct dispatcher_attribute *, char *);
- ssize_t (*store)(struct adreno_dispatcher *,
- struct dispatcher_attribute *, const char *buf,
- size_t count);
- unsigned int max;
- unsigned int *value;
-};
-
-#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
- struct dispatcher_attribute dispatcher_attr_##_name = { \
- .attr = { .name = __stringify(_name), .mode = _mode }, \
- .show = _show_uint, \
- .store = _store_uint, \
- .max = _max, \
- .value = &(_value), \
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment