Skip to content

Instantly share code, notes, and snippets.

@supercairos
Created April 8, 2019 14:54
Show Gist options
  • Save supercairos/3a5c296333fe1641ab6a60f8d49d94b8 to your computer and use it in GitHub Desktop.
Arm64 CEF Patch
diff --git a/include/base/cef_atomicops.h b/include/base/cef_atomicops.h
index 96aebab..c8796c1 100644
--- a/include/base/cef_atomicops.h
+++ b/include/base/cef_atomicops.h
@@ -182,6 +182,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "include/base/internal/cef_atomicops_mac.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "include/base/internal/cef_atomicops_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(__ARM_ARCH_ISA_A64)
+#include "include/base/internal/cef_atomicops_arm64_gcc.h"
#elif defined(COMPILER_GCC) && defined(__ARM_ARCH)
#include "include/base/internal/cef_atomicops_arm_gcc.h"
#else
diff --git a/include/base/internal/cef_atomicops_arm64_gcc.h b/include/base/internal/cef_atomicops_arm64_gcc.h
new file mode 100644
index 0000000..0d9ea3c
--- /dev/null
+++ b/include/base/internal/cef_atomicops_arm64_gcc.h
@@ -0,0 +1,308 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
+#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
+
+namespace base {
+namespace subtle {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
+}
+
+// NoBarrier versions of the operation include "memory" in the clobber list.
+// This is not required for direct usage of the NoBarrier versions of the
+// operations. However this is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from outside
+// the call is reordered between the operation and the memory barrier. This does
+// not change the code generated, so has no or minimal impact on the
+// NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %w[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %w[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %x[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %x[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace base::subtle
+
+#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
diff --git a/tools/automate/automate-git.py b/tools/automate/automate-git.py
index 769549e..0f21f95 100644
--- a/tools/automate/automate-git.py
+++ b/tools/automate/automate-git.py
@@ -497,6 +497,8 @@ def get_build_directory_name(is_debug):
build_dir += 'GN_x64'
elif options.armbuild:
build_dir += 'GN_arm'
+ elif options.arm64build:
+ build_dir += 'GN_arm64'
else:
build_dir += 'GN_x86'
else:
@@ -817,6 +819,12 @@ parser.add_option(
dest='armbuild',
default=False,
help='Create an ARM build.')
+parser.add_option(
+ '--arm64-build',
+ action='store_true',
+ dest='arm64build',
+ default=False,
+ help='Create an ARM64 build.')
# Test-related options.
parser.add_option(
@@ -959,7 +967,7 @@ if (options.noreleasebuild and \
parser.print_help(sys.stderr)
sys.exit()
-if options.x64build and options.armbuild:
+if options.x64build + options.armbuild + options.arm64build > 1:
print 'Invalid combination of options.'
parser.print_help(sys.stderr)
sys.exit()
@@ -1025,6 +1033,9 @@ branch_is_2743_or_older = (cef_branch != 'trunk' and int(cef_branch) <= 2743)
# True if the requested branch is newer than 2785.
branch_is_newer_than_2785 = (cef_branch == 'trunk' or int(cef_branch) > 2785)
+# True if the requested branch is newer than 2840.
+branch_is_newer_than_2840 = (cef_branch == 'trunk' or int(cef_branch) > 2840)
+
# True if the requested branch is 3029 or older.
branch_is_3029_or_older = (cef_branch != 'trunk' and int(cef_branch) <= 3029)
@@ -1050,8 +1061,18 @@ if use_gn:
if not branch_is_newer_than_2785:
print 'The ARM build option is not supported with branch 2785 and older.'
sys.exit()
+
+ if options.arm64build:
+ if platform != 'linux':
+ print 'The ARM64 build option is only supported on Linux.'
+ sys.exit()
+
+ if not branch_is_newer_than_2840:
+ print 'The ARM64 build option is not supported with branch 2840 and older.'
+ sys.exit()
+
else:
- if options.armbuild:
+ if options.armbuild or options.arm64build:
print 'The ARM build option is not supported by GYP.'
sys.exit()
@@ -1642,6 +1663,8 @@ if not options.nodistrib and (chromium_checkout_changed or \
path = path + ' --x64-build'
elif options.armbuild:
path = path + ' --arm-build'
+ elif options.arm64build:
+ path = path + ' --arm64-build'
if type == 'minimal':
path = path + ' --minimal'
diff --git a/tools/cefbuilds/cef_html_builder.py b/tools/cefbuilds/cef_html_builder.py
index 1916701..6ab50ae 100644
--- a/tools/cefbuilds/cef_html_builder.py
+++ b/tools/cefbuilds/cef_html_builder.py
@@ -123,6 +123,7 @@ class cef_html_builder:
'linux32': 'Linux 32-bit',
'linux64': 'Linux 64-bit',
'linuxarm': 'Linux ARM',
+ 'linuxarm64': 'Linux ARM64',
'macosx64': 'Mac OS X 64-bit',
'windows32': 'Windows 32-bit',
'windows64': 'Windows 64-bit'
diff --git a/tools/cefbuilds/cef_json_builder.py b/tools/cefbuilds/cef_json_builder.py
index 292d465..cbcf60f 100644
--- a/tools/cefbuilds/cef_json_builder.py
+++ b/tools/cefbuilds/cef_json_builder.py
@@ -92,7 +92,7 @@ class cef_json_builder:
@staticmethod
def get_platforms():
""" Returns the list of supported platforms. """
- return ('linux32', 'linux64', 'linuxarm', 'macosx64', 'windows32',
+ return ('linux32', 'linux64', 'linuxarm', 'linuxarm64', 'macosx64', 'windows32',
'windows64')
@staticmethod
diff --git a/tools/gn_args.py b/tools/gn_args.py
index a51b9c3..57c00ee 100644
--- a/tools/gn_args.py
+++ b/tools/gn_args.py
@@ -318,13 +318,15 @@ def ValidateArgs(args):
assert target_cpu in ('x86', 'x64'), 'target_cpu must be "x86" or "x64"'
elif platform == 'linux':
assert target_cpu in ('x86', 'x64',
- 'arm'), 'target_cpu must be "x86", "x64" or "arm"'
+ 'arm', 'arm64'), 'target_cpu must be "x86", "x64", "arm" or "arm64"'
if platform == 'linux':
if target_cpu == 'x86':
assert use_sysroot, 'target_cpu="x86" requires use_sysroot=true'
elif target_cpu == 'arm':
assert use_sysroot, 'target_cpu="arm" requires use_sysroot=true'
+ elif target_cpu == 'arm64':
+ assert use_sysroot, 'target_cpu="arm64" requires use_sysroot=true'
# ASAN requires Release builds.
if is_asan:
@@ -421,7 +423,7 @@ def GetConfigArgs(args, is_debug, cpu):
'target_cpu': cpu,
})
- if platform == 'linux' and cpu != 'arm':
+ if platform == 'linux' and not cpu.startswith('arm'):
# Remove any arm-related values from non-arm configs.
for key in result.keys():
if key.startswith('arm_'):
@@ -470,6 +472,8 @@ def LinuxSysrootExists(cpu):
sysroot_name = 'debian_sid_amd64-sysroot'
elif cpu == 'arm':
sysroot_name = 'debian_sid_arm-sysroot'
+ elif cpu == 'arm64':
+ sysroot_name = 'debian_sid_arm64-sysroot'
else:
raise Exception('Unrecognized sysroot CPU: %s' % cpu)
@@ -498,7 +502,7 @@ def GetAllPlatformConfigs(build_args):
use_sysroot = GetArgValue(args, 'use_sysroot')
if use_sysroot:
# Only generate configurations for sysroots that have been installed.
- for cpu in ('x86', 'x64', 'arm'):
+ for cpu in ('x86', 'x64', 'arm', 'arm64'):
if LinuxSysrootExists(cpu):
supported_cpus.append(cpu)
else:
diff --git a/tools/make_distrib.py b/tools/make_distrib.py
index 8097c3e..b179399 100644
--- a/tools/make_distrib.py
+++ b/tools/make_distrib.py
@@ -479,6 +479,12 @@ parser.add_option(
default=False,
help='create an ARM binary distribution (Linux only)')
parser.add_option(
+ '--arm64-build',
+ action='store_true',
+ dest='arm64build',
+ default=False,
+ help='create an ARM64 binary distribution (Linux only)')
+parser.add_option(
'--minimal',
action='store_true',
dest='minimal',
@@ -524,13 +530,13 @@ if options.minimal and options.client:
parser.print_help(sys.stderr)
sys.exit()
-if options.x64build and options.armbuild:
- print 'Cannot specify both --x64-build and --arm-build'
+if options.x64build + options.armbuild + options.arm64build > 1:
+ print 'Invalid combination of options.'
parser.print_help(sys.stderr)
sys.exit()
-if options.armbuild and platform != 'linux':
- print '--arm-build is only supported on Linux.'
+if (options.armbuild or options.arm64build) and platform != 'linux':
+ print '--arm-build or --arm64-build is only supported on Linux.'
sys.exit()
if options.sandbox and not platform in ('macosx', 'windows'):
@@ -579,6 +585,8 @@ if options.x64build:
platform_arch = '64'
elif options.armbuild:
platform_arch = 'arm'
+elif options.arm64build:
+ platform_arch = 'arm64'
else:
platform_arch = '32'
@@ -624,6 +632,8 @@ if options.x64build:
build_dir_suffix = '_GN_x64'
elif options.armbuild:
build_dir_suffix = '_GN_arm'
+elif options.arm64build:
+ build_dir_suffix = '_GN_arm64'
else:
build_dir_suffix = '_GN_x86'
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment