author    commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-05-29 13:26:32 +0000
committer commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-05-29 13:26:32 +0000
commit    6320e8f393539b7a536b32b5a072a474709da5ff (patch)
tree      83420d4b43486a23aac1c9ebc842df1216ca3911 /platform_tools/android
parent    0a4805e33f8ddb445a2fd061462e715e1707f049 (diff)
Remove dependency on gsutil for downloading the NDK
Currently we use gsutil to download the NDK. However, gsutil requires extra steps to install and is not convenient to configure (enabling Google Storage takes several steps), especially for developers behind a proxy, who must also set up a boto config. Chromium provides Python scripts for downloading NaCl toolchains that meet this need well, so this patch borrows two of those scripts and makes the corresponding changes to remove the use of gsutil. The new scripts also leave room for more advanced features, such as hash checking, which would make downloads more intelligent.

BUG=
R=djsollen@google.com, borenet@google.com, reed@google.com

Author: yang.gu@intel.com

Review URL: https://chromiumcodereview.appspot.com/15951008

git-svn-id: http://skia.googlecode.com/svn/trunk@9311 2bbb7eff-a529-9590-31e7-b0007b416f81
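The change works because objects in a public Google Storage bucket are also served over plain HTTP, so a stock urllib2 fetch can replace gsutil entirely. A minimal Python 2 sketch of the URL mapping this patch relies on (gs_to_http is a hypothetical helper name, not part of the patch):

  def gs_to_http(gs_uri):
    # Public gs://<bucket>/<path> objects are mirrored at
    # http://<bucket>.commondatastorage.googleapis.com/<path>.
    assert gs_uri.startswith('gs://')
    bucket, _, path = gs_uri[len('gs://'):].partition('/')
    return 'http://%s.commondatastorage.googleapis.com/%s' % (bucket, path)

  print gs_to_http('gs://chromium-skia-gm/android-toolchains/foo.tgz')
  # -> http://chromium-skia-gm.commondatastorage.googleapis.com/android-toolchains/foo.tgz

This is exactly the rewrite visible in android_setup.sh below, where the gs:// URL passed to gsutil cp becomes a commondatastorage.googleapis.com URL passed to download_toolchains.py.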
Diffstat (limited to 'platform_tools/android')
-rwxr-xr-x  platform_tools/android/bin/android_setup.sh       |  15
-rwxr-xr-x  platform_tools/android/bin/download_toolchains.py |  19
-rwxr-xr-x  platform_tools/android/bin/download_utils.py      | 323
-rwxr-xr-x  platform_tools/android/bin/http_download.py       |  92
4 files changed, 442 insertions, 7 deletions
diff --git a/platform_tools/android/bin/android_setup.sh b/platform_tools/android/bin/android_setup.sh
index d71c2e630e..b955bea1cd 100755
--- a/platform_tools/android/bin/android_setup.sh
+++ b/platform_tools/android/bin/android_setup.sh
@@ -65,12 +65,6 @@ exportVar ANDROID_TOOLCHAIN ${TOOLCHAIN_DIR}/${TOOLCHAIN_TYPE}/bin
 # if the toolchain doesn't exist on your machine then we need to fetch it
 if [ ! -d "$ANDROID_TOOLCHAIN" ]; then
-  # gsutil must be installed on your system and in your PATH
-  gsutil version &> /dev/null
-  if [[ "$?" != "0" ]]; then
-    echo "ERROR: Unable to find gsutil. Please install it before proceeding."
-    exit 1
-  fi
   # create the toolchain directory if needed
   if [ ! -d "$TOOLCHAIN_DIR" ]; then
     mkdir $TOOLCHAIN_DIR
@@ -78,7 +72,14 @@ if [ ! -d "$ANDROID_TOOLCHAIN" ]; then
   # enter the toolchain directory then download, unpack, and remove the tarball
   pushd $TOOLCHAIN_DIR
   TARBALL=ndk-r$NDK_REV-v$API_LEVEL.tgz
-  gsutil cp gs://chromium-skia-gm/android-toolchains/$TARBALL $TARBALL
+
+  echo "Downloading $TARBALL ..."
+  ${SCRIPT_DIR}/download_toolchains.py http://chromium-skia-gm.commondatastorage.googleapis.com/android-toolchains/$TARBALL $TOOLCHAIN_DIR/$TARBALL
+  if [[ "$?" != "0" ]]; then
+    echo "ERROR: Unable to download toolchain $TARBALL."
+    exit 1
+  fi
+
   echo "Untarring $TOOLCHAIN_TYPE from $TARBALL."
   tar -xzf $TARBALL $TOOLCHAIN_TYPE
   echo "Removing $TARBALL"
diff --git a/platform_tools/android/bin/download_toolchains.py b/platform_tools/android/bin/download_toolchains.py
new file mode 100755
index 0000000000..9922283dc0
--- /dev/null
+++ b/platform_tools/android/bin/download_toolchains.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+
+"""Download a toolchain tarball.
+
+Fetches a single .tgz from the given URL to the given local path.
+"""
+
+import download_utils
+import sys
+
+url = sys.argv[1]
+filepath = sys.argv[2]
+
+try:
+  download_utils.SyncURL(url, filepath)
+  sys.exit(0)
+except download_utils.HashError:
+  sys.exit(1)
+
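android_setup.sh relies on this wrapper mapping a successful SyncURL call to exit status 0 and a hash mismatch to exit status 1. A sketch of calling the underlying library directly, with an illustrative URL and local path (the real tarball name comes from the shell variables above):

  import download_utils

  url = ('http://chromium-skia-gm.commondatastorage.googleapis.com'
         '/android-toolchains/example.tgz')  # hypothetical tarball name
  try:
    # With no stamp_dir or hash_val this is an unconditional download,
    # which matches how the wrapper is invoked by android_setup.sh.
    download_utils.SyncURL(url, '/tmp/example.tgz')
  except download_utils.HashError, e:
    print e  # reports the expected and actual SHA1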
diff --git a/platform_tools/android/bin/download_utils.py b/platform_tools/android/bin/download_utils.py
new file mode 100755
index 0000000000..298ba9a863
--- /dev/null
+++ b/platform_tools/android/bin/download_utils.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# Copyright (c) 2012 The Native Client Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A library to assist automatically downloading files.
+
+This library is used by scripts that download tarballs, zipfiles, etc. as part
+of the build process.
+"""
+
+import hashlib
+import http_download
+import os.path
+import re
+import shutil
+import sys
+import time
+import urllib2
+
+SOURCE_STAMP = 'SOURCE_URL'
+HASH_STAMP = 'SOURCE_SHA1'
+
+
+# Designed to handle more general inputs than sys.platform because the platform
+# name may come from the command line.
+PLATFORM_COLLAPSE = {
+    'windows': 'windows',
+    'win32': 'windows',
+    'cygwin': 'windows',
+    'linux': 'linux',
+    'linux2': 'linux',
+    'linux3': 'linux',
+    'darwin': 'mac',
+    'mac': 'mac',
+}
+
+ARCH_COLLAPSE = {
+    'i386'  : 'x86',
+    'i686'  : 'x86',
+    'x86_64': 'x86',
+    'armv7l': 'arm',
+}
+
+
+class HashError(Exception):
+  def __init__(self, download_url, expected_hash, actual_hash):
+    self.download_url = download_url
+    self.expected_hash = expected_hash
+    self.actual_hash = actual_hash
+
+  def __str__(self):
+    return 'Got hash "%s" but expected hash "%s" for "%s"' % (
+        self.actual_hash, self.expected_hash, self.download_url)
+
+
+def PlatformName(name=None):
+  if name is None:
+    name = sys.platform
+  return PLATFORM_COLLAPSE[name]
+
+def ArchName(name=None):
+  if name is None:
+    if PlatformName() == 'windows':
+      # TODO(pdox): Figure out how to auto-detect 32-bit vs 64-bit Windows.
+      name = 'i386'
+    else:
+      import platform
+      name = platform.machine()
+  return ARCH_COLLAPSE[name]
+
+def EnsureFileCanBeWritten(filename):
+  directory = os.path.dirname(filename)
+  if not os.path.exists(directory):
+    os.makedirs(directory)
+
+
+def WriteData(filename, data):
+  EnsureFileCanBeWritten(filename)
+  f = open(filename, 'wb')
+  f.write(data)
+  f.close()
+
+
+def WriteDataFromStream(filename, stream, chunk_size, verbose=True):
+  EnsureFileCanBeWritten(filename)
+  dst = open(filename, 'wb')
+  try:
+    while True:
+      data = stream.read(chunk_size)
+      if len(data) == 0:
+        break
+      dst.write(data)
+      if verbose:
+        # Indicate that we're still writing.
+        sys.stdout.write('.')
+        sys.stdout.flush()
+  finally:
+    if verbose:
+      sys.stdout.write('\n')
+    dst.close()
+
+
+def DoesStampMatch(stampfile, expected, index):
+  try:
+    f = open(stampfile, 'r')
+    stamp = f.read()
+    f.close()
+    if stamp.split('\n')[index] == expected:
+      return "already up-to-date."
+    elif stamp.startswith('manual'):
+      return "manual override."
+    return False
+  except IOError:
+    return False
+
+
+def WriteStamp(stampfile, data):
+  EnsureFileCanBeWritten(stampfile)
+  f = open(stampfile, 'w')
+  f.write(data)
+  f.close()
+
+
+def StampIsCurrent(path, stamp_name, stamp_contents, min_time=None, index=0):
+  stampfile = os.path.join(path, stamp_name)
+
+  # Check if the stampfile is older than the minimum last mod time
+  if min_time:
+    try:
+      stamp_time = os.stat(stampfile).st_mtime
+      if stamp_time <= min_time:
+        return False
+    except OSError:
+      return False
+
+  return DoesStampMatch(stampfile, stamp_contents, index)
+
+
+def WriteSourceStamp(path, url):
+  stampfile = os.path.join(path, SOURCE_STAMP)
+  WriteStamp(stampfile, url)
+
+def WriteHashStamp(path, hash_val):
+  hash_stampfile = os.path.join(path, HASH_STAMP)
+  WriteStamp(hash_stampfile, hash_val)
+
+
+def Retry(op, *args):
+  # Windows seems to be prone to having commands that delete files or
+  # directories fail. We currently do not have a complete understanding why,
+  # and as a workaround we simply retry the command a few times.
+  # It appears that file locks are hanging around longer than they should. This
+  # may be a secondary effect of processes hanging around longer than they
+  # should. This may be because when we kill a browser sel_ldr does not exit
+  # immediately, etc.
+  # Virus checkers can also accidentally prevent files from being deleted, but
+  # that shouldn't be a problem on the bots.
+  if sys.platform in ('win32', 'cygwin'):
+    count = 0
+    while True:
+      try:
+        op(*args)
+        break
+      except Exception:
+        sys.stdout.write("FAILED: %s %s\n" % (op.__name__, repr(args)))
+        count += 1
+        if count < 5:
+          sys.stdout.write("RETRY: %s %s\n" % (op.__name__, repr(args)))
+          time.sleep(pow(2, count))
+        else:
+          # Don't mask the exception.
+          raise
+  else:
+    op(*args)
+
+
+def MoveDirCleanly(src, dst):
+  RemoveDir(dst)
+  MoveDir(src, dst)
+
+
+def MoveDir(src, dst):
+  Retry(shutil.move, src, dst)
+
+
+def RemoveDir(path):
+  if os.path.exists(path):
+    Retry(shutil.rmtree, path)
+
+
+def RemoveFile(path):
+  if os.path.exists(path):
+    Retry(os.unlink, path)
+
+
+def _HashFileHandle(fh):
+  """sha1 of a file-like object.
+
+  Arguments:
+    fh: file-like object to hash.
+  Returns:
+    sha1 as a string.
+  """
+  hasher = hashlib.sha1()
+  try:
+    while True:
+      data = fh.read(4096)
+      if not data:
+        break
+      hasher.update(data)
+  finally:
+    fh.close()
+  return hasher.hexdigest()
+
+
+def HashFile(filename):
+  """sha1 a file on disk.
+
+  Arguments:
+    filename: filename to hash.
+  Returns:
+    sha1 as a string.
+  """
+  fh = open(filename, 'rb')
+  return _HashFileHandle(fh)
+
+
+def HashUrlByDownloading(url):
+  """sha1 the data at a URL.
+
+  Arguments:
+    url: URL to download from.
+  Returns:
+    sha1 of the data at the URL.
+  """
+  try:
+    fh = urllib2.urlopen(url)
+  except:
+    sys.stderr.write("Failed fetching URL: %s\n" % url)
+    raise
+  return _HashFileHandle(fh)
+
+
+# Attempts to get the SHA1 hash of a file given a URL by looking for
+# an adjacent file with a ".sha1hash" suffix. This saves having to
+# download a large tarball just to get its hash. Otherwise, we fall
+# back to downloading the main file.
+def HashUrl(url):
+  hash_url = '%s.sha1hash' % url
+  try:
+    fh = urllib2.urlopen(hash_url)
+    data = fh.read(100)
+    fh.close()
+  except urllib2.HTTPError, exn:
+    if exn.code == 404:
+      return HashUrlByDownloading(url)
+    raise
+  else:
+    if not re.match('[0-9a-f]{40}\n?$', data):
+      raise AssertionError('Bad SHA1 hash file: %r' % data)
+    return data.strip()
+
+
+def SyncURL(url, filename=None, stamp_dir=None, min_time=None,
+            hash_val=None, keep=False, verbose=False, stamp_index=0):
+  """Synchronize a destination file with a URL.
+
+  If the URL does not match the URL stamp, then we must re-download it.
+
+  Arguments:
+    url: the URL to compare against and to download from
+    filename: the file to create on download
+    min_time: ignore stamp files older than this modification time
+    stamp_dir: the directory containing the stamp files to check against
+    hash_val: if set, the expected hash which must be matched
+    verbose: prints out status as it runs
+    stamp_index: index within the stamp file to check.
+  Returns:
+    True if the file is replaced
+    False if the file is not replaced
+  Exception:
+    HashError: if the hash does not match
+  """
+
+  assert url and filename
+
+  # If we are not keeping the tarball, or we already have it, we can
+  # skip downloading it for this reason. If we are keeping it,
+  # it must exist.
+  if keep:
+    tarball_ok = os.path.isfile(filename)
+  else:
+    tarball_ok = True
+
+  # If we don't need the tarball and the stamp_file matches the url, then
+  # we must be up to date. If the URL differs but the recorded hash matches
+  # the one we'll insist the tarball has, then that's good enough too.
+  # TODO(mcgrathr): Download the .sha1sum file first to compare with
+  # the cached hash, in case --file-hash options weren't used.
+  if tarball_ok and stamp_dir is not None:
+    if StampIsCurrent(stamp_dir, SOURCE_STAMP, url, min_time):
+      if verbose:
+        print '%s is already up to date.' % filename
+      return False
+    if (hash_val is not None and
+        StampIsCurrent(stamp_dir, HASH_STAMP, hash_val, min_time, stamp_index)):
+      if verbose:
+        print '%s is identical to the up to date file.' % filename
+      return False
+
+  if verbose:
+    print 'Updating %s\n\tfrom %s.' % (filename, url)
+  EnsureFileCanBeWritten(filename)
+  http_download.HttpDownload(url, filename)
+
+  if hash_val:
+    tar_hash = HashFile(filename)
+    if hash_val != tar_hash:
+      raise HashError(actual_hash=tar_hash, expected_hash=hash_val,
+                      download_url=url)
+
+  return True
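The stamp files (SOURCE_URL, SOURCE_SHA1) are what would make repeated syncs cheap: SyncURL consults them via StampIsCurrent, but writing them after a successful download is left to the caller, and this patch does not use them yet. A sketch of that round trip under assumed paths:

  import download_utils

  url = 'http://example.com/toolchain.tgz'    # hypothetical URL
  tarball = '/tmp/toolchains/toolchain.tgz'   # hypothetical paths
  stamp_dir = '/tmp/toolchains'

  # Returns False (skipping the download) once the SOURCE_URL stamp matches.
  if download_utils.SyncURL(url, tarball, stamp_dir=stamp_dir, verbose=True):
    download_utils.WriteSourceStamp(stamp_dir, url)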
diff --git a/platform_tools/android/bin/http_download.py b/platform_tools/android/bin/http_download.py
new file mode 100755
index 0000000000..15f7983b77
--- /dev/null
+++ b/platform_tools/android/bin/http_download.py
@@ -0,0 +1,92 @@
+#!/usr/bin/python
+# Copyright (c) 2012 The Native Client Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Download a file from a URL to a file on disk.
+
+This module supports username and password with basic authentication.
+"""
+
+import base64
+import os
+import os.path
+import sys
+import urllib2
+
+import download_utils
+
+
+def _CreateDirectory(path):
+  """Create a directory tree, ignore if it's already there."""
+  try:
+    os.makedirs(path)
+    return True
+  except os.error:
+    return False
+
+
+def HttpDownload(url, target, username=None, password=None, verbose=True,
+                 logger=None):
+  """Download a file from a remote server.
+
+  Args:
+    url: A URL to download from.
+    target: Filename to write download to.
+    username: Optional username for download.
+    password: Optional password for download (ignored if no username).
+    logger: Function to log events to.
+  """
+
+  # Log to stdout by default.
+  if logger is None:
+    logger = sys.stdout.write
+  headers = [('Accept', '*/*')]
+  if username:
+    if password:
+      auth_code = base64.b64encode(username + ':' + password)
+    else:
+      auth_code = base64.b64encode(username)
+    headers.append(('Authorization', 'Basic ' + auth_code))
+  if os.environ.get('http_proxy'):
+    proxy = os.environ.get('http_proxy')
+    proxy_handler = urllib2.ProxyHandler({
+        'http': proxy,
+        'https': proxy})
+    opener = urllib2.build_opener(proxy_handler)
+  else:
+    opener = urllib2.build_opener()
+  opener.addheaders = headers
+  urllib2.install_opener(opener)
+  _CreateDirectory(os.path.split(target)[0])
+  # Retry up to 10 times (appengine logger is flaky).
+  for i in xrange(10):
+    if i:
+      logger('Download failed on %s, retrying... (%d)\n' % (url, i))
+    try:
+      # 30 second timeout to ensure we fail and retry on stalled connections.
+      src = urllib2.urlopen(url, timeout=30)
+      try:
+        download_utils.WriteDataFromStream(target, src, chunk_size=2**20,
+                                           verbose=verbose)
+        content_len = src.headers.get('Content-Length')
+        if content_len:
+          content_len = int(content_len)
+          file_size = os.path.getsize(target)
+          if content_len != file_size:
+            logger('Filesize:%d does not match Content-Length:%d' % (
+                file_size, content_len))
+            continue
+      finally:
+        src.close()
+      break
+    except urllib2.HTTPError, e:
+      if e.code == 404:
+        logger('Resource does not exist.\n')
+        raise
+      logger('Failed to open.\n')
+    except urllib2.URLError:
+      logger('Failed mid stream.\n')
+  else:
+    logger('Download failed on %s, giving up.\n' % url)
+    raise
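Because HttpDownload builds its opener from the http_proxy environment variable, developers behind a proxy (the pain point called out in the commit message) only need that standard variable set; no boto configuration is involved. An illustrative direct call, with a hypothetical proxy and tarball name:

  import os
  import http_download

  # Optional: route the fetch through a proxy; picked up automatically.
  os.environ['http_proxy'] = 'http://proxy.example.com:8080'

  http_download.HttpDownload(
      'http://chromium-skia-gm.commondatastorage.googleapis.com'
      '/android-toolchains/example.tgz',  # hypothetical tarball name
      '/tmp/example.tgz')
  # Retries up to 10 times, with a 30-second socket timeout per attempt.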