Path: blob/develop/tests/integration/customizations/s3/test_plugin.py
# -*- coding: utf-8 -*-
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

# The following tests are performed to ensure that the commands work.
# It does not check every possible parameter that can be thrown as
# those are checked by tests in other classes
import os
import platform
import contextlib
import time
import stat
import signal
import string
import socket
import tempfile
import shutil
import copy
import logging

import pytest

from awscli.compat import BytesIO, urlopen
import botocore.session

from awscli.testutils import unittest, get_stdout_encoding
from awscli.testutils import skip_if_windows
from awscli.testutils import aws as _aws
from awscli.testutils import BaseS3CLICommand
from awscli.testutils import random_chars, random_bucket_name
from awscli.customizations.s3.transferconfig import DEFAULTS
from awscli.customizations.scalarparse import add_scalar_parsers, identity


# Using the same log name as testutils.py
LOG = logging.getLogger('awscli.tests.integration')
_SHARED_BUCKET = random_bucket_name()
_NON_EXISTENT_BUCKET = random_bucket_name()
_DEFAULT_REGION = 'us-west-2'
_DEFAULT_AZ = 'usw2-az1'
_SHARED_DIR_BUCKET = f'{random_bucket_name()}--{_DEFAULT_AZ}--x-s3'


def setup_module():
    s3 = botocore.session.get_session().create_client('s3')
    waiter = s3.get_waiter('bucket_exists')
    params = {
        'Bucket': _SHARED_BUCKET,
        'CreateBucketConfiguration': {
            'LocationConstraint': _DEFAULT_REGION,
        },
        'ObjectOwnership': 'ObjectWriter'
    }
    dir_bucket_params = {
        'Bucket': _SHARED_DIR_BUCKET,
        'CreateBucketConfiguration': {
            'Location': {
                'Type': 'AvailabilityZone',
                'Name': _DEFAULT_AZ
            },
            'Bucket': {
                'Type': 'Directory',
                'DataRedundancy': 'SingleAvailabilityZone'
            }
        }
    }
    try:
        s3.create_bucket(**params)
        s3.create_bucket(**dir_bucket_params)
    except Exception as e:
        # A create_bucket can fail for a number of reasons.
        # We're going to defer to the waiter below to make the
        # final call as to whether or not the bucket exists.
        LOG.debug("create_bucket() raised an exception: %s", e, exc_info=True)
    waiter.wait(Bucket=_SHARED_BUCKET)
    waiter.wait(Bucket=_SHARED_DIR_BUCKET)
    s3.delete_public_access_block(
        Bucket=_SHARED_BUCKET
    )

    # Validate that "_NON_EXISTENT_BUCKET" doesn't exist.
    waiter = s3.get_waiter('bucket_not_exists')
    try:
        waiter.wait(Bucket=_NON_EXISTENT_BUCKET)
    except Exception as e:
        LOG.debug(
            "The following bucket was unexpectedly discovered: %s",
            _NON_EXISTENT_BUCKET,
            exc_info=True,
        )


def clear_out_bucket(bucket, delete_bucket=False):
    s3 = botocore.session.get_session().create_client(
        's3', region_name=_DEFAULT_REGION)
    paginator = s3.get_paginator('list_objects_v2')
    # Use pages paired with batch delete_objects().
    for page in paginator.paginate(Bucket=bucket):
        keys = [{'Key': obj['Key']} for obj in page.get('Contents', [])]
        if keys:
            s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
    if delete_bucket:
        try:
            s3.delete_bucket(Bucket=bucket)
        except Exception as e:
            # We can sometimes get exceptions when trying to
            # delete a bucket. We'll let the waiter make
            # the final call as to whether the bucket was able
            # to be deleted.
            LOG.debug("delete_bucket() raised an exception: %s",
                      e, exc_info=True)
        waiter = s3.get_waiter('bucket_not_exists')
        waiter.wait(Bucket=bucket)
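

# Illustrative sketch only (not used by the tests): how the shared directory
# bucket name above is put together. The S3 Express directory buckets used
# here follow the '<base>--<az-id>--x-s3' convention shown in
# _SHARED_DIR_BUCKET; the helper name below is hypothetical.
def _example_dir_bucket_name(base_name, az_id=_DEFAULT_AZ):
    return f'{base_name}--{az_id}--x-s3'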


def teardown_module():
    clear_out_bucket(_SHARED_BUCKET, delete_bucket=True)
    clear_out_bucket(_SHARED_DIR_BUCKET, delete_bucket=True)


@contextlib.contextmanager
def cd(directory):
    original = os.getcwd()
    try:
        os.chdir(directory)
        yield
    finally:
        os.chdir(original)


def aws(command, collect_memory=False, env_vars=None, wait_for_finish=True,
        input_data=None, input_file=None):
    if not env_vars:
        env_vars = os.environ.copy()
        env_vars['AWS_DEFAULT_REGION'] = "us-west-2"
    return _aws(command, collect_memory=collect_memory, env_vars=env_vars,
                wait_for_finish=wait_for_finish, input_data=input_data,
                input_file=input_file)


def wait_for_process_exit(process, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        rc = process.poll()
        if rc is not None:
            break
        time.sleep(1)
    else:
        process.kill()
        raise AssertionError("CLI did not exit within %s seconds of "
                             "receiving a Ctrl+C" % timeout)


def _running_on_rhel():
    return (
        hasattr(platform, 'linux_distribution') and
        platform.linux_distribution()[0] == 'Red Hat Enterprise Linux Server')
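

# Illustrative usage sketch (assumption: not part of the suite). Commands in
# these tests go through the aws() wrapper above, which pins
# AWS_DEFAULT_REGION to us-west-2 unless an env_vars mapping is supplied; the
# result's rc/stdout/stderr attributes are what the assertions below inspect.
def _example_aws_usage(bucket_name):
    p = aws('s3 ls s3://%s' % bucket_name)
    return p.rc == 0 and p.stderr == ''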


class BaseS3IntegrationTest(BaseS3CLICommand):

    def setUp(self):
        clear_out_bucket(_SHARED_BUCKET)
        clear_out_bucket(_SHARED_DIR_BUCKET)
        super(BaseS3IntegrationTest, self).setUp()


class TestMoveCommand(BaseS3IntegrationTest):
    def assert_mv_local_to_s3(self, bucket_name):
        full_path = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws('s3 mv %s s3://%s/foo.txt' % (full_path,
                                              bucket_name))
        self.assert_no_errors(p)
        # When we move an object, the local file is gone:
        self.assertTrue(not os.path.exists(full_path))
        # And now resides in s3.
        self.assert_key_contents_equal(bucket_name, 'foo.txt',
                                       'this is foo.txt')

    def assert_mv_s3_to_local(self, bucket_name):
        self.put_object(bucket_name, 'foo.txt', 'this is foo.txt')
        full_path = self.files.full_path('foo.txt')
        self.assertTrue(self.key_exists(bucket_name, key_name='foo.txt'))
        p = aws('s3 mv s3://%s/foo.txt %s' % (bucket_name, full_path))
        self.assert_no_errors(p)
        self.assertTrue(os.path.exists(full_path))
        with open(full_path, 'r') as f:
            self.assertEqual(f.read(), 'this is foo.txt')
        # The s3 file should not be there anymore.
        self.assertTrue(self.key_not_exists(bucket_name, key_name='foo.txt'))

    def assert_mv_s3_to_s3(self, from_bucket, create_bucket_call):
        to_bucket = create_bucket_call()
        self.put_object(from_bucket, 'foo.txt', 'this is foo.txt')

        p = aws('s3 mv s3://%s/foo.txt s3://%s/foo.txt' % (from_bucket,
                                                           to_bucket))
        self.assert_no_errors(p)
        contents = self.get_key_contents(to_bucket, 'foo.txt')
        self.assertEqual(contents, 'this is foo.txt')
        # And verify that the object no longer exists in the from_bucket.
        self.assertTrue(self.key_not_exists(from_bucket, key_name='foo.txt'))

    def test_mv_local_to_s3(self):
        self.assert_mv_local_to_s3(_SHARED_BUCKET)

    def test_mv_local_to_s3_express(self):
        self.assert_mv_local_to_s3(_SHARED_DIR_BUCKET)

    def test_mv_s3_to_local(self):
        self.assert_mv_s3_to_local(_SHARED_BUCKET)

    def test_mv_s3_express_to_local(self):
        self.assert_mv_s3_to_local(_SHARED_DIR_BUCKET)

    def test_mv_s3_to_s3(self):
        self.assert_mv_s3_to_s3(_SHARED_BUCKET, self.create_bucket)

    def test_mv_s3_to_s3_express(self):
        self.assert_mv_s3_to_s3(_SHARED_BUCKET, self.create_dir_bucket)

    def test_mv_s3_express_to_s3_express(self):
        self.assert_mv_s3_to_s3(_SHARED_DIR_BUCKET, self.create_dir_bucket)

    def test_mv_s3_express_to_s3(self):
        self.assert_mv_s3_to_s3(_SHARED_DIR_BUCKET, self.create_bucket)

    @pytest.mark.slow
    def test_mv_s3_to_s3_multipart(self):
        from_bucket = _SHARED_BUCKET
        to_bucket = self.create_bucket()
        file_contents = BytesIO(b'abcd' * (1024 * 1024 * 10))
        self.put_object(from_bucket, 'foo.txt', file_contents)

        p = aws('s3 mv s3://%s/foo.txt s3://%s/foo.txt' % (from_bucket,
                                                           to_bucket))
        self.assert_no_errors(p)
        self.assert_key_contents_equal(to_bucket, 'foo.txt', file_contents)
        # And verify that the object no longer exists in the from_bucket.
        self.assertTrue(self.key_not_exists(from_bucket, key_name='foo.txt'))

    def test_mv_s3_to_s3_multipart_recursive(self):
        from_bucket = _SHARED_BUCKET
        to_bucket = self.create_bucket()

        large_file_contents = BytesIO(b'abcd' * (1024 * 1024 * 10))
        small_file_contents = 'small file contents'
        self.put_object(from_bucket, 'largefile', large_file_contents)
        self.put_object(from_bucket, 'smallfile', small_file_contents)

        p = aws('s3 mv s3://%s/ s3://%s/ --recursive' % (from_bucket,
                                                         to_bucket))
        self.assert_no_errors(p)
        # Nothing's in the from_bucket.
        self.assertTrue(self.key_not_exists(from_bucket,
                                            key_name='largefile'))
        self.assertTrue(self.key_not_exists(from_bucket,
                                            key_name='smallfile'))

        # And both files are in the to_bucket.
        self.assertTrue(self.key_exists(to_bucket, key_name='largefile'))
        self.assertTrue(self.key_exists(to_bucket, key_name='smallfile'))

        # And the contents are what we expect.
        self.assert_key_contents_equal(to_bucket, 'smallfile',
                                       small_file_contents)
        self.assert_key_contents_equal(to_bucket, 'largefile',
                                       large_file_contents)

    def test_mv_s3_to_s3_with_sig4(self):
        to_region = 'eu-central-1'
        from_region = 'us-west-2'

        from_bucket = self.create_bucket(region=from_region)
        to_bucket = self.create_bucket(region=to_region)

        file_name = 'hello.txt'
        file_contents = 'hello'
        self.put_object(from_bucket, file_name, file_contents)

        p = aws('s3 mv s3://{0}/{4} s3://{1}/{4} '
                '--source-region {2} --region {3}'
                .format(from_bucket, to_bucket, from_region, to_region,
                        file_name))
        self.assert_no_errors(p)

        self.assertTrue(self.key_not_exists(from_bucket, file_name))
        self.assertTrue(self.key_exists(to_bucket, file_name))

    @pytest.mark.slow
    def test_mv_with_large_file(self):
        bucket_name = _SHARED_BUCKET
        # 40MB will force a multipart upload.
        file_contents = BytesIO(b'abcd' * (1024 * 1024 * 10))
        foo_txt = self.files.create_file(
            'foo.txt', file_contents.getvalue().decode('utf-8'))
        p = aws('s3 mv %s s3://%s/foo.txt' % (foo_txt, bucket_name))
        self.assert_no_errors(p)
        # When we move an object, the local file is gone:
        self.assertTrue(not os.path.exists(foo_txt))
        # And now resides in s3.
        self.assert_key_contents_equal(bucket_name, 'foo.txt', file_contents)

        # Now verify we can download this file.
        p = aws('s3 mv s3://%s/foo.txt %s' % (bucket_name, foo_txt))
        self.assert_no_errors(p)
        self.assertTrue(os.path.exists(foo_txt))
        self.assertEqual(os.path.getsize(foo_txt),
                         len(file_contents.getvalue()))

    def test_mv_to_nonexistent_bucket(self):
        full_path = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws(f's3 mv {full_path} s3://{_NON_EXISTENT_BUCKET}/foo.txt')
        self.assertEqual(p.rc, 1)

    def test_cant_move_file_onto_itself_small_file(self):
        # We don't even need a remote file in this case. We can
        # immediately validate that we can't move a file onto itself.
        bucket_name = _SHARED_BUCKET
        self.put_object(bucket_name, key_name='key.txt', contents='foo')
        p = aws('s3 mv s3://%s/key.txt s3://%s/key.txt' %
                (bucket_name, bucket_name))
        self.assertEqual(p.rc, 255)
        self.assertIn('Cannot mv a file onto itself', p.stderr)

    def test_cant_move_large_file_onto_itself(self):
        # At the API level, you can multipart copy an object onto itself,
        # but a mv command doesn't make sense because a mv is just a
        # cp + an rm of the src file. We should be consistent and
        # not allow large files to be mv'd onto themselves.
        file_contents = BytesIO(b'a' * (1024 * 1024 * 10))
        bucket_name = _SHARED_BUCKET
        self.put_object(bucket_name, key_name='key.txt',
                        contents=file_contents)
        p = aws('s3 mv s3://%s/key.txt s3://%s/key.txt' %
                (bucket_name, bucket_name))
        self.assertEqual(p.rc, 255)
        self.assertIn('Cannot mv a file onto itself', p.stderr)


class TestRm(BaseS3IntegrationTest):
    def assert_rm_with_page_size(self, bucket_name):
        self.put_object(bucket_name, 'foo.txt', contents='hello world')
        self.put_object(bucket_name, 'bar.txt', contents='hello world2')
        p = aws('s3 rm s3://%s/ --recursive --page-size 1' % bucket_name)
        self.assert_no_errors(p)

        self.assertTrue(self.key_not_exists(bucket_name, key_name='foo.txt'))
        self.assertTrue(self.key_not_exists(bucket_name, key_name='bar.txt'))

    @skip_if_windows('Newline in filename test not valid on windows.')
    # Windows won't let you do this. You'll get:
    # [Errno 22] invalid mode ('w') or filename:
    # 'c:\\windows\\temp\\tmp0fv8uu\\foo\r.txt'
    def test_rm_with_newlines(self):
        bucket_name = _SHARED_BUCKET

        # Note the carriage return in the key name.
        foo_txt = self.files.create_file('foo\r.txt', 'this is foo.txt')
        p = aws('s3 cp %s s3://%s/foo\r.txt' % (foo_txt, bucket_name))
        self.assert_no_errors(p)

        # Make sure object is in bucket.
        self.assertTrue(self.key_exists(bucket_name, key_name='foo\r.txt'))

        # Then delete the file.
        p = aws('s3 rm s3://%s/ --recursive' % (bucket_name,))

        # And verify it's gone.
        self.assertTrue(self.key_not_exists(bucket_name, key_name='foo\r.txt'))

    def test_rm_with_page_size(self):
        self.assert_rm_with_page_size(_SHARED_BUCKET)

    def test_s3_express_rm_with_page_size(self):
        self.assert_rm_with_page_size(_SHARED_DIR_BUCKET)
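

# Illustrative sketch (assumption: not part of this suite). The
# test_guess_mime_type case in TestCp below checks extension-based
# content-type guessing for an uploaded '.jpeg' file; the standard library's
# mimetypes module demonstrates the same extension-to-type mapping.
def _example_guess_content_type(filename='bar.jpeg'):
    import mimetypes
    guessed, _ = mimetypes.guess_type(filename)
    return guessed  # 'image/jpeg' for a .jpeg extension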


class TestCp(BaseS3IntegrationTest):

    def assert_cp_to_and_from_s3(self, bucket_name):
        # This tests the ability to put a single file in s3,
        # move it to a different bucket,
        # and download the file locally.

        # copy file into bucket.
        foo_txt = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws('s3 cp %s s3://%s/foo.txt' % (foo_txt, bucket_name))
        self.assert_no_errors(p)

        # Make sure object is in bucket.
        self.assertTrue(self.key_exists(bucket_name, key_name='foo.txt'))
        self.assertEqual(
            self.get_key_contents(bucket_name, key_name='foo.txt'),
            'this is foo.txt')

        self.assertEqual(
            self.content_type_for_key(bucket_name, key_name='foo.txt'),
            'text/plain')

        # Make a new name for the file and copy it locally.
        full_path = self.files.full_path('bar.txt')
        p = aws('s3 cp s3://%s/foo.txt %s' % (bucket_name, full_path))
        self.assert_no_errors(p)

        with open(full_path, 'r') as f:
            self.assertEqual(f.read(), 'this is foo.txt')

    def test_cp_to_and_from_s3(self):
        self.assert_cp_to_and_from_s3(_SHARED_BUCKET)

    def test_cp_to_and_from_s3_express(self):
        self.assert_cp_to_and_from_s3(_SHARED_DIR_BUCKET)

    def test_cp_without_trailing_slash(self):
        # There's a unit test for this, but we still want to verify this
        # with an integration test.
        bucket_name = _SHARED_BUCKET

        # copy file into bucket.
        foo_txt = self.files.create_file('foo.txt', 'this is foo.txt')
        # Note that the destination has no trailing slash.
        p = aws('s3 cp %s s3://%s' % (foo_txt, bucket_name))
        self.assert_no_errors(p)

        # Make sure object is in bucket.
        self.assertTrue(self.key_exists(bucket_name, key_name='foo.txt'))
        self.assertEqual(
            self.get_key_contents(bucket_name, key_name='foo.txt'),
            'this is foo.txt')

    @pytest.mark.slow
    def test_cp_s3_s3_multipart(self):
        from_bucket = _SHARED_BUCKET
        to_bucket = self.create_bucket()
        file_contents = BytesIO(b'abcd' * (1024 * 1024 * 10))
        self.put_object(from_bucket, 'foo.txt', file_contents)

        p = aws('s3 cp s3://%s/foo.txt s3://%s/foo.txt' %
                (from_bucket, to_bucket))
        self.assert_no_errors(p)
        self.assert_key_contents_equal(to_bucket, 'foo.txt', file_contents)
        self.assertTrue(self.key_exists(from_bucket, key_name='foo.txt'))

    def test_guess_mime_type(self):
        bucket_name = _SHARED_BUCKET
        bar_png = self.files.create_file('bar.jpeg', 'fake png image')
        p = aws('s3 cp %s s3://%s/bar.jpeg' % (bar_png, bucket_name))
        self.assert_no_errors(p)

        # We should have correctly guessed the content type based on the
        # filename extension.
        self.assertEqual(
            self.content_type_for_key(bucket_name, key_name='bar.jpeg'),
            'image/jpeg')

    @pytest.mark.slow
    def test_download_large_file(self):
        # This will force a multipart download.
        bucket_name = _SHARED_BUCKET
        foo_contents = BytesIO(b'abcd' * (1024 * 1024 * 10))
        self.put_object(bucket_name, key_name='foo.txt',
                        contents=foo_contents)
        local_foo_txt = self.files.full_path('foo.txt')
        p = aws('s3 cp s3://%s/foo.txt %s' % (bucket_name, local_foo_txt))
        self.assert_no_errors(p)
        self.assertEqual(os.path.getsize(local_foo_txt),
                         len(foo_contents.getvalue()))

    @pytest.mark.slow
    @skip_if_windows('SIGINT not supported on Windows.')
    def test_download_ctrl_c_does_not_hang(self):
        bucket_name = _SHARED_BUCKET
        foo_contents = BytesIO(b'abcd' * (1024 * 1024 * 40))
        self.put_object(bucket_name, key_name='foo.txt',
                        contents=foo_contents)
        local_foo_txt = self.files.full_path('foo.txt')
        # --quiet is added to make sure too much output is not communicated
        # to the PIPE, causing a deadlock when not consumed.
        process = aws('s3 cp s3://%s/foo.txt %s --quiet' %
                      (bucket_name, local_foo_txt), wait_for_finish=False)
        # Give it some time to start up and enter its main task loop.
        time.sleep(3)
        # The process has 60 seconds to finish after being sent a Ctrl+C,
        # otherwise the test fails.
        process.send_signal(signal.SIGINT)
        wait_for_process_exit(process, timeout=60)
        # A Ctrl+C should have a non-zero RC.
        # We either caught the process in
        # its main polling loop (rc=1), or it was successfully terminated by
        # the SIGINT (rc=-2).
        #
        # There is also the chance the interrupt happened before the transfer
        # process started or even after the transfer process finished. So the
        # signal may have never been encountered, resulting in an rc of 0.
        # Therefore, it is acceptable to have an rc of 0 as the important part
        # about this test is that it does not hang.
        self.assertIn(process.returncode, [0, 1, -2])

    @pytest.mark.slow
    @skip_if_windows('SIGINT not supported on Windows.')
    def test_cleans_up_aborted_uploads(self):
        bucket_name = _SHARED_BUCKET
        foo_txt = self.files.create_file('foo.txt', '')
        with open(foo_txt, 'wb') as f:
            for i in range(20):
                f.write(b'a' * 1024 * 1024)
        # --quiet is added to make sure too much output is not communicated
        # to the PIPE, causing a deadlock when not consumed.
        process = aws('s3 cp %s s3://%s/ --quiet' % (foo_txt, bucket_name),
                      wait_for_finish=False)
        time.sleep(3)
        # The process has 60 seconds to finish after being sent a Ctrl+C,
        # otherwise the test fails.
        process.send_signal(signal.SIGINT)
        wait_for_process_exit(process, timeout=60)
        uploads_after = self.client.list_multipart_uploads(
            Bucket=bucket_name).get('Uploads', [])
        self.assertEqual(uploads_after, [],
                         "Not all multipart uploads were properly "
                         "aborted after receiving Ctrl-C: %s" % uploads_after)

    def test_cp_to_nonexistent_bucket(self):
        foo_txt = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws(f's3 cp {foo_txt} s3://{_NON_EXISTENT_BUCKET}/foo.txt')
        self.assertEqual(p.rc, 1)

    def test_cp_empty_file(self):
        bucket_name = _SHARED_BUCKET
        foo_txt = self.files.create_file('foo.txt', contents='')
        p = aws('s3 cp %s s3://%s/' % (foo_txt, bucket_name))
        self.assertEqual(p.rc, 0)
        self.assertNotIn('failed', p.stderr)
        self.assertTrue(self.key_exists(bucket_name, 'foo.txt'))

    def test_download_non_existent_key(self):
        p = aws(f's3 cp s3://{_NON_EXISTENT_BUCKET}/foo.txt foo.txt')
        self.assertEqual(p.rc, 1)
        expected_err_msg = (
            'An error occurred (404) when calling the '
            'HeadObject operation: Key "foo.txt" does not exist')
        self.assertIn(expected_err_msg, p.stderr)

    def test_download_encrypted_kms_object(self):
        bucket_name = self.create_bucket(region='eu-central-1')
        extra_args = {
            'ServerSideEncryption': 'aws:kms',
            'SSEKMSKeyId': 'alias/aws/s3'
        }
        object_name = 'foo.txt'
        contents = 'this is foo.txt'
        self.put_object(bucket_name, object_name, contents,
                        extra_args=extra_args)
        local_filename = self.files.full_path('foo.txt')
        p = aws('s3 cp s3://%s/%s %s --region eu-central-1' %
                (bucket_name, object_name, local_filename))
        self.assertEqual(p.rc, 0)
        # Assert that the file was downloaded properly.
        with open(local_filename, 'r') as f:
            self.assertEqual(f.read(), contents)

    def test_download_empty_object(self):
        bucket_name = _SHARED_BUCKET
        object_name = 'empty-object'
        self.put_object(bucket_name, object_name, '')
        local_filename = self.files.full_path('empty.txt')
        p = aws('s3 cp s3://%s/%s %s' % (
            bucket_name, object_name, local_filename))
        self.assertEqual(p.rc, 0)
        # Assert that the file was downloaded and has no content.
        with open(local_filename, 'r') as f:
            self.assertEqual(f.read(), '')

    def test_website_redirect_ignore_paramfile(self):
        bucket_name = _SHARED_BUCKET
        foo_txt = self.files.create_file('foo.txt', 'bar')
        website_redirect = 'http://someserver'
        p = aws('s3 cp %s s3://%s/foo.txt --website-redirect %s' %
                (foo_txt, bucket_name, website_redirect))
        self.assert_no_errors(p)

        # Ensure that the web address is used as opposed to the contents
        # of the web address. We can check via a head object.
        response = self.head_object(bucket_name, 'foo.txt')
        self.assertEqual(response['WebsiteRedirectLocation'], website_redirect)

    @pytest.mark.slow
    def test_copy_large_file_signature_v4(self):
        # Just verify that we can upload a large file to a region
        # that uses signature version 4.
        bucket_name = self.create_bucket(region='eu-central-1')
        num_mb = 200
        foo_txt = self.files.create_file('foo.txt', '')
        with open(foo_txt, 'wb') as f:
            for i in range(num_mb):
                f.write(b'a' * 1024 * 1024)

        p = aws('s3 cp %s s3://%s/ --region eu-central-1' % (
            foo_txt, bucket_name))
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, key_name='foo.txt'))

    def test_copy_metadata(self):
        # Use the same style of parsing as the CLI session. This is needed
        # for comparing the expires timestamp.
        add_scalar_parsers(self.session)
        bucket_name = _SHARED_BUCKET
        key = random_chars(6)
        filename = self.files.create_file(key, contents='')
        p = aws('s3 cp %s s3://%s/%s --metadata keyname=value' %
                (filename, bucket_name, key))
        self.assert_no_errors(p)
        response = self.head_object(bucket_name, key)
        # These values should have the metadata of the source object
        self.assertEqual(response['Metadata'].get('keyname'), 'value')

    def test_copy_metadata_directive(self):
        # Use the same style of parsing as the CLI session. This is needed
        # for comparing the expires timestamp.
        self.override_parser(timestamp_parser=identity)
        bucket_name = _SHARED_BUCKET
        original_key = '%s-a' % random_chars(6)
        new_key = '%s-b' % random_chars(6)
        metadata = {
            'ContentType': 'foo',
            'ContentDisposition': 'foo',
            'ContentEncoding': 'foo',
            'ContentLanguage': 'foo',
            'CacheControl': '90',
            'Expires': '0'
        }
        self.put_object(bucket_name, original_key, contents='foo',
                        extra_args=metadata)
        p = aws('s3 cp s3://%s/%s s3://%s/%s' %
                (bucket_name, original_key, bucket_name, new_key))
        self.assert_no_errors(p)
        response = self.head_object(bucket_name, new_key)
        # These values should have the metadata of the source object
        metadata_ref = copy.copy(metadata)
        metadata_ref['Expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT'
        for name, value in metadata_ref.items():
            self.assertEqual(response[name], value)

        # Use REPLACE to wipe out all of the metadata when copying to a new
        # key.
        new_key = '%s-c' % random_chars(6)
        p = aws('s3 cp s3://%s/%s s3://%s/%s --metadata-directive REPLACE' %
                (bucket_name, original_key, bucket_name, new_key))
        self.assert_no_errors(p)
        response = self.head_object(bucket_name, new_key)
        # Make sure all of the original metadata is gone.
        for name, value in metadata_ref.items():
            self.assertNotEqual(response.get(name), value)

        # Use REPLACE to wipe out all of the metadata but include a new
        # metadata value.
        new_key = '%s-d' % random_chars(6)
        p = aws('s3 cp s3://%s/%s s3://%s/%s --metadata-directive REPLACE '
                '--content-type bar' %
                (bucket_name, original_key, bucket_name, new_key))
        self.assert_no_errors(p)
        response = self.head_object(bucket_name, new_key)
        # Make sure the content type metadata is included
        self.assertEqual(response['ContentType'], 'bar')
        # Make sure all of the original metadata is gone.
        for name, value in metadata_ref.items():
            self.assertNotEqual(response.get(name), value)

    def test_cp_with_request_payer(self):
        bucket_name = _SHARED_BUCKET

        foo_txt = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws('s3 cp %s s3://%s/mykey --request-payer' % (
            foo_txt, bucket_name))

        # From the S3 API, the only way to know for sure that request payer is
        # working is to set up a bucket with request payer and have another
        # account with permissions make a request to that bucket. If they
        # do not include request payer, they will get an access denied error.
        # Setting this up for an integration test would be tricky as it
        # requires having/creating another account outside of the one running
        # the integration tests. So instead, at the very least, we want to
        # make sure we can use the parameter, have the command run
        # successfully, and correctly upload the key to S3.
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, key_name='mykey'))
        self.assertEqual(
            self.get_key_contents(bucket_name, key_name='mykey'),
            'this is foo.txt')
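

# Illustrative sketch only (assumption: not exercised by the suite). The
# --page-size options used by the TestSync cases below map onto paginated
# ListObjectsV2 calls; the same effect can be reproduced directly with a
# botocore paginator and a small page size.
def _example_list_keys_in_small_pages(bucket, page_size=2):
    s3 = botocore.session.get_session().create_client(
        's3', region_name=_DEFAULT_REGION)
    paginator = s3.get_paginator('list_objects_v2')
    pages = paginator.paginate(
        Bucket=bucket, PaginationConfig={'PageSize': page_size})
    return [obj['Key'] for page in pages for obj in page.get('Contents', [])]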


class TestSync(BaseS3IntegrationTest):
    def test_sync_with_plus_chars_paginate(self):
        # This test ensures pagination tokens are url decoded.
        # 1. Create > 2 files with '+' in the filename.
        # 2. Sync up to s3 while the page size is 2.
        # 3. Sync up to s3 while the page size is 2.
        # 4. Verify nothing was synced up in step 3.
        bucket_name = _SHARED_BUCKET
        filenames = []
        for i in range(4):
            # Create a file with a space char and a '+' char in the filename.
            # We're interested in testing the filename comparisons, not the
            # mtime comparisons so we're setting the mtime to some time
            # in the past to avoid mtime comparisons interfering with
            # test results.
            mtime = time.time() - 300
            filenames.append(
                self.files.create_file('foo +%06d' % i,
                                       contents='',
                                       mtime=mtime))
        p = aws('s3 sync %s s3://%s/ --page-size 2' %
                (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        time.sleep(1)
        p2 = aws('s3 sync %s s3://%s/ --page-size 2'
                 % (self.files.rootdir, bucket_name))
        self.assertNotIn('upload:', p2.stdout)
        self.assertEqual('', p2.stdout)

    def test_s3_to_s3_sync_with_plus_char_paginate(self):
        keynames = []
        for i in range(4):
            keyname = 'foo+%d' % i
            keynames.append(keyname)
            self.files.create_file(keyname, contents='')

        bucket_name = _SHARED_BUCKET
        bucket_name_2 = self.create_bucket()

        p = aws('s3 sync %s s3://%s' % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        for key in keynames:
            self.assertTrue(self.key_exists(bucket_name, key))

        p = aws('s3 sync s3://%s/ s3://%s/ --page-size 2' %
                (bucket_name, bucket_name_2))
        self.assert_no_errors(p)
        for key in keynames:
            self.assertTrue(self.key_exists(bucket_name_2, key))

        p2 = aws('s3 sync s3://%s/ s3://%s/ --page-size 2' %
                 (bucket_name, bucket_name_2))
        self.assertNotIn('copy:', p2.stdout)
        self.assertEqual('', p2.stdout)

    def test_sync_no_resync(self):
        self.files.create_file('xyz123456789', contents='test1')
        self.files.create_file(os.path.join('xyz1', 'test'), contents='test2')
        self.files.create_file(os.path.join('xyz', 'test'), contents='test3')
        bucket_name = _SHARED_BUCKET

        p = aws('s3 sync %s s3://%s' % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        time.sleep(2)
        self.assertTrue(self.key_exists(bucket_name, 'xyz123456789'))
        self.assertTrue(self.key_exists(bucket_name, 'xyz1/test'))
        self.assertTrue(self.key_exists(bucket_name, 'xyz/test'))

        p2 = aws('s3 sync %s s3://%s/' % (self.files.rootdir, bucket_name))
        self.assertNotIn('upload:', p2.stdout)
        self.assertEqual('', p2.stdout)
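
    # Note (editorial assumption, consistent with the mtime comments above):
    # ``sync`` decides whether to transfer a file by comparing size and
    # last-modified time, which is why these cases backdate mtimes and sleep
    # between runs; it keeps the "second sync transfers nothing" assertions
    # deterministic.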

    def test_sync_to_from_s3(self):
        bucket_name = _SHARED_BUCKET
        foo_txt = self.files.create_file('foo.txt', 'foo contents')
        bar_txt = self.files.create_file('bar.txt', 'bar contents')

        # Sync the directory and the bucket.
        p = aws('s3 sync %s s3://%s' % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)

        # Ensure both files are in the bucket.
        self.assertTrue(self.key_exists(bucket_name, 'foo.txt'))
        self.assertTrue(self.key_exists(bucket_name, 'bar.txt'))

        # Sync back down. First remove the local files.
        os.remove(foo_txt)
        os.remove(bar_txt)
        p = aws('s3 sync s3://%s %s' % (bucket_name, self.files.rootdir))
        # The files should be back now.
        self.assertTrue(os.path.isfile(foo_txt))
        self.assertTrue(os.path.isfile(bar_txt))
        with open(foo_txt, 'r') as f:
            self.assertEqual(f.read(), 'foo contents')
        with open(bar_txt, 'r') as f:
            self.assertEqual(f.read(), 'bar contents')

    def test_sync_to_nonexistent_bucket(self):
        self.files.create_file('foo.txt', 'foo contents')
        self.files.create_file('bar.txt', 'bar contents')

        # Sync the directory and the bucket.
        p = aws('s3 sync %s s3://noexist-bkt-nme-1412' % (self.files.rootdir,))
        self.assertEqual(p.rc, 1)

    def test_sync_with_empty_files(self):
        self.files.create_file('foo.txt', 'foo contents')
        self.files.create_file('bar.txt', contents='')
        bucket_name = _SHARED_BUCKET
        p = aws('s3 sync %s s3://%s/' % (self.files.rootdir, bucket_name))
        self.assertEqual(p.rc, 0)
        self.assertNotIn('failed', p.stderr)
        self.assertTrue(
            self.key_exists(bucket_name=bucket_name, key_name='bar.txt'))

    def test_sync_with_delete_option_with_same_prefix(self):
        # Test for issue 440 (https://github.com/aws/aws-cli/issues/440)
        # First, we need to create a directory structure that has a dir with
        # the same prefix as some of the files:
        #
        #  test/foo.txt
        #  test-123.txt
        #  test-321.txt
        #  test.txt
        bucket_name = _SHARED_BUCKET
        # create test/foo.txt
        nested_dir = os.path.join(self.files.rootdir, 'test')
        os.mkdir(nested_dir)
        self.files.create_file(os.path.join(nested_dir, 'foo.txt'),
                               contents='foo.txt contents')
        # Then create test-123.txt, test-321.txt, test.txt.
        self.files.create_file('test-123.txt', 'test-123.txt contents')
        self.files.create_file('test-321.txt', 'test-321.txt contents')
        self.files.create_file('test.txt', 'test.txt contents')

        # Now sync this content up to s3.
        # Allow settling time so that we have a different time between
        # source and destination.
        time.sleep(2)
        p = aws('s3 sync %s s3://%s/' % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)

        # Now here's the issue. If we try to sync the contents down
        # with the --delete flag we should *not* see any output, the
        # sync operation should determine that nothing is different and
        # therefore do nothing.
We can just use --dryrun to show the issue.846p = aws('s3 sync s3://%s/ %s --dryrun --delete' % (847bucket_name, self.files.rootdir))848self.assert_no_errors(p)849# These assertion methods will give better error messages than just850# checking if the output is empty.851self.assertNotIn('download:', p.stdout)852self.assertNotIn('delete:', p.stdout)853self.assertEqual('', p.stdout)854855def test_sync_with_delete_across_sig4_regions(self):856src_region = 'us-west-2'857dst_region = 'eu-central-1'858859src_bucket = self.create_bucket(region=src_region)860dst_bucket = self.create_bucket(region=dst_region)861862src_key_name = 'hello.txt'863self.files.create_file(src_key_name, contents='hello')864865p = aws('s3 sync %s s3://%s --region %s' %866(self.files.rootdir, src_bucket, src_region))867self.assert_no_errors(p)868self.assertTrue(self.key_exists(src_bucket, src_key_name))869870self.files.remove_all()871872dst_key_name = 'goodbye.txt'873self.files.create_file(dst_key_name, contents='goodbye')874875p = aws('s3 sync %s s3://%s --region %s' %876(self.files.rootdir, dst_bucket, dst_region))877self.assert_no_errors(p)878self.assertTrue(self.key_exists(dst_bucket, dst_key_name))879self.assertTrue(self.key_not_exists(dst_bucket, src_key_name))880881p = aws('s3 sync --delete s3://%s s3://%s '882'--source-region %s --region %s' %883(src_bucket, dst_bucket, src_region, dst_region))884self.assert_no_errors(p)885886self.assertTrue(self.key_exists(src_bucket, src_key_name))887self.assertTrue(self.key_exists(dst_bucket, src_key_name))888self.assertTrue(self.key_not_exists(src_bucket, dst_key_name))889self.assertTrue(self.key_not_exists(dst_bucket, dst_key_name))890891def test_sync_delete_locally(self):892bucket_name = _SHARED_BUCKET893file_to_delete = self.files.create_file(894'foo.txt', contents='foo contents')895self.put_object(bucket_name, 'bar.txt', contents='bar contents')896897p = aws('s3 sync s3://%s/ %s --delete' % (898bucket_name, self.files.rootdir))899self.assert_no_errors(p)900901# Make sure the uploaded file got downloaded and the previously902# existing local file got deleted903self.assertTrue(os.path.exists(904os.path.join(self.files.rootdir, 'bar.txt')))905self.assertFalse(os.path.exists(file_to_delete))906907908class TestSourceRegion(BaseS3IntegrationTest):909def extra_setup(self):910name_comp = []911# This creates a non DNS compatible bucket name by making two random912# sequences of characters and joining them with a period and913# adding a .com at the end.914for i in range(2):915name_comp.append(random_chars(10))916self.src_name = '.'.join(name_comp + ['com'])917name_comp = []918for i in range(2):919name_comp.append(random_chars(10))920self.dest_name = '.'.join(name_comp + ['com'])921self.src_region = 'us-west-1'922self.dest_region = 'us-east-1'923self.src_bucket = self.create_bucket(self.src_name, self.src_region)924self.dest_bucket = self.create_bucket(self.dest_name, self.dest_region)925926def test_cp_region(self):927self.files.create_file('foo.txt', 'foo')928p = aws('s3 sync %s s3://%s/ --region %s' %929(self.files.rootdir, self.src_bucket, self.src_region))930self.assert_no_errors(p)931p2 = aws('s3 cp s3://%s/ s3://%s/ --region %s --source-region %s '932'--recursive' %933(self.src_bucket, self.dest_bucket, self.dest_region,934self.src_region))935self.assertEqual(p2.rc, 0, p2.stdout)936self.assertTrue(937self.key_exists(bucket_name=self.dest_bucket, key_name='foo.txt'))938939def test_sync_region(self):940self.files.create_file('foo.txt', 'foo')941p = aws('s3 sync %s s3://%s/ --region 
%s' %942(self.files.rootdir, self.src_bucket, self.src_region))943self.assert_no_errors(p)944p2 = aws('s3 sync s3://%s/ s3://%s/ --region %s --source-region %s ' %945(self.src_bucket, self.dest_bucket, self.dest_region,946self.src_region))947self.assertEqual(p2.rc, 0, p2.stdout)948self.assertTrue(949self.key_exists(bucket_name=self.dest_bucket, key_name='foo.txt'))950951def test_mv_region(self):952self.files.create_file('foo.txt', 'foo')953p = aws('s3 sync %s s3://%s/ --region %s' %954(self.files.rootdir, self.src_bucket, self.src_region))955self.assert_no_errors(p)956p2 = aws('s3 mv s3://%s/ s3://%s/ --region %s --source-region %s '957'--recursive' %958(self.src_bucket, self.dest_bucket, self.dest_region,959self.src_region))960self.assertEqual(p2.rc, 0, p2.stdout)961self.assertTrue(962self.key_exists(bucket_name=self.dest_bucket, key_name='foo.txt'))963self.assertTrue(964self.key_not_exists(965bucket_name=self.src_bucket, key_name='foo.txt'))966967@pytest.mark.slow968def test_mv_large_file_region(self):969foo_txt = self.files.create_file('foo.txt', 'a' * 1024 * 1024 * 10)970p = aws('s3 cp %s s3://%s/foo.txt --region %s' %971(foo_txt, self.src_bucket, self.src_region))972self.assert_no_errors(p)973974p2 = aws(975's3 mv s3://%s/foo.txt s3://%s/ --region %s --source-region %s ' %976(self.src_bucket, self.dest_bucket, self.dest_region,977self.src_region)978)979self.assert_no_errors(p2)980self.assertTrue(981self.key_exists(bucket_name=self.dest_bucket, key_name='foo.txt'))982self.assertTrue(983self.key_not_exists(984bucket_name=self.src_bucket, key_name='foo.txt'))985986987class TestWarnings(BaseS3IntegrationTest):988def test_no_exist(self):989bucket_name = _SHARED_BUCKET990filename = os.path.join(self.files.rootdir, "no-exists-file")991p = aws('s3 cp %s s3://%s/' % (filename, bucket_name))992# If the local path provided by the user is nonexistent for an993# upload, this should error out.994self.assertEqual(p.rc, 255, p.stderr)995self.assertIn('The user-provided path %s does not exist.' %996filename, p.stderr)997998@skip_if_windows('Read permissions tests only supported on mac/linux')999def test_no_read_access(self):1000if os.geteuid() == 0:1001self.skipTest('Cannot completely remove read access as root user.')1002bucket_name = _SHARED_BUCKET1003self.files.create_file('foo.txt', 'foo')1004filename = os.path.join(self.files.rootdir, 'foo.txt')1005permissions = stat.S_IMODE(os.stat(filename).st_mode)1006# Remove read permissions1007permissions = permissions ^ stat.S_IREAD1008os.chmod(filename, permissions)1009p = aws('s3 cp %s s3://%s/' % (filename, bucket_name))1010self.assertEqual(p.rc, 2, p.stderr)1011self.assertIn('warning: Skipping file %s. File/Directory is '1012'not readable.' % filename, p.stderr)10131014@skip_if_windows('Special files only supported on mac/linux')1015def test_is_special_file(self):1016bucket_name = _SHARED_BUCKET1017file_path = os.path.join(self.files.rootdir, 'foo')1018# Use socket for special file.1019sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)1020sock.bind(file_path)1021p = aws('s3 cp %s s3://%s/' % (file_path, bucket_name))1022self.assertEqual(p.rc, 2, p.stderr)1023self.assertIn(("warning: Skipping file %s. File is character "1024"special device, block special device, FIFO, or "1025"socket." 
% file_path), p.stderr)102610271028class TestUnableToWriteToFile(BaseS3IntegrationTest):10291030@skip_if_windows('Write permissions tests only supported on mac/linux')1031def test_no_write_access_small_file(self):1032bucket_name = _SHARED_BUCKET1033if os.geteuid() == 0:1034self.skipTest(1035'Cannot completely remove write access as root user.')1036os.chmod(self.files.rootdir, 0o444)1037self.put_object(bucket_name, 'foo.txt',1038contents='Hello world')1039p = aws('s3 cp s3://%s/foo.txt %s' % (1040bucket_name, os.path.join(self.files.rootdir, 'foo.txt')))1041self.assertEqual(p.rc, 1)1042self.assertIn('download failed', p.stderr)10431044@skip_if_windows('Write permissions tests only supported on mac/linux')1045def test_no_write_access_large_file(self):1046if os.geteuid() == 0:1047self.skipTest(1048'Cannot completely remove write access as root user.')1049bucket_name = _SHARED_BUCKET1050# We have to use a file like object because using a string1051# would result in the header + body sent as a single packet1052# which effectively disables the expect 100 continue logic.1053# This will result in a test error because we won't follow1054# the temporary redirect for the newly created bucket.1055contents = BytesIO(b'a' * 10 * 1024 * 1024)1056self.put_object(bucket_name, 'foo.txt',1057contents=contents)1058os.chmod(self.files.rootdir, 0o444)1059p = aws('s3 cp s3://%s/foo.txt %s' % (1060bucket_name, os.path.join(self.files.rootdir, 'foo.txt')))1061self.assertEqual(p.rc, 1)1062self.assertIn('download failed', p.stderr)106310641065@skip_if_windows('Symlink tests only supported on mac/linux')1066class TestSymlinks(BaseS3IntegrationTest):1067"""1068This class test the ability to follow or not follow symlinks.1069"""1070def extra_setup(self):1071self.bucket_name = _SHARED_BUCKET1072self.nested_dir = os.path.join(self.files.rootdir, 'realfiles')1073os.mkdir(self.nested_dir)1074self.sample_file = \1075self.files.create_file(os.path.join(self.nested_dir, 'foo.txt'),1076contents='foo.txt contents')1077# Create a symlink to foo.txt.1078os.symlink(self.sample_file, os.path.join(self.files.rootdir,1079'a-goodsymlink'))1080# Create a bad symlink.1081os.symlink('non-existent-file', os.path.join(self.files.rootdir,1082'b-badsymlink'))1083# Create a symlink to directory where foo.txt is.1084os.symlink(self.nested_dir, os.path.join(self.files.rootdir,1085'c-goodsymlink'))10861087def test_no_follow_symlinks(self):1088p = aws('s3 sync %s s3://%s/ --no-follow-symlinks' % (1089self.files.rootdir, self.bucket_name))1090self.assert_no_errors(p)1091self.assertTrue(self.key_not_exists(self.bucket_name,1092'a-goodsymlink'))1093self.assertTrue(self.key_not_exists(self.bucket_name,1094'b-badsymlink'))1095self.assertTrue(self.key_not_exists(self.bucket_name,1096'c-goodsymlink/foo.txt'))1097self.assertEqual(self.get_key_contents(self.bucket_name,1098key_name='realfiles/foo.txt'),1099'foo.txt contents')11001101def test_follow_symlinks(self):1102# Get rid of the bad symlink first.1103os.remove(os.path.join(self.files.rootdir, 'b-badsymlink'))1104p = aws('s3 sync %s s3://%s/ --follow-symlinks' %1105(self.files.rootdir, self.bucket_name))1106self.assert_no_errors(p)1107self.assertEqual(self.get_key_contents(self.bucket_name,1108key_name='a-goodsymlink'),1109'foo.txt contents')1110self.assertTrue(self.key_not_exists(self.bucket_name,1111'b-badsymlink'))1112self.assertEqual(1113self.get_key_contents(self.bucket_name,1114key_name='c-goodsymlink/foo.txt'),1115'foo.txt 
contents')1116self.assertEqual(self.get_key_contents(self.bucket_name,1117key_name='realfiles/foo.txt'),1118'foo.txt contents')11191120def test_follow_symlinks_default(self):1121# Get rid of the bad symlink first.1122os.remove(os.path.join(self.files.rootdir, 'b-badsymlink'))1123p = aws('s3 sync %s s3://%s/' %1124(self.files.rootdir, self.bucket_name))1125self.assert_no_errors(p)1126self.assertEqual(self.get_key_contents(self.bucket_name,1127key_name='a-goodsymlink'),1128'foo.txt contents')1129self.assertTrue(self.key_not_exists(self.bucket_name,1130'b-badsymlink'))1131self.assertEqual(1132self.get_key_contents(self.bucket_name,1133key_name='c-goodsymlink/foo.txt'),1134'foo.txt contents')1135self.assertEqual(self.get_key_contents(self.bucket_name,1136key_name='realfiles/foo.txt'),1137'foo.txt contents')11381139def test_bad_symlink(self):1140p = aws('s3 sync %s s3://%s/' % (self.files.rootdir, self.bucket_name))1141self.assertEqual(p.rc, 2, p.stderr)1142self.assertIn('warning: Skipping file %s. File does not exist.' %1143os.path.join(self.files.rootdir, 'b-badsymlink'),1144p.stderr)114511461147class TestUnicode(BaseS3IntegrationTest):1148"""1149The purpose of these tests are to ensure that the commands can handle1150unicode characters in both keyname and from those generated for both1151uploading and downloading files.1152"""1153def test_cp(self):1154bucket_name = _SHARED_BUCKET1155local_example1_txt = \1156self.files.create_file(u'\u00e9xample.txt', 'example1 contents')1157s3_example1_txt = 's3://%s/%s' % (bucket_name,1158os.path.basename(local_example1_txt))1159local_example2_txt = self.files.full_path(u'\u00e9xample2.txt')11601161p = aws('s3 cp %s %s' % (local_example1_txt, s3_example1_txt))1162self.assert_no_errors(p)11631164# Download the file to the second example2.txt filename.1165p = aws('s3 cp %s %s --quiet' % (s3_example1_txt, local_example2_txt))1166self.assert_no_errors(p)1167with open(local_example2_txt, 'rb') as f:1168self.assertEqual(f.read(), b'example1 contents')11691170def test_recursive_cp(self):1171bucket_name = _SHARED_BUCKET1172local_example1_txt = self.files.create_file(u'\u00e9xample.txt',1173'example1 contents')1174local_example2_txt = self.files.create_file(u'\u00e9xample2.txt',1175'example2 contents')1176p = aws('s3 cp %s s3://%s --recursive --quiet' % (1177self.files.rootdir, bucket_name))1178self.assert_no_errors(p)11791180os.remove(local_example1_txt)1181os.remove(local_example2_txt)11821183p = aws('s3 cp s3://%s %s --recursive --quiet' % (1184bucket_name, self.files.rootdir))1185self.assert_no_errors(p)1186self.assertEqual(open(local_example1_txt).read(), 'example1 contents')1187self.assertEqual(open(local_example2_txt).read(), 'example2 contents')118811891190class TestLs(BaseS3IntegrationTest):1191"""1192This tests using the ``ls`` command.1193"""11941195def assert_ls_with_prefix(self, bucket_name):1196self.put_object(bucket_name, 'foo.txt', 'contents')1197self.put_object(bucket_name, 'foo', 'contents')1198self.put_object(bucket_name, 'bar.txt', 'contents')1199self.put_object(bucket_name, 'subdir/foo.txt', 'contents')1200p = aws('s3 ls s3://%s' % bucket_name)1201self.assertIn('PRE subdir/', p.stdout)1202self.assertIn('8 foo.txt', p.stdout)1203self.assertIn('8 foo', p.stdout)1204self.assertIn('8 bar.txt', p.stdout)12051206def assert_ls_recursive(self, bucket_name):1207self.put_object(bucket_name, 'foo.txt', 'contents')1208self.put_object(bucket_name, 'foo', 'contents')1209self.put_object(bucket_name, 'bar.txt', 'contents')1210self.put_object(bucket_name, 
'subdir/foo.txt', 'contents')1211p = aws('s3 ls s3://%s --recursive' % bucket_name)1212self.assertIn('8 foo.txt', p.stdout)1213self.assertIn('8 foo', p.stdout)1214self.assertIn('8 bar.txt', p.stdout)1215self.assertIn('8 subdir/foo.txt', p.stdout)12161217def test_ls_bucket(self):1218p = aws('s3 ls')1219self.assert_no_errors(p)12201221def test_ls_with_no_env_vars(self):1222# By default, the aws() function injects1223# an AWS_DEFAULT_REGION into the env var of the1224# process. We're verifying that a region does *not*1225# need to be set anywhere. If we provide our1226# own environ dict, then the aws() function won't1227# inject a region.1228env = os.environ.copy()1229p = aws('s3 ls', env_vars=env)1230self.assert_no_errors(p)12311232def test_ls_bucket_with_s3_prefix(self):1233p = aws('s3 ls s3://')1234self.assert_no_errors(p)12351236def test_ls_non_existent_bucket(self):1237p = aws(f's3 ls s3://{_NON_EXISTENT_BUCKET}')1238self.assertEqual(p.rc, 255)1239self.assertIn(1240('An error occurred (NoSuchBucket) when calling the '1241'ListObjectsV2 operation: The specified bucket does not exist'),1242p.stderr)1243# There should be no stdout if we can't find the bucket.1244self.assertEqual(p.stdout, '')12451246def test_ls_with_prefix(self):1247self.assert_ls_with_prefix(_SHARED_BUCKET)12481249def test_s3_express_ls_with_prefix(self):1250self.assert_ls_with_prefix(_SHARED_DIR_BUCKET)12511252def test_ls_recursive(self):1253self.assert_ls_recursive(_SHARED_BUCKET)12541255def test_s3_express_ls_recursive(self):1256self.assert_ls_recursive(_SHARED_DIR_BUCKET)12571258def test_ls_without_prefix(self):1259# The ls command does not require an s3:// prefix,1260# we're always listing s3 contents.1261bucket_name = _SHARED_BUCKET1262self.put_object(bucket_name, 'foo.txt', 'contents')1263p = aws('s3 ls %s' % bucket_name)1264self.assertEqual(p.rc, 0)1265self.assertIn('foo.txt', p.stdout)12661267def test_only_prefix(self):1268bucket_name = _SHARED_BUCKET1269self.put_object(bucket_name, 'temp/foo.txt', 'contents')1270p = aws('s3 ls s3://%s/temp/foo.txt' % bucket_name)1271self.assertEqual(p.rc, 0)1272self.assertIn('foo.txt', p.stdout)12731274def test_ls_empty_bucket(self):1275bucket_name = _SHARED_BUCKET1276p = aws('s3 ls %s' % bucket_name)1277# There should not be an error thrown for checking the contents of1278# an empty bucket because no key was specified.1279self.assertEqual(p.rc, 0)12801281def test_ls_fail(self):1282bucket_name = _SHARED_BUCKET1283p = aws('s3 ls s3://%s/foo' % bucket_name)1284self.assertEqual(p.rc, 1)12851286def test_ls_fail_recursive(self):1287bucket_name = _SHARED_BUCKET1288p = aws('s3 ls s3://%s/bar --recursive' % bucket_name)1289self.assertEqual(p.rc, 1)129012911292class TestMbRb(BaseS3IntegrationTest):1293"""1294Tests primarily using ``rb`` and ``mb`` command.1295"""1296def extra_setup(self):1297self.bucket_name = random_bucket_name()12981299def test_mb_rb(self):1300p = aws('s3 mb s3://%s' % self.bucket_name)1301self.assert_no_errors(p)13021303# Give the bucket time to form.1304time.sleep(1)1305response = self.list_buckets()1306self.assertIn(self.bucket_name, [b['Name'] for b in response])13071308p = aws('s3 rb s3://%s' % self.bucket_name)1309self.assert_no_errors(p)13101311def test_fail_mb_rb(self):1312# Choose a bucket name that already exists.1313p = aws('s3 mb s3://mybucket')1314self.assertIn("BucketAlreadyExists", p.stderr)1315self.assertEqual(p.rc, 1)131613171318class TestOutput(BaseS3IntegrationTest):1319"""1320This ensures that arguments that affect output i.e. 
``--quiet`` and1321``--only-show-errors`` behave as expected.1322"""1323def test_normal_output(self):1324bucket_name = _SHARED_BUCKET1325foo_txt = self.files.create_file('foo.txt', 'foo contents')13261327# Copy file into bucket.1328p = aws('s3 cp %s s3://%s/' % (foo_txt, bucket_name))1329self.assertEqual(p.rc, 0)1330# Check that there were no errors and that parts of the expected1331# progress message are written to stdout.1332self.assert_no_errors(p)1333self.assertIn('upload', p.stdout)1334self.assertIn('s3://%s/foo.txt' % bucket_name, p.stdout)13351336def test_normal_output_quiet(self):1337bucket_name = _SHARED_BUCKET1338foo_txt = self.files.create_file('foo.txt', 'foo contents')13391340# Copy file into bucket.1341p = aws('s3 cp %s s3://%s/ --quiet' % (foo_txt, bucket_name))1342self.assertEqual(p.rc, 0)1343# Check that nothing was printed to stdout.1344self.assertEqual('', p.stdout)13451346def test_normal_output_only_show_errors(self):1347bucket_name = _SHARED_BUCKET1348foo_txt = self.files.create_file('foo.txt', 'foo contents')13491350# Copy file into bucket.1351p = aws('s3 cp %s s3://%s/ --only-show-errors' % (foo_txt,1352bucket_name))1353self.assertEqual(p.rc, 0)1354# Check that nothing was printed to stdout.1355self.assertEqual('', p.stdout)13561357def test_normal_output_no_progress(self):1358bucket_name = _SHARED_BUCKET1359foo_txt = self.files.create_file('foo.txt', 'foo contents')13601361# Copy file into bucket.1362p = aws('s3 cp %s s3://%s/ --no-progress' % (foo_txt, bucket_name))1363self.assertEqual(p.rc, 0)1364# Ensure success message was printed1365self.assertIn('upload', p.stdout)1366self.assertIn('s3://%s/foo.txt' % bucket_name, p.stdout)1367self.assertNotIn('Completed ', p.stdout)1368self.assertNotIn('calculating', p.stdout)13691370def test_error_output(self):1371foo_txt = self.files.create_file('foo.txt', 'foo contents')13721373# Copy file into bucket.1374p = aws(f's3 cp {foo_txt} s3://{_NON_EXISTENT_BUCKET}/')1375# Check that there were errors and that the error was print to stderr.1376self.assertEqual(p.rc, 1)1377self.assertIn('upload failed', p.stderr)13781379def test_error_ouput_quiet(self):1380foo_txt = self.files.create_file('foo.txt', 'foo contents')13811382# Copy file into bucket.1383p = aws(f's3 cp {foo_txt} s3://{_NON_EXISTENT_BUCKET}/ --quiet')1384# Check that there were errors and that the error was not1385# print to stderr.1386self.assertEqual(p.rc, 1)1387self.assertEqual('', p.stderr)13881389def test_error_ouput_only_show_errors(self):1390foo_txt = self.files.create_file('foo.txt', 'foo contents')13911392# Copy file into bucket.1393p = aws(f's3 cp {foo_txt} s3://{_NON_EXISTENT_BUCKET}/ --only-show-errors')1394# Check that there were errors and that the error was print to stderr.1395self.assertEqual(p.rc, 1)1396self.assertIn('upload failed', p.stderr)13971398def test_error_and_success_output_only_show_errors(self):1399# Make a bucket.1400bucket_name = _SHARED_BUCKET14011402# Create one file.1403self.files.create_file('f', 'foo contents')14041405# Create another file that has a slightly longer name than the first.1406self.files.create_file('bar.txt', 'bar contents')14071408# Create a prefix that will cause the second created file to have a key1409# longer than 1024 bytes which is not allowed in s3.1410long_prefix = 'd' * 102214111412p = aws('s3 cp %s s3://%s/%s/ --only-show-errors --recursive'1413% (self.files.rootdir, bucket_name, long_prefix))14141415# Check that there was at least one error.1416self.assertEqual(p.rc, 1)14171418# Check that there was nothing 
written to stdout for successful upload.1419self.assertEqual('', p.stdout)14201421# Check that the failed message showed up in stderr.1422self.assertIn('upload failed', p.stderr)14231424# Ensure the expected successful key exists in the bucket.1425self.assertTrue(self.key_exists(bucket_name, long_prefix + '/f'))142614271428class TestDryrun(BaseS3IntegrationTest):1429"""1430This ensures that dryrun works.1431"""1432def test_dryrun(self):1433bucket_name = _SHARED_BUCKET1434foo_txt = self.files.create_file('foo.txt', 'foo contents')14351436# Copy file into bucket.1437p = aws('s3 cp %s s3://%s/ --dryrun' % (foo_txt, bucket_name))1438self.assertEqual(p.rc, 0)1439self.assert_no_errors(p)1440self.assertTrue(self.key_not_exists(bucket_name, 'foo.txt'))14411442def test_dryrun_large_files(self):1443bucket_name = _SHARED_BUCKET1444foo_txt = self.files.create_file('foo.txt', 'a' * 1024 * 1024 * 10)14451446# Copy file into bucket.1447p = aws('s3 cp %s s3://%s/ --dryrun' % (foo_txt, bucket_name))1448self.assertEqual(p.rc, 0)1449self.assert_no_errors(p)1450self.assertTrue(1451self.key_not_exists(bucket_name, 'foo.txt'),1452"The key 'foo.txt' exists in S3. It looks like the --dryrun "1453"argument was not obeyed.")14541455def test_dryrun_download_large_file(self):1456bucket_name = _SHARED_BUCKET1457full_path = self.files.create_file('largefile', 'a' * 1024 * 1024 * 10)1458with open(full_path, 'rb') as body:1459self.put_object(bucket_name, 'foo.txt', body)14601461foo_txt = self.files.full_path('foo.txt')1462p = aws('s3 cp s3://%s/foo.txt %s --dryrun' % (bucket_name, foo_txt))1463self.assertEqual(p.rc, 0)1464self.assert_no_errors(p)1465self.assertFalse(1466os.path.exists(foo_txt),1467"The file 'foo.txt' exists locally. It looks like the --dryrun "1468"argument was not obeyed.")146914701471@skip_if_windows('Memory tests only supported on mac/linux')1472class TestMemoryUtilization(BaseS3IntegrationTest):1473# These tests verify the memory utilization and growth are what we expect.1474def extra_setup(self):1475self.num_threads = DEFAULTS['max_concurrent_requests']1476self.chunk_size = DEFAULTS['multipart_chunksize']1477expected_memory_usage = self.num_threads * self.chunk_size1478# margin for things like python VM overhead, botocore service1479# objects, etc. 
1.5 is really generous, perhaps over time this can be1480# lowered.1481runtime_margin = 1.51482self.max_mem_allowed = runtime_margin * expected_memory_usage14831484def assert_max_memory_used(self, process, max_mem_allowed, full_command):1485peak_memory = max(process.memory_usage)1486if peak_memory > max_mem_allowed:1487failure_message = (1488'Exceeded max memory allowed (%s MB) for command '1489'"%s": %s MB' % (self.max_mem_allowed / 1024.0 / 1024.0,1490full_command,1491peak_memory / 1024.0 / 1024.0))1492self.fail(failure_message)14931494@pytest.mark.slow1495def test_transfer_single_large_file(self):1496# 40MB will force a multipart upload.1497bucket_name = _SHARED_BUCKET1498file_contents = 'abcdabcd' * (1024 * 1024 * 10)1499foo_txt = self.files.create_file('foo.txt', file_contents)1500full_command = 's3 mv %s s3://%s/foo.txt' % (foo_txt, bucket_name)1501p = aws(full_command, collect_memory=True)1502self.assert_no_errors(p)1503self.assert_max_memory_used(p, self.max_mem_allowed, full_command)15041505# Verify downloading it back down obeys memory utilization.1506download_full_command = 's3 mv s3://%s/foo.txt %s' % (1507bucket_name, foo_txt)1508p = aws(download_full_command, collect_memory=True)1509self.assert_no_errors(p)1510self.assert_max_memory_used(p, self.max_mem_allowed,1511download_full_command)15121513# Some versions of RHEL allocate memory in a way where free'd memory isn't1514# given back to the OS. We haven't seen behavior as bad as RHEL's to the1515# point where this test fails on other distros, so for now we're disabling1516# the test on RHEL until we come up with a better way to collect1517# memory usage.1518@pytest.mark.slow1519@unittest.skipIf(_running_on_rhel(),1520'Streaming memory tests no supported on RHEL.')1521def test_stream_large_file(self):1522"""1523This tests to ensure that streaming files for both uploads and1524downloads do not use too much memory. Note that streaming uploads1525will use slightly more memory than usual but should not put the1526entire file into memory.1527"""1528bucket_name = _SHARED_BUCKET15291530# Create a 200 MB file that will be streamed1531num_mb = 2001532foo_txt = self.files.create_file('foo.txt', '')1533with open(foo_txt, 'wb') as f:1534for i in range(num_mb):1535f.write(b'a' * 1024 * 1024)15361537# The current memory threshold is set at about the peak amount for1538# performing a streaming upload of a file larger than 100 MB. So1539# this maximum needs to be bumped up. 
The maximum memory allowance1540# is increased by two chunksizes because that is the maximum1541# amount of chunks that will be queued while not being operated on1542# by a thread when performing a streaming multipart upload.1543max_mem_allowed = self.max_mem_allowed + 2 * self.chunk_size15441545full_command = 's3 cp - s3://%s/foo.txt' % bucket_name1546with open(foo_txt, 'rb') as f:1547p = aws(full_command, input_file=f, collect_memory=True)1548self.assert_no_errors(p)1549self.assert_max_memory_used(p, max_mem_allowed, full_command)15501551# Now perform a streaming download of the file.1552full_command = 's3 cp s3://%s/foo.txt - > %s' % (bucket_name, foo_txt)1553p = aws(full_command, collect_memory=True)1554self.assert_no_errors(p)1555# Use the usual bar for maximum memory usage since a streaming1556# download's memory usage should be comparable to non-streaming1557# transfers.1558self.assert_max_memory_used(p, self.max_mem_allowed, full_command)155915601561class TestWebsiteConfiguration(BaseS3IntegrationTest):1562def test_create_website_index_configuration(self):1563bucket_name = self.create_bucket()1564# Supply only --index-document argument.1565full_command = 's3 website %s --index-document index.html' % \1566(bucket_name)1567p = aws(full_command)1568self.assertEqual(p.rc, 0)1569self.assert_no_errors(p)1570# Verify we have a bucket website configured.1571parsed = self.client.get_bucket_website(Bucket=bucket_name)1572self.assertEqual(parsed['IndexDocument']['Suffix'], 'index.html')1573self.assertNotIn('ErrorDocument', parsed)1574self.assertNotIn('RoutingRules', parsed)1575self.assertNotIn('RedirectAllRequestsTo', parsed)15761577def test_create_website_index_and_error_configuration(self):1578bucket_name = self.create_bucket()1579# Supply both --index-document and --error-document arguments.1580p = aws('s3 website %s --index-document index.html '1581'--error-document error.html' % bucket_name)1582self.assertEqual(p.rc, 0)1583self.assert_no_errors(p)1584# Verify we have a bucket website configured.1585parsed = self.client.get_bucket_website(Bucket=bucket_name)1586self.assertEqual(parsed['IndexDocument']['Suffix'], 'index.html')1587self.assertEqual(parsed['ErrorDocument']['Key'], 'error.html')1588self.assertNotIn('RoutingRules', parsed)1589self.assertNotIn('RedirectAllRequestsTo', parsed)159015911592class TestIncludeExcludeFilters(BaseS3IntegrationTest):1593def assert_no_files_would_be_uploaded(self, p):1594self.assert_no_errors(p)1595# There should be no output.1596self.assertEqual(p.stdout, '')1597self.assertEqual(p.stderr, '')15981599def test_basic_exclude_filter_for_single_file(self):1600full_path = self.files.create_file('foo.txt', 'this is foo.txt')1601# With no exclude we should upload the file.1602p = aws('s3 cp %s s3://random-bucket-name/ --dryrun' % full_path)1603self.assert_no_errors(p)1604self.assertIn('(dryrun) upload:', p.stdout)16051606p2 = aws("s3 cp %s s3://random-bucket-name/ --dryrun --exclude '*'"1607% full_path)1608self.assert_no_files_would_be_uploaded(p2)16091610def test_explicitly_exclude_single_file(self):1611full_path = self.files.create_file('foo.txt', 'this is foo.txt')1612p = aws('s3 cp %s s3://random-bucket-name/'1613' --dryrun --exclude foo.txt'1614% full_path)1615self.assert_no_files_would_be_uploaded(p)16161617def test_cwd_doesnt_matter(self):1618full_path = self.files.create_file('foo.txt', 'this is foo.txt')1619tempdir = tempfile.mkdtemp()1620self.addCleanup(shutil.rmtree, tempdir)1621with cd(tempdir):1622p = aws("s3 cp %s s3://random-bucket-name/ --dryrun 
class TestIncludeExcludeFilters(BaseS3IntegrationTest):
    def assert_no_files_would_be_uploaded(self, p):
        self.assert_no_errors(p)
        # There should be no output.
        self.assertEqual(p.stdout, '')
        self.assertEqual(p.stderr, '')

    def test_basic_exclude_filter_for_single_file(self):
        full_path = self.files.create_file('foo.txt', 'this is foo.txt')
        # With no exclude we should upload the file.
        p = aws('s3 cp %s s3://random-bucket-name/ --dryrun' % full_path)
        self.assert_no_errors(p)
        self.assertIn('(dryrun) upload:', p.stdout)

        p2 = aws("s3 cp %s s3://random-bucket-name/ --dryrun --exclude '*'"
                 % full_path)
        self.assert_no_files_would_be_uploaded(p2)

    def test_explicitly_exclude_single_file(self):
        full_path = self.files.create_file('foo.txt', 'this is foo.txt')
        p = aws('s3 cp %s s3://random-bucket-name/'
                ' --dryrun --exclude foo.txt'
                % full_path)
        self.assert_no_files_would_be_uploaded(p)

    def test_cwd_doesnt_matter(self):
        full_path = self.files.create_file('foo.txt', 'this is foo.txt')
        tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tempdir)
        with cd(tempdir):
            p = aws("s3 cp %s s3://random-bucket-name/ --dryrun --exclude '*'"
                    % full_path)
        self.assert_no_files_would_be_uploaded(p)

    def test_recursive_exclude(self):
        # create test/foo.txt
        nested_dir = os.path.join(self.files.rootdir, 'test')
        os.mkdir(nested_dir)
        self.files.create_file(os.path.join(nested_dir, 'foo.txt'),
                               contents='foo.txt contents')
        # Then create test-123.txt, test-321.txt, test.txt.
        self.files.create_file('test-123.txt', 'test-123.txt contents')
        self.files.create_file('test-321.txt', 'test-321.txt contents')
        self.files.create_file('test.txt', 'test.txt contents')
        # An --exclude '*' should exclude everything here.
        p = aws("s3 cp %s s3://random-bucket-name/ --dryrun --exclude '*' "
                "--recursive" % self.files.rootdir)
        self.assert_no_files_would_be_uploaded(p)

        # We can include the test directory though.
        p = aws("s3 cp %s s3://random-bucket-name/ --dryrun "
                "--exclude '*' --include 'test/*' --recursive"
                % self.files.rootdir)
        self.assert_no_errors(p)
        self.assertRegex(p.stdout, r'\(dryrun\) upload:.*test/foo.txt.*')

    def test_s3_filtering(self):
        # Should behave the same as local file filtering.
        bucket_name = _SHARED_BUCKET
        self.put_object(bucket_name, key_name='foo.txt')
        self.put_object(bucket_name, key_name='bar.txt')
        self.put_object(bucket_name, key_name='baz.jpg')
        p = aws("s3 rm s3://%s/ --dryrun --exclude '*' --recursive"
                % bucket_name)
        self.assert_no_files_would_be_uploaded(p)

        p = aws(
            "s3 rm s3://%s/ --dryrun --exclude '*.jpg' --exclude '*.txt' "
            "--recursive" % bucket_name)
        self.assert_no_files_would_be_uploaded(p)

        p = aws("s3 rm s3://%s/ --dryrun --exclude '*.txt' --recursive"
                % bucket_name)
        self.assert_no_errors(p)
        self.assertRegex(p.stdout, r'\(dryrun\) delete:.*baz.jpg.*')
        self.assertNotIn('bar.txt', p.stdout)
        self.assertNotIn('foo.txt', p.stdout)

    def test_exclude_filter_with_delete(self):
        # Test for: https://github.com/aws/aws-cli/issues/778
        bucket_name = _SHARED_BUCKET
        self.files.create_file('foo.txt', 'contents')
        second = self.files.create_file('bar.py', 'contents')
        p = aws("s3 sync %s s3://%s/" % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, key_name='bar.py'))
        os.remove(second)
        # We now have the same state as specified in the bug:
        # local           remote
        # -----           ------
        #
        # foo.txt         foo.txt
        #                 bar.py
        #
        # If we now run --exclude '*.py' --delete, then we should *not*
        # delete bar.py on the remote side.
        p = aws("s3 sync %s s3://%s/ --exclude '*.py' --delete" % (
            self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        self.assertTrue(
            self.key_exists(bucket_name, key_name='bar.py'),
            ("The exclude filter was not honored by --delete; the "
             "'bar.py' file was deleted even though it"
             " was excluded."))

    def test_exclude_filter_with_relative_path(self):
        # Same test as test_exclude_filter_with_delete, except we don't
        # use an absolute path on the source dir.
        bucket_name = _SHARED_BUCKET
        self.files.create_file('foo.txt', 'contents')
        second = self.files.create_file('bar.py', 'contents')
        p = aws("s3 sync %s s3://%s/" % (self.files.rootdir, bucket_name))
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, key_name='bar.py'))
        os.remove(second)
        cwd = os.getcwd()
        try:
            os.chdir(self.files.rootdir)
            # Note how we're using "." for the source directory.
            p = aws("s3 sync . s3://%s/ --exclude '*.py' --delete"
                    % bucket_name)
        finally:
            os.chdir(cwd)
        self.assert_no_errors(p)
        self.assertTrue(
            self.key_exists(bucket_name, key_name='bar.py'),
            ("The exclude filter was not honored by --delete; the "
             "'bar.py' file was deleted even though"
             " it was excluded."))

    def test_filter_s3_with_prefix(self):
        bucket_name = _SHARED_BUCKET
        self.put_object(bucket_name, key_name='temp/test')
        p = aws('s3 cp s3://%s/temp/ %s --recursive --exclude test --dryrun'
                % (bucket_name, self.files.rootdir))
        self.assert_no_files_would_be_uploaded(p)

    def test_filter_no_resync(self):
        # This specifically tests for the issue described here:
        # https://github.com/aws/aws-cli/issues/794
        bucket_name = _SHARED_BUCKET
        dir_name = os.path.join(self.files.rootdir, 'temp')
        self.files.create_file(os.path.join(dir_name, 'test.txt'),
                               contents='foo')
        # Sync a local directory to an s3 prefix.
        p = aws('s3 sync %s s3://%s/temp' % (dir_name, bucket_name))
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, key_name='temp/test.txt'))

        # Nothing should be synced down if filters are used.
        p = aws("s3 sync s3://%s/temp %s --exclude '*' --include test.txt"
                % (bucket_name, dir_name))
        self.assert_no_files_would_be_uploaded(p)
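# A rough, self-contained illustration (not the CLI's actual implementation)
# of the filter semantics the class above relies on: --exclude/--include
# patterns are evaluated in the order given and the last matching filter
# wins, which is why "--exclude '*' --include 'test/*'" still uploads
# test/foo.txt while "--exclude '*'" alone uploads nothing.
def _example_filter_decision(relative_path, filters):
    """Return True if `relative_path` would be transferred.

    `filters` is an ordered list of ('exclude'|'include', pattern) tuples.
    """
    import fnmatch

    allowed = True  # Paths are included unless a filter says otherwise.
    for rule, pattern in filters:
        if fnmatch.fnmatch(relative_path, pattern):
            allowed = (rule == 'include')
    return allowed


# _example_filter_decision('test/foo.txt',
#                          [('exclude', '*'), ('include', 'test/*')]) -> True
# _example_filter_decision('test.txt',
#                          [('exclude', '*'), ('include', 'test/*')]) -> False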
class TestFileWithSpaces(BaseS3IntegrationTest):
    def test_upload_download_file_with_spaces(self):
        bucket_name = _SHARED_BUCKET
        filename = self.files.create_file('with space.txt', 'contents')
        p = aws('s3 cp %s s3://%s/ --recursive' % (self.files.rootdir,
                                                   bucket_name))
        self.assert_no_errors(p)
        os.remove(filename)
        # Now download the file back down locally.
        p = aws('s3 cp s3://%s/ %s --recursive' % (bucket_name,
                                                   self.files.rootdir))
        self.assert_no_errors(p)
        self.assertEqual(os.listdir(self.files.rootdir)[0], 'with space.txt')

    def test_sync_file_with_spaces(self):
        bucket_name = _SHARED_BUCKET
        self.files.create_file('with space.txt',
                               'contents', mtime=time.time() - 300)
        p = aws('s3 sync %s s3://%s/' % (self.files.rootdir,
                                         bucket_name))
        self.assert_no_errors(p)
        time.sleep(1)
        # Now syncing again should *not* trigger any uploads (i.e. we should
        # get nothing on stdout).
        p2 = aws('s3 sync %s s3://%s/' % (self.files.rootdir,
                                          bucket_name))
        self.assertEqual(p2.stdout, '')
        self.assertEqual(p2.stderr, '')
        self.assertEqual(p2.rc, 0)


class TestStreams(BaseS3IntegrationTest):
    def test_upload(self):
        """
        This tests uploading a small stream from stdin.
        """
        bucket_name = _SHARED_BUCKET
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=b'This is a test')
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, 'stream'))
        self.assertEqual(self.get_key_contents(bucket_name, 'stream'),
                         'This is a test')

    def test_unicode_upload(self):
        """
        This tests being able to upload unicode from stdin.
        """
        unicode_str = u'\u00e9 This is a test'
        byte_str = unicode_str.encode('utf-8')
        bucket_name = _SHARED_BUCKET
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=byte_str)
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, 'stream'))
        self.assertEqual(self.get_key_contents(bucket_name, 'stream'),
                         unicode_str)

    @pytest.mark.slow
    def test_multipart_upload(self):
        """
        This tests the ability to multipart upload streams from stdin.
        The data has some unicode in it to avoid having to do a separate
        multipart upload test just for unicode.
        """
        bucket_name = _SHARED_BUCKET
        data = u'\u00e9bcd' * (1024 * 1024 * 10)
        data_encoded = data.encode('utf-8')
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=data_encoded)
        self.assert_no_errors(p)
        self.assertTrue(self.key_exists(bucket_name, 'stream'))
        self.assert_key_contents_equal(bucket_name, 'stream', data)

    def test_download(self):
        """
        This tests downloading a small stream to stdout.
        """
        bucket_name = _SHARED_BUCKET
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=b'This is a test')
        self.assert_no_errors(p)

        p = aws('s3 cp s3://%s/stream -' % bucket_name)
        self.assert_no_errors(p)
        self.assertEqual(p.stdout, 'This is a test')

    def test_unicode_download(self):
        """
        This tests downloading a small unicode stream to stdout.
        """
        bucket_name = _SHARED_BUCKET

        data = u'\u00e9 This is a test'
        data_encoded = data.encode('utf-8')
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=data_encoded)
        self.assert_no_errors(p)

        # Downloading the unicode stream to standard out.
        p = aws('s3 cp s3://%s/stream -' % bucket_name)
        self.assert_no_errors(p)
        self.assertEqual(p.stdout, data_encoded.decode(get_stdout_encoding()))

    @pytest.mark.slow
    def test_multipart_download(self):
        """
        This tests the ability to multipart download streams to stdout.
        The data has some unicode in it to avoid having to do a separate
        multipart download test just for unicode.
        """
        bucket_name = _SHARED_BUCKET

        # First let's upload some data via streaming since
        # it's faster and we do not have to write to a file!
        data = u'\u00e9bcd' * (1024 * 1024 * 10)
        data_encoded = data.encode('utf-8')
        p = aws('s3 cp - s3://%s/stream' % bucket_name,
                input_data=data_encoded)

        # Download the unicode stream to standard out.
        p = aws('s3 cp s3://%s/stream -' % bucket_name)
        self.assert_no_errors(p)
        self.assertEqual(p.stdout, data_encoded.decode(get_stdout_encoding()))
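# For context on the payload sizes used by the multipart streaming tests
# above: u'\u00e9bcd' encodes to 5 bytes of UTF-8 (two bytes for the e-acute
# plus three ASCII bytes), so repeating it 1024 * 1024 * 10 times yields
# roughly 50 MiB, which is well past the multipart threshold carried in the
# imported transferconfig DEFAULTS (assumed here to expose a
# 'multipart_threshold' key, as current awscli does), forcing the stream down
# the multipart code path. A quick sanity check, not executed by the tests:
def _example_streamed_payload_is_multipart():
    encoded_chunk = u'\u00e9bcd'.encode('utf-8')
    payload_size = len(encoded_chunk) * (1024 * 1024 * 10)
    # DEFAULTS comes from awscli.customizations.s3.transferconfig, imported
    # at the top of this module.
    return payload_size > DEFAULTS['multipart_threshold']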
class TestLSWithProfile(BaseS3IntegrationTest):
    def extra_setup(self):
        self.config_file = os.path.join(self.files.rootdir, 'tmpconfig')
        with open(self.config_file, 'w') as f:
            creds = self.session.get_credentials()
            f.write(
                "[profile testprofile]\n"
                "aws_access_key_id=%s\n"
                "aws_secret_access_key=%s\n" % (
                    creds.access_key,
                    creds.secret_key)
            )
            if creds.token is not None:
                f.write("aws_session_token=%s\n" % creds.token)

    def test_can_ls_with_profile(self):
        env_vars = os.environ.copy()
        env_vars['AWS_CONFIG_FILE'] = self.config_file
        p = aws('s3 ls s3:// --profile testprofile', env_vars=env_vars)
        self.assert_no_errors(p)


class TestNoSignRequests(BaseS3IntegrationTest):
    def test_no_sign_request(self):
        bucket_name = _SHARED_BUCKET
        self.put_object(bucket_name, 'foo', contents='bar',
                        extra_args={'ACL': 'public-read-write'})
        env_vars = os.environ.copy()
        env_vars['AWS_ACCESS_KEY_ID'] = 'foo'
        env_vars['AWS_SECRET_ACCESS_KEY'] = 'bar'
        p = aws('s3 cp s3://%s/foo %s/ --region %s' %
                (bucket_name, self.files.rootdir, self.region),
                env_vars=env_vars)
        # Should have credential issues
        self.assertEqual(p.rc, 1)

        p = aws('s3 cp s3://%s/foo %s/ --region %s --no-sign-request' %
                (bucket_name, self.files.rootdir, self.region),
                env_vars=env_vars)
        # Should be able to download the file when not signing the request.
        self.assert_no_errors(p)
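# A minimal sketch (assumed equivalent behavior, not part of the test suite)
# of what --no-sign-request corresponds to at the botocore level: a client
# configured with the UNSIGNED signature version, so credentials are never
# consulted and only publicly readable objects can be fetched.
def _example_unsigned_client():
    from botocore import UNSIGNED
    from botocore.config import Config

    session = botocore.session.get_session()
    # Requests from this client are sent without a Signature header.
    return session.create_client(
        's3', region_name=_DEFAULT_REGION,
        config=Config(signature_version=UNSIGNED))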
class TestHonorsEndpointUrl(BaseS3IntegrationTest):
    def test_verify_endpoint_url_is_used(self):
        # We're going to verify this indirectly by looking at the
        # debug logs. The endpoint url we specify should be in the
        # debug logs, and the endpoint url that botocore would have
        # used if we didn't provide the endpoint-url should not
        # be in the debug logs. The other alternative is to actually
        # watch what connections are made in the process, which is not
        # easy.
        p = aws('s3 ls s3://dnscompat/ '
                '--endpoint-url http://localhost:51515 '
                '--debug')
        debug_logs = p.stderr
        original_hostname = 'dnscompat.s3.amazonaws.com'
        expected = 'localhost'
        self.assertNotIn(original_hostname, debug_logs,
                         '--endpoint-url is being ignored in s3 commands.')
        self.assertIn(expected, debug_logs)
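# For reference, a minimal sketch (not used by the tests) of the same
# override at the botocore level: create_client() accepts an endpoint_url
# that replaces the default regional endpoint, which is the effect the
# --endpoint-url option is verified to have above. The localhost URL is a
# placeholder with nothing listening on it.
def _example_client_with_endpoint_override():
    session = botocore.session.get_session()
    # All requests from this client go to the override instead of
    # s3.<region>.amazonaws.com.
    return session.create_client(
        's3', region_name=_DEFAULT_REGION,
        endpoint_url='http://localhost:51515')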
class TestSSERelatedParams(BaseS3IntegrationTest):
    def download_and_assert_kms_object_integrity(self, bucket, key, contents):
        self.wait_until_key_exists(bucket, key)
        # Ensure the KMS-encrypted object can be downloaded by passing
        # --sse aws:kms, which forces SigV4 to be used on the download,
        # as it is required for kms.
        download_filename = os.path.join(self.files.rootdir, 'tmp', key)
        p = aws('s3 cp s3://%s/%s %s --sse aws:kms' % (
            bucket, key, download_filename))
        self.assert_no_errors(p)

        self.assertTrue(os.path.isfile(download_filename))
        with open(download_filename, 'r') as f:
            self.assertEqual(f.read(), contents)

    def test_sse_upload(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload the file using AES256
        p = aws('s3 cp %s s3://%s/%s --sse AES256' % (file_name, bucket, key))
        self.assert_no_errors(p)

        # Ensure the file was uploaded correctly
        self.assert_key_contents_equal(bucket, key, contents)

    def test_large_file_sse_upload(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'a' * (10 * (1024 * 1024))
        file_name = self.files.create_file(key, contents)

        # Upload the file using AES256
        p = aws('s3 cp %s s3://%s/%s --sse AES256' % (file_name, bucket, key))
        self.assert_no_errors(p)

        # Ensure the file was uploaded correctly
        self.assert_key_contents_equal(bucket, key, contents)

    def test_sse_with_kms_upload(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload the file using KMS
        p = aws('s3 cp %s s3://%s/%s --sse aws:kms' % (file_name, bucket, key))
        self.assert_no_errors(p)

        self.download_and_assert_kms_object_integrity(bucket, key, contents)

    def test_large_file_sse_kms_upload(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'a' * (10 * (1024 * 1024))
        file_name = self.files.create_file(key, contents)

        # Upload the file using KMS
        p = aws('s3 cp %s s3://%s/%s --sse aws:kms' % (file_name, bucket, key))
        self.assert_no_errors(p)

        self.download_and_assert_kms_object_integrity(bucket, key, contents)

    def test_sse_copy(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'contents'
        self.put_object(bucket, key, contents)

        # Copy the file using AES256
        p = aws('s3 cp s3://%s/%s s3://%s/%s --sse AES256' % (
            bucket, key, bucket, new_key))
        self.assert_no_errors(p)

        # Ensure the file was copied correctly
        self.assert_key_contents_equal(bucket, new_key, contents)

    def test_large_file_sse_copy(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'a' * (10 * (1024 * 1024))

        # This is a little faster and more efficient than
        # calling self.put_object()
        file_name = self.files.create_file(key, contents)
        p = aws('s3 cp %s s3://%s/%s' % (file_name, bucket, key))
        self.assert_no_errors(p)

        # Copy the file using AES256
        p = aws('s3 cp s3://%s/%s s3://%s/%s --sse AES256' % (
            bucket, key, bucket, new_key))
        self.assert_no_errors(p)

        # Ensure the file was copied correctly
        self.assert_key_contents_equal(bucket, new_key, contents)

    def test_sse_kms_copy(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'contents'
        self.put_object(bucket, key, contents)

        # Copy the file using KMS
        p = aws('s3 cp s3://%s/%s s3://%s/%s --sse aws:kms' % (
            bucket, key, bucket, new_key))
        self.assert_no_errors(p)
        self.download_and_assert_kms_object_integrity(bucket, key, contents)

    def test_large_file_sse_kms_copy(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'a' * (10 * (1024 * 1024))

        # This is a little faster and more efficient than
        # calling self.put_object()
        file_name = self.files.create_file(key, contents)
        p = aws('s3 cp %s s3://%s/%s' % (file_name, bucket, key))
        self.assert_no_errors(p)

        # Copy the file using KMS
        p = aws('s3 cp s3://%s/%s s3://%s/%s --sse aws:kms' % (
            bucket, key, bucket, new_key))
        self.assert_no_errors(p)
        self.download_and_assert_kms_object_integrity(bucket, key, contents)

    def test_smoke_sync_sse(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload sync
        p = aws('s3 sync %s s3://%s/foo/ --sse AES256' % (
            self.files.rootdir, bucket))
        self.assert_no_errors(p)
        self.wait_until_key_exists(bucket, 'foo/foo.txt')

        # Copy sync
        p = aws('s3 sync s3://%s/foo/ s3://%s/bar/ --sse AES256' % (
            bucket, bucket))
        self.assert_no_errors(p)
        self.wait_until_key_exists(bucket, 'bar/foo.txt')

        # Remove the original file
        os.remove(file_name)

        # Download sync
        p = aws('s3 sync s3://%s/bar/ %s --sse AES256' % (
            bucket, self.files.rootdir))
        self.assert_no_errors(p)

        self.assertTrue(os.path.isfile(file_name))
        with open(file_name, 'r') as f:
            self.assertEqual(f.read(), contents)

    def test_smoke_sync_sse_kms(self):
        bucket = _SHARED_BUCKET
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload sync
        p = aws('s3 sync %s s3://%s/foo/ --sse aws:kms' % (
            self.files.rootdir, bucket))
        self.assert_no_errors(p)

        # Copy sync
        p = aws('s3 sync s3://%s/foo/ s3://%s/bar/ --sse aws:kms' % (
            bucket, bucket))
        self.assert_no_errors(p)

        # Remove the original file
        os.remove(file_name)

        # Download sync
        p = aws('s3 sync s3://%s/bar/ %s --sse aws:kms' % (
            bucket, self.files.rootdir))
        self.assert_no_errors(p)

        self.assertTrue(os.path.isfile(file_name))
        with open(file_name, 'r') as f:
            self.assertEqual(f.read(), contents)
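# A minimal sketch (not used by the tests) of the server-side encryption
# parameters that the --sse option exercised above roughly corresponds to at
# the API level: AES256 selects SSE-S3, while aws:kms selects SSE-KMS. The
# bucket and key names are placeholders.
def _example_sse_put_object(bucket='example-bucket', key='example-key'):
    s3 = botocore.session.get_session().create_client(
        's3', region_name=_DEFAULT_REGION)
    # SSE-S3: S3 manages the encryption key.
    s3.put_object(Bucket=bucket, Key=key, Body=b'contents',
                  ServerSideEncryption='AES256')
    # SSE-KMS: encrypt under the account's default aws/s3 KMS key.
    s3.put_object(Bucket=bucket, Key=key, Body=b'contents',
                  ServerSideEncryption='aws:kms')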
class TestSSECRelatedParams(BaseS3IntegrationTest):
    def setUp(self):
        super(TestSSECRelatedParams, self).setUp()
        self.encrypt_key = 'a' * 32
        self.other_encrypt_key = 'b' * 32
        self.bucket = _SHARED_BUCKET

    def download_and_assert_sse_c_object_integrity(
            self, bucket, key, encrypt_key, contents):
        self.wait_until_key_exists(bucket, key,
                                   {'SSECustomerKey': encrypt_key,
                                    'SSECustomerAlgorithm': 'AES256'})
        download_filename = os.path.join(self.files.rootdir, 'tmp', key)
        p = aws('s3 cp s3://%s/%s %s --sse-c AES256 --sse-c-key %s' % (
            bucket, key, download_filename, encrypt_key))
        self.assert_no_errors(p)

        self.assertTrue(os.path.isfile(download_filename))
        with open(download_filename, 'r') as f:
            self.assertEqual(f.read(), contents)

    def test_sse_c_upload_and_download(self):
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload the file using SSE-C
        p = aws('s3 cp %s s3://%s --sse-c AES256 --sse-c-key %s' % (
            file_name, self.bucket, self.encrypt_key))
        self.assert_no_errors(p)

        self.download_and_assert_sse_c_object_integrity(
            self.bucket, key, self.encrypt_key, contents)

    def test_can_delete_single_sse_c_object(self):
        key = 'foo.txt'
        contents = 'contents'
        self.put_object(
            self.bucket, key, contents,
            extra_args={
                'SSECustomerKey': self.encrypt_key,
                'SSECustomerAlgorithm': 'AES256'
            }
        )
        p = aws('s3 rm s3://%s/%s' % (self.bucket, key))
        self.assert_no_errors(p)
        self.assertFalse(self.key_exists(self.bucket, key))

    def test_sse_c_upload_and_download_large_file(self):
        key = 'foo.txt'
        contents = 'a' * (10 * (1024 * 1024))
        file_name = self.files.create_file(key, contents)

        # Upload the file using SSE-C
        p = aws('s3 cp %s s3://%s --sse-c AES256 --sse-c-key %s' % (
            file_name, self.bucket, self.encrypt_key))
        self.assert_no_errors(p)

        self.download_and_assert_sse_c_object_integrity(
            self.bucket, key, self.encrypt_key, contents)

    def test_sse_c_copy(self):
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload the file using SSE-C
        p = aws('s3 cp %s s3://%s --sse-c AES256 --sse-c-key %s' % (
            file_name, self.bucket, self.encrypt_key))
        self.assert_no_errors(p)

        # Copy the file using SSE-C and a new encryption key
        p = aws(
            's3 cp s3://%s/%s s3://%s/%s --sse-c AES256 --sse-c-key %s '
            '--sse-c-copy-source AES256 --sse-c-copy-source-key %s' % (
                self.bucket, key, self.bucket, new_key, self.other_encrypt_key,
                self.encrypt_key))
        self.assert_no_errors(p)
        self.download_and_assert_sse_c_object_integrity(
            self.bucket, new_key, self.other_encrypt_key, contents)

    def test_sse_c_copy_large_file(self):
        key = 'foo.txt'
        new_key = 'bar.txt'
        contents = 'a' * (10 * (1024 * 1024))
        file_name = self.files.create_file(key, contents)

        # Upload the file using SSE-C
        p = aws('s3 cp %s s3://%s --sse-c AES256 --sse-c-key %s' % (
            file_name, self.bucket, self.encrypt_key))
        self.assert_no_errors(p)

        # Copy the file using SSE-C and a new encryption key
        p = aws(
            's3 cp s3://%s/%s s3://%s/%s --sse-c AES256 --sse-c-key %s '
            '--sse-c-copy-source AES256 --sse-c-copy-source-key %s' % (
                self.bucket, key, self.bucket, new_key, self.other_encrypt_key,
                self.encrypt_key))
        self.assert_no_errors(p)
        self.download_and_assert_sse_c_object_integrity(
            self.bucket, new_key, self.other_encrypt_key, contents)

    def test_smoke_sync_sse_c(self):
        key = 'foo.txt'
        contents = 'contents'
        file_name = self.files.create_file(key, contents)

        # Upload sync
        p = aws('s3 sync %s s3://%s/foo/ --sse-c AES256 --sse-c-key %s' % (
            self.files.rootdir, self.bucket, self.encrypt_key))
        self.assert_no_errors(p)

        # Copy sync
        p = aws('s3 sync s3://%s/foo/ s3://%s/bar/ --sse-c AES256 '
                '--sse-c-key %s --sse-c-copy-source AES256 '
                '--sse-c-copy-source-key %s' % (
                    self.bucket, self.bucket, self.other_encrypt_key,
                    self.encrypt_key))
        self.assert_no_errors(p)

        # Remove the original file
        os.remove(file_name)

        # Download sync
        p = aws('s3 sync s3://%s/bar/ %s --sse-c AES256 --sse-c-key %s' % (
            self.bucket, self.files.rootdir, self.other_encrypt_key))
        self.assert_no_errors(p)

        self.assertTrue(os.path.isfile(file_name))
        with open(file_name, 'r') as f:
            self.assertEqual(f.read(), contents)
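# A minimal sketch (not used by the tests) of the request parameters that
# --sse-c/--sse-c-key roughly correspond to: with SSE-C the caller supplies
# the 256-bit key on every request that touches the object, including the
# download. The bucket and key names are placeholders; the key shape mirrors
# the 'a' * 32 keys used in the class above.
def _example_sse_c_roundtrip(bucket='example-bucket', key='example-key'):
    s3 = botocore.session.get_session().create_client(
        's3', region_name=_DEFAULT_REGION)
    customer_key = 'a' * 32
    s3.put_object(Bucket=bucket, Key=key, Body=b'contents',
                  SSECustomerAlgorithm='AES256',
                  SSECustomerKey=customer_key)
    # The same key and algorithm must accompany the GET as well.
    return s3.get_object(Bucket=bucket, Key=key,
                         SSECustomerAlgorithm='AES256',
                         SSECustomerKey=customer_key)['Body'].read()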
class TestPresignCommand(BaseS3IntegrationTest):

    def test_can_retrieve_presigned_url(self):
        bucket_name = _SHARED_BUCKET
        original_contents = b'this is foo.txt'
        self.put_object(bucket_name, 'foo.txt', original_contents)
        p = aws('s3 presign s3://%s/foo.txt' % (bucket_name,))
        self.assert_no_errors(p)
        url = p.stdout.strip()
        contents = urlopen(url).read()
        self.assertEqual(contents, original_contents)
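# For comparison, a minimal sketch (not used by the tests) of generating the
# same kind of URL directly with botocore; `aws s3 presign` prints a URL that
# can be fetched unauthenticated until it expires. The bucket name is a
# placeholder and ExpiresIn is in seconds.
def _example_generate_presigned_url(bucket='example-bucket', key='foo.txt'):
    s3 = botocore.session.get_session().create_client(
        's3', region_name=_DEFAULT_REGION)
    return s3.generate_presigned_url(
        'get_object',
        Params={'Bucket': bucket, 'Key': key},
        ExpiresIn=3600,
    )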