Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added lifecycle overlap #356

Merged
6 changes: 6 additions & 0 deletions examples/bucket.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,12 @@
service = oss2.Service(oss2.Auth(access_key_id, access_key_secret), endpoint)
print('\n'.join(info.name for info in oss2.BucketIterator(service)))

# 列举所有的Bucket,单region场景
params = {}
params['regionList'] = ''
result = service.list_buckets(params=params)
for bucket_info in result.buckets:
print('name: ' + bucket_info.name)

# 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
Expand Down
41 changes: 41 additions & 0 deletions examples/describe_regions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@

import os
import oss2

# Sample: query the endpoint information of OSS regions.
#
# AccessKeyId, AccessKeySecret and Endpoint are read from environment
# variables when available; otherwise replace the placeholder values
# (e.g. <yourAccessKeyId>) with your real credentials.
#
# For a bucket located in the China (Hangzhou) region, Endpoint can be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com


access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<yourAccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<yourAccessKeySecret>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<yourEndpoint>')


# Verify that none of the parameters is still a placeholder.
for value in (access_key_id, access_key_secret, endpoint):
    assert '<' not in value, 'Please set parameters:' + value

service = oss2.Service(oss2.Auth(access_key_id, access_key_secret), endpoint)

# List the endpoint information of every supported region.
result = service.describe_regions()

for info in result.regions:
    print('region: {0}'.format(info.region))
    print('internet_endpoint: {0}'.format(info.internet_endpoint))
    print('internal_endpoint: {0}'.format(info.internal_endpoint))
    print('accelerate_endpoint: {0}'.format(info.accelerate_endpoint))


# List the endpoint information of one specific region.
result = service.describe_regions('oss-cn-hangzhou')

for info in result.regions:
    print('Specific region: {0}'.format(info.region))
    print('Specific internet_endpoint: {0}'.format(info.internet_endpoint))
    print('Specific internal_endpoint: {0}'.format(info.internal_endpoint))
    print('Specific accelerate_endpoint: {0}'.format(info.accelerate_endpoint))
43 changes: 43 additions & 0 deletions examples/set_tls.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
import os
import ssl
from requests.adapters import HTTPAdapter
import oss2

# Sample: configure TLS/SSL through a custom requests transport adapter.

# AccessKeyId, AccessKeySecret and Endpoint are read from environment
# variables when available; otherwise replace the placeholder values
# (e.g. "<你的AccessKeyId>") with your real credentials.
#
# For the Hangzhou region, Endpoint can be:
#   http://oss-cn-hangzhou.aliyuncs.com   (HTTP)
#   https://oss-cn-hangzhou.aliyuncs.com  (HTTPS)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')


# Verify that none of the parameters is still a placeholder.
for credential in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in credential, '请设置参数:' + credential

# Custom SSL adapter; pinning ssl_version is used here only as an example.
class SSLAdapter(HTTPAdapter):
    def init_poolmanager(self, *args, **kwargs):
        # Force TLS 1.2 for every pooled connection.
        kwargs["ssl_version"] = ssl.PROTOCOL_TLSv1_2
        return super().init_poolmanager(*args, **kwargs)

# Build a session that mounts the custom adapter.
session = oss2.Session(adapter=SSLAdapter())

# All object-level operations go through the Bucket object.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name, session=session)

# Upload a small object to verify the connection works.
bucket.put_object("example.txt", "hello")
5 changes: 3 additions & 2 deletions oss2/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -1680,14 +1680,15 @@ def delete_bucket_cors(self):
logger.debug("Delete bucket CORS done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)

def put_bucket_lifecycle(self, input):
def put_bucket_lifecycle(self, input, headers=None):
"""设置生命周期管理的配置。

:param input: :class:`BucketLifecycle <oss2.models.BucketLifecycle>` 对象或其他
"""
headers = http.CaseInsensitiveDict(headers)
data = self.__convert_data(BucketLifecycle, xml_utils.to_put_bucket_lifecycle, input)
logger.debug("Start to put bucket lifecycle, bucket: {0}, lifecycle: {1}".format(self.bucket_name, data))
resp = self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''})
resp = self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''}, headers=headers)
logger.debug("Put bucket lifecycle done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)

Expand Down
2 changes: 1 addition & 1 deletion oss2/auth.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ class ProviderAuth(AuthBase):
'callback-var', 'worm', 'wormId', 'wormExtend', 'replication', 'replicationLocation',
'replicationProgress', 'transferAcceleration', 'cname', 'metaQuery',
'x-oss-ac-source-ip', 'x-oss-ac-subnet-mask', 'x-oss-ac-vpc-id', 'x-oss-ac-forward-allow',
'resourceGroup', 'style', 'styleName', 'x-oss-async-process']
'resourceGroup', 'style', 'styleName', 'x-oss-async-process', 'regionList']
)

def _sign_request(self, req, bucket_name, key):
Expand Down
1 change: 1 addition & 0 deletions oss2/headers.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@
OSS_SERVER_SIDE_DATA_ENCRYPTION = "x-oss-server-side-data-encryption"

OSS_METADATA_DIRECTIVE = 'x-oss-metadata-directive'
OSS_ALLOW_ACTION_OVERLAP = "x-oss-allow-same-action-overlap"

class RequestHeader(dict):
def __init__(self, *arg, **kw):
Expand Down
10 changes: 7 additions & 3 deletions oss2/http.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,16 @@
class Session(object):
"""属于同一个Session的请求共享一组连接池,如有可能也会重用HTTP连接。"""

def __init__(self, pool_size=None):
def __init__(self, pool_size=None, adapter=None):
self.session = requests.Session()

psize = pool_size or defaults.connection_pool_size
self.session.mount('http://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))
self.session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))
if adapter is None:
self.session.mount('http://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))
self.session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=psize, pool_maxsize=psize))
else:
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)

def do_request(self, req, timeout):
try:
Expand Down
28 changes: 21 additions & 7 deletions oss2/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -659,10 +659,11 @@ def __init__(self, storage_class, data_redundancy_type=None):


class BucketStat(object):
def __init__(self, storage_size_in_bytes, object_count, multi_part_upload_count, live_channel_count,
last_modified_time, standard_storage, standard_object_count, infrequent_access_storage,
infrequent_access_real_storage, infrequent_access_object_count, archive_storage, archive_real_storage,
archive_object_count, cold_archive_storage, cold_archive_real_storage, cold_archive_object_count):
def __init__(self, storage_size_in_bytes, object_count, multi_part_upload_count, live_channel_count=None,
last_modified_time=None, standard_storage=None, standard_object_count=None, infrequent_access_storage=None,
infrequent_access_real_storage=None, infrequent_access_object_count=None, archive_storage=None, archive_real_storage=None,
archive_object_count=None, cold_archive_storage=None, cold_archive_real_storage=None, cold_archive_object_count=None,
multipart_part_count=None, delete_marker_count=None):
self.storage_size_in_bytes = storage_size_in_bytes
self.object_count = object_count
self.multi_part_upload_count = multi_part_upload_count
Expand Down Expand Up @@ -692,6 +693,10 @@ def __init__(self, storage_size_in_bytes, object_count, multi_part_upload_count,
self.cold_archive_real_storage = cold_archive_real_storage
#: 冷归档存储类型的object数量
self.cold_archive_object_count = cold_archive_object_count
#: 分片数量
self.multipart_part_count = multipart_part_count
#: 删除标记数量
self.delete_marker_count = delete_marker_count


class AccessControlList(object):
Expand Down Expand Up @@ -732,7 +737,7 @@ def __init__(self, name=None, owner=None, location=None, storage_class=None, int
class GetBucketStatResult(RequestResult, BucketStat):
def __init__(self, resp):
RequestResult.__init__(self, resp)
BucketStat.__init__(self, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
BucketStat.__init__(self, 0, 0, 0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)



Expand Down Expand Up @@ -1820,21 +1825,26 @@ class AsyncFetchTaskConfiguration(object):

:param ignore_same_key: 默认为True表示如果文件已存在则忽略本次任务,api调用将会报错。如果为False,则会覆盖已存在的object。
:type ignore_same_key: bool

:param callback_when_failed: 失败时是否回调。
:type callback_when_failed: bool
"""
def __init__(self,
url,
object_name,
host = None,
content_md5 = None,
callback = None,
ignore_same_key = None):
ignore_same_key = None,
callback_when_failed = None):

self.url = url
self.object_name = object_name
self.host = host
self.content_md5 = content_md5
self.callback = callback
self.ignore_same_key = ignore_same_key
self.callback_when_failed = callback_when_failed

class PutAsyncFetchTaskResult(RequestResult):
def __init__(self, resp, task_id=None):
Expand Down Expand Up @@ -2588,10 +2598,14 @@ class LifecycleFilter(object):
"""规则的条件参数容器。

:param list filter_not: 规则的匹配容器。 元素类型为:class:`FilterNot <oss2.models.FilterNot>`。
:param int object_size_greater_than: object大小大于.
:param int object_size_less_than: object大小小于.
"""

def __init__(self, filter_not=None):
def __init__(self, filter_not=None, object_size_greater_than=None, object_size_less_than=None):
self.filter_not = filter_not or []
self.object_size_greater_than = object_size_greater_than
self.object_size_less_than = object_size_less_than

class FilterNot(object):
"""规则的匹配容器。
Expand Down
97 changes: 63 additions & 34 deletions oss2/xml_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -353,19 +353,36 @@ def parse_get_bucket_stat(result, body):
result.storage_size_in_bytes = _find_int(root, 'Storage')
result.object_count = _find_int(root, 'ObjectCount')
result.multi_part_upload_count = int(_find_tag_with_default(root, 'MultipartUploadCount', 0))
result.live_channel_count = int(_find_tag_with_default(root, 'LiveChannelCount', 0))
result.last_modified_time = int(_find_tag_with_default(root, 'LastModifiedTime', 0))
result.standard_storage = int(_find_tag_with_default(root, 'StandardStorage', 0))
result.standard_object_count = int(_find_tag_with_default(root, 'StandardObjectCount', 0))
result.infrequent_access_storage = int(_find_tag_with_default(root, 'InfrequentAccessStorage', 0))
result.infrequent_access_real_storage = int(_find_tag_with_default(root, 'InfrequentAccessRealStorage', 0))
result.infrequent_access_object_count = int(_find_tag_with_default(root, 'InfrequentAccessObjectCount', 0))
result.archive_storage = int(_find_tag_with_default(root, 'ArchiveStorage', 0))
result.archive_real_storage = int(_find_tag_with_default(root, 'ArchiveRealStorage', 0))
result.archive_object_count = int(_find_tag_with_default(root, 'ArchiveObjectCount', 0))
result.cold_archive_storage = int(_find_tag_with_default(root, 'ColdArchiveStorage', 0))
result.cold_archive_real_storage = int(_find_tag_with_default(root, 'ColdArchiveRealStorage', 0))
result.cold_archive_object_count = int(_find_tag_with_default(root, 'ColdArchiveObjectCount', 0))
if root.find('LiveChannelCount') is not None:
result.live_channel_count = int(_find_tag(root, 'LiveChannelCount'))
if root.find('LastModifiedTime') is not None:
result.last_modified_time = int(_find_tag(root, 'LastModifiedTime'))
if root.find('StandardStorage') is not None:
result.standard_storage = int(_find_tag(root, 'StandardStorage'))
if root.find('StandardObjectCount') is not None:
result.standard_object_count = int(_find_tag(root, 'StandardObjectCount'))
if root.find('InfrequentAccessStorage') is not None:
result.infrequent_access_storage = int(_find_tag(root, 'InfrequentAccessStorage'))
if root.find('InfrequentAccessRealStorage') is not None:
result.infrequent_access_real_storage = int(_find_tag(root, 'InfrequentAccessRealStorage'))
if root.find('InfrequentAccessObjectCount') is not None:
result.infrequent_access_object_count = int(_find_tag(root, 'InfrequentAccessObjectCount'))
if root.find('ArchiveStorage') is not None:
result.archive_storage = int(_find_tag(root, 'ArchiveStorage'))
if root.find('ArchiveRealStorage') is not None:
result.archive_real_storage = int(_find_tag(root, 'ArchiveRealStorage'))
if root.find('ArchiveObjectCount') is not None:
result.archive_object_count = int(_find_tag(root, 'ArchiveObjectCount'))
if root.find('ColdArchiveStorage') is not None:
result.cold_archive_storage = int(_find_tag(root, 'ColdArchiveStorage'))
if root.find('ColdArchiveRealStorage') is not None:
result.cold_archive_real_storage = int(_find_tag(root, 'ColdArchiveRealStorage'))
if root.find('ColdArchiveObjectCount') is not None:
result.cold_archive_object_count = int(_find_tag(root, 'ColdArchiveObjectCount'))
if root.find('MultipartPartCount') is not None:
result.multipart_part_count = int(_find_tag(root, 'MultipartPartCount'))
if root.find('DeleteMarkerCount') is not None:
result.delete_marker_count = int(_find_tag(root, 'DeleteMarkerCount'))

return result

Expand Down Expand Up @@ -776,7 +793,7 @@ def parse_get_bucket_lifecycle(result, body):
tagging = parse_lifecycle_object_taggings(rule_node.findall('Tag'))
noncurrent_version_expiration = parse_lifecycle_version_expiration(rule_node.find('NoncurrentVersionExpiration'))
noncurrent_version_sotrage_transitions = parse_lifecycle_verison_storage_transitions(rule_node.findall('NoncurrentVersionTransition'))
lifecycle_filter = parse_lifecycle_filter_not(rule_node.findall('Filter/Not'))
lifecycle_filter = parse_lifecycle_filter(rule_node.find('Filter'))

rule = LifecycleRule(
_find_tag(rule_node, 'ID'),
Expand Down Expand Up @@ -1059,16 +1076,21 @@ def to_put_bucket_lifecycle(bucket_lifecycle):
if noncurrent_version_sotrage_transition.allow_small_file is not None:
_add_text_child(version_transition_node, 'AllowSmallFile', str(noncurrent_version_sotrage_transition.allow_small_file).lower())

if rule.filter and rule.filter.filter_not:
if rule.filter:
filter_node = ElementTree.SubElement(rule_node, "Filter")
for not_arg in rule.filter.filter_not:
not_node = ElementTree.SubElement(filter_node, 'Not')

_add_text_child(not_node, 'Prefix', not_arg.prefix)
if not_arg.tag:
tag_node = ElementTree.SubElement(not_node, 'Tag')
_add_text_child(tag_node, 'Key', not_arg.tag.key)
_add_text_child(tag_node, 'Value', not_arg.tag.value)
if rule.filter.object_size_greater_than:
_add_text_child(filter_node, 'ObjectSizeGreaterThan', str(rule.filter.object_size_greater_than))
if rule.filter.object_size_less_than:
_add_text_child(filter_node, 'ObjectSizeLessThan', str(rule.filter.object_size_less_than))
if rule.filter.filter_not:
for not_arg in rule.filter.filter_not:
not_node = ElementTree.SubElement(filter_node, 'Not')

_add_text_child(not_node, 'Prefix', not_arg.prefix)
if not_arg.tag:
tag_node = ElementTree.SubElement(not_node, 'Tag')
_add_text_child(tag_node, 'Key', not_arg.tag.key)
_add_text_child(tag_node, 'Value', not_arg.tag.value)

return _node_to_string(root)

Expand Down Expand Up @@ -1460,6 +1482,8 @@ def to_put_async_fetch_task(task_config):
_add_text_child(root, 'Callback', task_config.callback)
if task_config.ignore_same_key is not None:
_add_text_child(root, 'IgnoreSameKey', str(task_config.ignore_same_key).lower())
if task_config.callback_when_failed is not None:
_add_text_child(root, 'CallbackWhenFailed', str(task_config.callback_when_failed).lower())

return _node_to_string(root)

Expand Down Expand Up @@ -1975,17 +1999,22 @@ def parse_get_bucket_access_monitor_result(result, body):
access_monitor = AccessMonitorInfo(_find_tag(root, "Status"))
result.access_monitor = access_monitor

def parse_lifecycle_filter_not(filter_not_node):
if filter_not_node is not None:

lifecycle_filter = LifecycleFilter()
for not_node in filter_not_node:
prefix = _find_tag_with_default(not_node, 'Prefix', None)
key = _find_tag_with_default(not_node, 'Tag/Key', None)
value = _find_tag_with_default(not_node, 'Tag/Value', None)
tag = FilterNotTag(key, value)
filter_not = FilterNot(prefix, tag)
lifecycle_filter.filter_not.append(filter_not)
def parse_lifecycle_filter(filter_node):
lifecycle_filter = LifecycleFilter()
if filter_node is not None:
if filter_node.find('ObjectSizeGreaterThan') is not None:
lifecycle_filter.object_size_greater_than = int(_find_tag_with_default(filter_node, 'ObjectSizeGreaterThan', 0))
if filter_node.find('ObjectSizeLessThan') is not None:
lifecycle_filter.object_size_less_than = int(_find_tag_with_default(filter_node, 'ObjectSizeLessThan', 0))
not_nodes = filter_node.findall('Not')
if not_nodes is not None:
for not_node in not_nodes:
prefix = _find_tag_with_default(not_node, 'Prefix', None)
key = _find_tag_with_default(not_node, 'Tag/Key', None)
value = _find_tag_with_default(not_node, 'Tag/Value', None)
tag = FilterNotTag(key, value)
filter_not = FilterNot(prefix, tag)
lifecycle_filter.filter_not.append(filter_not)

return lifecycle_filter

Expand Down
Loading