# -*- coding: utf-8 -*-
import asyncio
import errno
import logging
import mimetypes
import os
import socket
from typing import Tuple, Optional
import weakref
from urllib3.exceptions import IncompleteRead
from fsspec.spec import AbstractBufferedFile
from fsspec.utils import infer_storage_options, tokenize, setup_logging as setup_logger
from fsspec.asyn import AsyncFileSystem, sync, sync_wrapper, FSTimeoutError
from fsspec.callbacks import _DEFAULT_CALLBACK
import aiobotocore
import botocore
import aiobotocore.session
from aiobotocore.config import AioConfig
from botocore.exceptions import ClientError, HTTPClientError, ParamValidationError
from s3fs.errors import translate_boto_error
from s3fs.utils import S3BucketRegionCache, ParamKwargsHelper, _get_brange, FileExpired
# ClientPayloadError can be thrown during an incomplete read. aiohttp is a dependency of
# aiobotocore, so we guard the import here in case this dependency is replaced in a
# future version of aiobotocore.
try:
from aiohttp import ClientPayloadError
except ImportError:
ClientPayloadError = None
logger = logging.getLogger("s3fs")
def setup_logging(level=None):
setup_logger(logger=logger, level=(level or os.environ["S3FS_LOGGING_LEVEL"]))
if "S3FS_LOGGING_LEVEL" in os.environ:
setup_logging()
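# Illustrative usage sketch (optional; the call below is equivalent to exporting
# S3FS_LOGGING_LEVEL before import):
#
#   >>> import s3fs                        # doctest: +SKIP
#   >>> s3fs.core.setup_logging("DEBUG")   # doctest: +SKIP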
MANAGED_COPY_THRESHOLD = 5 * 2**30
S3_RETRYABLE_ERRORS = (socket.timeout, HTTPClientError, IncompleteRead)
if ClientPayloadError is not None:
S3_RETRYABLE_ERRORS += (ClientPayloadError,)
_VALID_FILE_MODES = {"r", "w", "a", "rb", "wb", "ab"}
_PRESERVE_KWARGS = [
"CacheControl",
"ContentDisposition",
"ContentEncoding",
"ContentLanguage",
"ContentLength",
"ContentType",
"Expires",
"WebsiteRedirectLocation",
"ServerSideEncryption",
"SSECustomerAlgorithm",
"SSEKMSKeyId",
"BucketKeyEnabled",
"StorageClass",
"ObjectLockMode",
"ObjectLockRetainUntilDate",
"ObjectLockLegalHoldStatus",
"Metadata",
]
key_acls = {
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
}
buck_acls = {"private", "public-read", "public-read-write", "authenticated-read"}
def version_id_kw(version_id):
"""Helper to make versionId kwargs.
Not all boto3 methods accept a None / empty versionId so dictionary expansion solves
that problem.
"""
if version_id:
return {"VersionId": version_id}
else:
return {}
def _coalesce_version_id(*args):
"""Helper to coalesce a list of version_ids down to one"""
version_ids = set(args)
if None in version_ids:
version_ids.remove(None)
if len(version_ids) > 1:
raise ValueError(
"Cannot coalesce version_ids where more than one are defined,"
" {}".format(version_ids)
)
elif len(version_ids) == 0:
return None
else:
return version_ids.pop()
class S3FileSystem(AsyncFileSystem):
"""
Access S3 as if it were a file system.
This exposes a filesystem-like API (ls, cp, open, etc.) on top of S3
storage.
Provide credentials either explicitly (``key=``, ``secret=``) or depend
on boto's credential methods. See botocore documentation for more
information. If no credentials are available, use ``anon=True``.
Parameters
----------
anon : bool (False)
Whether to use anonymous connection (public buckets only). If False,
uses the key/secret given, or boto's credential resolver (client_kwargs,
environment variables, config files, EC2 IAM server, in that order)
key : string (None)
If not anonymous, use this access key ID, if specified
secret : string (None)
If not anonymous, use this secret access key, if specified
token : string (None)
If not anonymous, use this security token, if specified
use_ssl : bool (True)
Whether to use SSL in connections to S3; may be faster without, but
insecure. If ``use_ssl`` is also set in ``client_kwargs``,
the value set in ``client_kwargs`` will take priority.
s3_additional_kwargs : dict of parameters that are used when calling s3 api
methods. Typically used for things like "ServerSideEncryption".
client_kwargs : dict of parameters for the botocore client
requester_pays : bool (False)
Whether RequesterPays buckets are supported.
default_block_size: int (None)
If given, the default block size value used for ``open()``, if no
specific value is given at call time. The built-in default is 5MB.
default_fill_cache : Bool (True)
Whether to use cache filling with open by default. Refer to
``S3File.open``.
default_cache_type : string ('bytes')
If given, the default cache_type value used for ``open()``. Set to "none"
if no caching is desired. See fsspec's documentation for other available
cache_type values. Default cache_type is 'bytes'.
version_aware : bool (False)
Whether to support bucket versioning. If enabled, this will require the
user to have the necessary IAM permissions for dealing with versioned
objects.
cache_regions : bool (False)
Whether to cache bucket regions or not. Whenever a new bucket is used,
it will first find out which region it belongs to and then use the client
for that region.
asynchronous : bool (False)
Whether this instance is to be used from inside coroutines.
config_kwargs : dict of parameters passed to ``botocore.client.Config``
kwargs : other parameters for core session.
session : aiobotocore AioSession object to be used for all connections.
This session will be used in place of creating a new session inside S3FileSystem.
For example: aiobotocore.session.AioSession(profile='test_user')
The following parameters are passed on to fsspec:
skip_instance_cache: to control reuse of instances
use_listings_cache, listings_expiry_time, max_paths: to control reuse of directory listings
Examples
--------
>>> s3 = S3FileSystem(anon=False) # doctest: +SKIP
>>> s3.ls('my-bucket/') # doctest: +SKIP
['my-file.txt']
>>> with s3.open('my-bucket/my-file.txt', mode='rb') as f: # doctest: +SKIP
... print(f.read()) # doctest: +SKIP
b'Hello, world!'
"""
root_marker = ""
connect_timeout = 5
retries = 5
read_timeout = 15
default_block_size = 5 * 2**20
protocol = ["s3", "s3a"]
_extra_tokenize_attributes = ("default_block_size",)
def __init__(
self,
anon=False,
key=None,
secret=None,
token=None,
use_ssl=True,
client_kwargs=None,
requester_pays=False,
default_block_size=None,
default_fill_cache=True,
default_cache_type="bytes",
version_aware=False,
config_kwargs=None,
s3_additional_kwargs=None,
session=None,
username=None,
password=None,
cache_regions=False,
asynchronous=False,
loop=None,
**kwargs
):
if key and username:
raise KeyError("Supply either key or username, not both")
if secret and password:
raise KeyError("Supply secret or password, not both")
if username:
key = username
if password:
secret = password
self.anon = anon
self.key = key
self.secret = secret
self.token = token
self.kwargs = kwargs
super_kwargs = {
k: kwargs.pop(k)
for k in ["use_listings_cache", "listings_expiry_time", "max_paths"]
if k in kwargs
} # passed to fsspec superclass
super().__init__(loop=loop, asynchronous=asynchronous, **super_kwargs)
self.default_block_size = default_block_size or self.default_block_size
self.default_fill_cache = default_fill_cache
self.default_cache_type = default_cache_type
self.version_aware = version_aware
self.client_kwargs = client_kwargs or {}
self.config_kwargs = config_kwargs or {}
self.req_kw = {"RequestPayer": "requester"} if requester_pays else {}
self.s3_additional_kwargs = s3_additional_kwargs or {}
self.use_ssl = use_ssl
self.cache_regions = cache_regions
self._s3 = None
self.session = session
@property
def s3(self):
if self._s3 is None:
if self.asynchronous:
raise RuntimeError("please await ``.set_session`` before anything else")
self.connect()
return self._s3
def _filter_kwargs(self, s3_method, kwargs):
return self._kwargs_helper.filter_dict(s3_method.__name__, kwargs)
async def get_s3(self, bucket=None):
if self.cache_regions and bucket is not None:
return await self._s3creator.get_bucket_client(bucket)
else:
return self._s3
async def _call_s3(self, method, *akwarglist, **kwargs):
await self.set_session()
s3 = await self.get_s3(kwargs.get("Bucket"))
method = getattr(s3, method)
kw2 = kwargs.copy()
kw2.pop("Body", None)
logger.debug("CALL: %s - %s - %s", method.__name__, akwarglist, kw2)
additional_kwargs = self._get_s3_method_kwargs(method, *akwarglist, **kwargs)
for i in range(self.retries):
try:
out = await method(**additional_kwargs)
return out
except S3_RETRYABLE_ERRORS as e:
logger.debug("Retryable error: %s", e)
err = e
await asyncio.sleep(min(1.7**i * 0.1, 15))
except Exception as e:
logger.debug("Nonretryable error: %s", e)
err = e
break
if "'coroutine'" in str(err):
# aiobotocore internal error - fetch original botocore error
tb = err.__traceback__
while tb.tb_next:
tb = tb.tb_next
try:
await tb.tb_frame.f_locals["response"]
except Exception as e:
err = e
err = translate_boto_error(err)
raise err
call_s3 = sync_wrapper(_call_s3)
def _get_s3_method_kwargs(self, method, *akwarglist, **kwargs):
additional_kwargs = self.s3_additional_kwargs.copy()
for akwargs in akwarglist:
additional_kwargs.update(akwargs)
# Add the normal kwargs in
additional_kwargs.update(kwargs)
# filter all kwargs
return self._filter_kwargs(method, additional_kwargs)
@staticmethod
def _get_kwargs_from_urls(urlpath):
"""
When we have a urlpath that contains a ?versionId=
Assume that we want to use version_aware mode for
the filesystem.
"""
url_storage_opts = infer_storage_options(urlpath)
url_query = url_storage_opts.get("url_query")
out = {}
if url_query is not None:
from urllib.parse import parse_qs
parsed = parse_qs(url_query)
if "versionId" in parsed:
out["version_aware"] = True
return out
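# Illustrative sketch (the bucket/key below are hypothetical): a URL carrying a
# versionId query switches the filesystem into version_aware mode when it is
# constructed through fsspec:
#
#   >>> S3FileSystem._get_kwargs_from_urls(
#   ...     "s3://mybucket/key?versionId=abc123")   # doctest: +SKIP
#   {'version_aware': True}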
def split_path(self, path) -> Tuple[str, str, Optional[str]]:
"""
Normalise S3 path string into bucket and key.
Parameters
----------
path : string
Input path, like `s3://mybucket/path/to/file`
Examples
--------
>>> split_path("s3://mybucket/path/to/file")
('mybucket', 'path/to/file', None)
>>> split_path("s3://mybucket/path/to/versioned_file?versionId=some_version_id")
('mybucket', 'path/to/versioned_file', 'some_version_id')
"""
path = self._strip_protocol(path)
path = path.lstrip("/")
if "/" not in path:
return path, "", None
else:
bucket, keypart = path.split("/", 1)
key, _, version_id = keypart.partition("?versionId=")
return (
bucket,
key,
version_id if self.version_aware and version_id else None,
)
def _prepare_config_kwargs(self):
config_kwargs = self.config_kwargs.copy()
if "connect_timeout" not in config_kwargs.keys():
config_kwargs["connect_timeout"] = self.connect_timeout
if "read_timeout" not in config_kwargs.keys():
config_kwargs["read_timeout"] = self.read_timeout
return config_kwargs
async def set_session(self, refresh=False, kwargs={}):
"""Establish S3 connection object.
Returns
-------
Session to be closed later with await .close()
"""
if self._s3 is not None and not refresh:
return self._s3
logger.debug("Setting up s3fs instance")
client_kwargs = self.client_kwargs.copy()
init_kwargs = dict(
aws_access_key_id=self.key,
aws_secret_access_key=self.secret,
aws_session_token=self.token,
)
init_kwargs = {
key: value
for key, value in init_kwargs.items()
if value is not None and value != client_kwargs.get(key)
}
if "use_ssl" not in client_kwargs.keys():
init_kwargs["use_ssl"] = self.use_ssl
config_kwargs = self._prepare_config_kwargs()
if self.anon:
from botocore import UNSIGNED
drop_keys = {
"aws_access_key_id",
"aws_secret_access_key",
"aws_session_token",
}
init_kwargs = {
key: value for key, value in init_kwargs.items() if key not in drop_keys
}
client_kwargs = {
key: value
for key, value in client_kwargs.items()
if key not in drop_keys
}
config_kwargs["signature_version"] = UNSIGNED
conf = AioConfig(**config_kwargs)
if self.session is None:
self.session = aiobotocore.session.AioSession(**self.kwargs)
for parameters in (config_kwargs, self.kwargs, init_kwargs, client_kwargs):
for option in ("region_name", "endpoint_url"):
if parameters.get(option):
self.cache_regions = False
break
else:
cache_regions = self.cache_regions
logger.debug(
"RC: caching enabled? %r (explicit option is %r)",
cache_regions,
self.cache_regions,
)
self.cache_regions = cache_regions
if self.cache_regions:
s3creator = S3BucketRegionCache(
self.session, config=conf, **init_kwargs, **client_kwargs
)
self._s3 = await s3creator.get_client()
else:
s3creator = self.session.create_client(
"s3", config=conf, **init_kwargs, **client_kwargs
)
self._s3 = await s3creator.__aenter__()
self._s3creator = s3creator
# the following actually closes the aiohttp connection; use of private
# attributes might break in the future, which would cause an exception at gc time
if not self.asynchronous:
weakref.finalize(self, self.close_session, self.loop, self._s3creator)
self._kwargs_helper = ParamKwargsHelper(self._s3)
return self._s3
_connect = set_session
connect = sync_wrapper(set_session)
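# Illustrative usage sketch (bucket name is hypothetical): in synchronous code
# the session is created lazily on first use, while in asynchronous code
# ``set_session`` must be awaited explicitly before other calls:
#
#   >>> fs = S3FileSystem(asynchronous=True)   # doctest: +SKIP
#   >>> await fs.set_session()                 # doctest: +SKIP
#   >>> await fs._ls("my-bucket")              # doctest: +SKIP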
@staticmethod
def close_session(loop, s3):
if loop is not None and loop.is_running():
try:
sync(loop, s3.__aexit__, None, None, None, timeout=0.1)
return
except FSTimeoutError:
pass
try:
# close the actual socket
s3._client._endpoint.http_session._connector._close()
except AttributeError:
# but during shutdown, it may have gone
pass
async def _get_delegated_s3pars(self, exp=3600):
"""Get temporary credentials from STS, appropriate for sending across a
network. Only relevant where the key/secret were explicitly provided.
Parameters
----------
exp : int
Time in seconds that credentials are good for
Returns
-------
dict of parameters
"""
if self.anon:
return {"anon": True}
if self.token: # already has temporary cred
return {
"key": self.key,
"secret": self.secret,
"token": self.token,
"anon": False,
}
if self.key is None or self.secret is None: # automatic credentials
return {"anon": False}
async with self.session.create_client("sts") as sts:
cred = (await sts.get_session_token(DurationSeconds=exp))["Credentials"]
return {
"key": cred["AccessKeyId"],
"secret": cred["SecretAccessKey"],
"token": cred["SessionToken"],
"anon": False,
}
get_delegated_s3pars = sync_wrapper(_get_delegated_s3pars)
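# Illustrative sketch (assumes ``fs`` was built with an explicit key/secret):
# the returned dict can rebuild an equivalent filesystem elsewhere, e.g. on a
# worker process, without shipping long-lived credentials:
#
#   >>> pars = fs.get_delegated_s3pars(exp=3600)   # doctest: +SKIP
#   >>> remote_fs = S3FileSystem(**pars)           # doctest: +SKIP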
def _open(
self,
path,
mode="rb",
block_size=None,
acl="",
version_id=None,
fill_cache=None,
cache_type=None,
autocommit=True,
requester_pays=None,
cache_options=None,
**kwargs
):
"""Open a file for reading or writing
Parameters
----------
path: string
Path of file on S3
mode: string
One of 'r', 'w', 'a', 'rb', 'wb', or 'ab'. These have the same meaning
as they do for the built-in `open` function.
block_size: int
Size of data-node blocks if reading
fill_cache: bool
If seeking to a new part of the file beyond the current buffer,
with this True, the buffer will be filled between the sections to
best support random access. When reading only a few specific chunks
out of a file, performance may be better if False.
acl: str
Canned ACL to set when writing
version_id : str
Explicit version of the object to open. This requires that the s3
filesystem is version aware and bucket versioning is enabled on the
relevant bucket.
encoding : str
The encoding to use if opening the file in text mode. The platform's
default text encoding is used if not given.
cache_type : str
See fsspec's documentation for available cache_type values. Set to "none"
if no caching is desired. If None, defaults to ``self.default_cache_type``.
requester_pays : bool (optional)
Whether RequesterPays buckets are supported. If None, defaults to the
value used when creating the S3FileSystem (which defaults to False.)
kwargs: dict-like
Additional parameters used for s3 methods. Typically used for
ServerSideEncryption.
"""
if block_size is None:
block_size = self.default_block_size
if fill_cache is None:
fill_cache = self.default_fill_cache
if requester_pays is None:
requester_pays = bool(self.req_kw)
acl = acl or self.s3_additional_kwargs.get("ACL", "")
kw = self.s3_additional_kwargs.copy()
kw.update(kwargs)
if not self.version_aware and version_id:
raise ValueError(
"version_id cannot be specified if the filesystem "
"is not version aware"
)
if cache_type is None:
cache_type = self.default_cache_type
return S3File(
self,
path,
mode,
block_size=block_size,
acl=acl,
version_id=version_id,
fill_cache=fill_cache,
s3_additional_kwargs=kw,
cache_type=cache_type,
autocommit=autocommit,
requester_pays=requester_pays,
cache_options=cache_options,
)
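# Illustrative usage sketch (bucket, key and version id are hypothetical);
# ``_open`` is normally reached through the public ``open`` API:
#
#   >>> fs = S3FileSystem(version_aware=True)             # doctest: +SKIP
#   >>> with fs.open("my-bucket/data.bin", mode="rb",
#   ...              version_id="abc123",
#   ...              cache_type="readahead") as f:        # doctest: +SKIP
#   ...     header = f.read(1024)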
async def _lsdir(
self, path, refresh=False, max_items=None, delimiter="/", prefix=""
):
bucket, key, _ = self.split_path(path)
if not prefix:
prefix = ""
if key:
prefix = key.lstrip("/") + "/" + prefix
if path not in self.dircache or refresh or not delimiter:
try:
logger.debug("Get directory listing page for %s" % path)
await self.set_session()
s3 = await self.get_s3(bucket)
pag = s3.get_paginator("list_objects_v2")
config = {}
if max_items is not None:
config.update(MaxItems=max_items, PageSize=2 * max_items)
it = pag.paginate(
Bucket=bucket,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config,
**self.req_kw,
)
files = []
dircache = []
async for i in it:
dircache.extend(i.get("CommonPrefixes", []))
for c in i.get("Contents", []):
c["type"] = "file"
c["size"] = c["Size"]
files.append(c)
if dircache:
files.extend(
[
{
"Key": l["Prefix"][:-1],
"Size": 0,
"StorageClass": "DIRECTORY",
"type": "directory",
"size": 0,
}
for l in dircache
]
)
for f in files:
f["Key"] = "/".join([bucket, f["Key"]])
f["name"] = f["Key"]
except ClientError as e:
raise translate_boto_error(e)
if delimiter and files:
self.dircache[path] = files
return files
return self.dircache[path]
async def _glob(self, path, **kwargs):
if path.startswith("*"):
raise ValueError("Cannot traverse all of S3")
return await super()._glob(path, **kwargs)
async def _find(self, path, maxdepth=None, withdirs=None, detail=False, prefix=""):
"""List all files below path.
Like posix ``find`` command without conditions
Parameters
----------
path : str
maxdepth: int or None
If not None, the maximum number of levels to descend
withdirs: bool
Whether to include directory paths in the output. This is True
when used by glob, but users usually only want files.
prefix: str
Only return files that match ``^{path}/{prefix}`` (if there is an
exact match ``filename == {path}/{prefix}``, it will also be included)
"""
path = self._strip_protocol(path)
bucket, key, _ = self.split_path(path)
if not bucket:
raise ValueError("Cannot traverse all of S3")
if (withdirs or maxdepth) and prefix:
# TODO: perhaps propagate these to a glob(f"path/{prefix}*") call
raise ValueError(
"Can not specify 'prefix' option alongside 'withdirs'/'maxdepth' options."
)
if maxdepth:
return await super()._find(
bucket + "/" + key, maxdepth=maxdepth, withdirs=withdirs, detail=detail
)
# TODO: implement find from dircache, if all listings are present
# if refresh is False:
# out = incomplete_tree_dirs(self.dircache, path)
# if len(out) == 1:
# await self._find(out[0])
# return super().find(path)
# elif len(out) == 0:
# return super().find(path)
# # else: we refresh anyway, having at least two missing trees
out = await self._lsdir(path, delimiter="", prefix=prefix)
if not out and key:
try:
out = [await self._info(path)]
except FileNotFoundError:
out = []
dirs = []
sdirs = set()
thisdircache = {}
for o in out:
par = self._parent(o["name"])
if par not in self.dircache:
if par not in sdirs:
sdirs.add(par)
d = False
if len(path) <= len(par):
d = {
"Key": self.split_path(par)[1],
"Size": 0,
"name": par,
"StorageClass": "DIRECTORY",
"type": "directory",
"size": 0,
}
dirs.append(d)
thisdircache[par] = []
ppar = self._parent(par)
if ppar in thisdircache:
if d and d not in thisdircache[ppar]:
thisdircache[ppar].append(d)
if par in sdirs:
thisdircache[par].append(o)
if not prefix:
for k, v in thisdircache.items():
if k not in self.dircache and len(k) >= len(path):
self.dircache[k] = v
if withdirs:
out = sorted(out + dirs, key=lambda x: x["name"])
if detail:
return {o["name"]: o for o in out}
return [o["name"] for o in out]
find = sync_wrapper(_find)
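# Illustrative sketch (bucket and prefix below are hypothetical): a flat,
# delimiter-free listing of every key under a path, optionally narrowed to a
# key prefix:
#
#   >>> fs.find("my-bucket/logs/2021")                  # doctest: +SKIP
#   >>> fs.find("my-bucket/logs/2021", prefix="app-")   # doctest: +SKIP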
async def _mkdir(self, path, acl="", create_parents=True, **kwargs):
path = self._strip_protocol(path).rstrip("/")
if not path:
raise ValueError
bucket, key, _ = self.split_path(path)
if await self._exists(bucket):
if not key:
# requested to create bucket, but bucket already exists
raise FileExistsError
# else: # do nothing as bucket is already created.
elif not key or create_parents:
if acl and acl not in buck_acls:
raise ValueError("ACL not in %s", buck_acls)
try:
params = {"Bucket": bucket, "ACL": acl}
region_name = kwargs.get("region_name", None) or self.client_kwargs.get(
"region_name", None
)
if region_name:
params["CreateBucketConfiguration"] = {
"LocationConstraint": region_name
}
await self._call_s3("create_bucket", **params)
self.invalidate_cache("")
self.invalidate_cache(bucket)
except ClientError as e:
raise translate_boto_error(e)
except ParamValidationError as e:
raise ValueError("Bucket create failed %r: %s" % (bucket, e))
else:
# raises if the bucket doesn't exist and create_parents is False
await self._ls(bucket)
mkdir = sync_wrapper(_mkdir)
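# Illustrative sketch (bucket name and region are hypothetical): creating a
# bucket issues a create_bucket call, honouring a region passed either here or
# via client_kwargs:
#
#   >>> fs.mkdir("my-new-bucket", region_name="eu-west-1")   # doctest: +SKIP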
async def _makedirs(self, path, exist_ok=False):
try:
await self._mkdir(path, create_parents=True)
except FileExistsError:
if exist_ok:
pass
else:
raise
makedirs = sync_wrapper(_makedirs)
async def _rmdir(self, path):
try:
await self._call_s3("delete_bucket", Bucket=path)
except botocore.exceptions.ClientError as e:
if "NoSuchBucket" in str(e):
raise FileNotFoundError(path) from e
if "BucketNotEmpty" in str(e):
raise OSError from e
raise
self.invalidate_cache(path)
self.invalidate_cache("")
rmdir = sync_wrapper(_rmdir)
async def _lsbuckets(self, refresh=False):
if "" not in self.dircache or refresh:
if self.anon:
# cannot list buckets if not logged in
return []
try:
files = (await self._call_s3("list_buckets"))["Buckets"]
except ClientError:
# listbucket permission missing
return []
for f in files:
f["Key"] = f["Name"]
f["Size"] = 0
f["StorageClass"] = "BUCKET"
f["size"] = 0
f["type"] = "directory"
f["name"] = f["Name"]
del f["Name"]
self.dircache[""] = files
return files
return self.dircache[""]
async def _ls(self, path, detail=False, refresh=False):
"""List files in given bucket, or list of buckets.
Listing is cached unless `refresh=True`.
Note: only the buckets associated with your login will be listed by
`ls('')`, not any public buckets (even if already accessed).
Parameters
----------
path : string/bytes
location at which to list files
refresh : bool (=False)
if False, look in local cache for file details first
"""
path = self._strip_protocol(path).rstrip("/")
if path in ["", "/"]:
files = await self._lsbuckets(refresh)
else:
files = await self._lsdir(path, refresh)
if not files and "/" in path:
files = await self._lsdir(self._parent(path), refresh=refresh)
files = [
o
for o in files
if o["name"].rstrip("/") == path and o["type"] != "directory"
]
if detail:
return files
return files if detail else sorted([o["name"] for o in files])
async def _exists(self, path):
if path in ["", "/"]:
# the root always exists, even if anon
return True
path = self._strip_protocol(path)
bucket, key, version_id = self.split_path(path)
if key:
try:
if self._ls_from_cache(path):
return True
except FileNotFoundError:
return False
try:
await self._info(path, bucket, key, version_id=version_id)
return True
except FileNotFoundError:
return False
elif self.dircache.get(bucket, False):
return True
else:
try:
if self._ls_from_cache(bucket):
return True
except FileNotFoundError:
# might still be a bucket we can access but don't own
pass
try:
await self._call_s3(
"list_objects_v2", MaxKeys=1, Bucket=bucket, **self.req_kw
)
return True
except Exception:
pass
try:
await self._call_s3("get_bucket_location", Bucket=bucket, **self.req_kw)
return True
except Exception:
return False
exists = sync_wrapper(_exists)
async def _touch(self, path, truncate=True, data=None, **kwargs):
"""Create empty file or truncate"""
bucket, key, version_id = self.split_path(path)
if version_id:
raise ValueError("S3 does not support touching existing versions of files")
if not truncate and await self._exists(path):
raise ValueError("S3 does not support touching existent files")
try:
write_result = await self._call_s3(
"put_object", Bucket=bucket, Key=key, **kwargs
)
except ClientError as ex:
raise translate_boto_error(ex)
self.invalidate_cache(self._parent(path))
return write_result
touch = sync_wrapper(_touch)
async def _cat_file(self, path, version_id=None, start=None, end=None):
bucket, key, vers = self.split_path(path)
if start is not None or end is not None:
head = {"Range": await self._process_limits(path, start, end)}
else:
head = {}
resp = await self._call_s3(
"get_object",
Bucket=bucket,
Key=key,
**version_id_kw(version_id or vers),
**head,
**self.req_kw,
)
data = await resp["Body"].read()
resp["Body"].close()
return data
async def _pipe_file(self, path, data, chunksize=50 * 2**20, **kwargs):
bucket, key, _ = self.split_path(path)
size = len(data)
# 5 GB is the limit for an S3 PUT
if size < min(5 * 2**30, 2 * chunksize):
return await self._call_s3(
"put_object", Bucket=bucket, Key=key, Body=data, **kwargs
)
else:
mpu = await self._call_s3(
"create_multipart_upload", Bucket=bucket, Key=key, **kwargs
)
out = [
await self._call_s3(
"upload_part",
Bucket=bucket,
PartNumber=i + 1,
UploadId=mpu["UploadId"],
Body=data[off : off + chunksize],
Key=key,
)
for i, off in enumerate(range(0, len(data), chunksize))
]
parts = [
{"PartNumber": i + 1, "ETag": o["ETag"]} for i, o in enumerate(out)
]
await self._call_s3(
"complete_multipart_upload",
Bucket=bucket,
Key=key,
UploadId=mpu["UploadId"],
MultipartUpload={"Parts": parts},
)
self.invalidate_cache(path)
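# Illustrative sketch of the branch above (sizes are examples only): with the
# default chunksize of 50 MiB, payloads smaller than 100 MiB go through a
# single put_object call, larger ones through a multipart upload:
#
#   >>> fs.pipe_file("my-bucket/small.bin", b"x" * 2**20)          # doctest: +SKIP
#   >>> fs.pipe_file("my-bucket/big.bin", b"x" * (200 * 2**20))    # doctest: +SKIP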
async def _put_file(
self, lpath, rpath, callback=_DEFAULT_CALLBACK, chunksize=50 * 2**20, **kwargs
):
bucket, key, _ = self.split_path(rpath)
if os.path.isdir(lpath):
if key:
# don't make remote "directory"
return
else:
await self._mkdir(lpath)
size = os.path.getsize(lpath)
callback.set_size(size)
if "ContentType" not in kwargs:
content_type, _ = mimetypes.guess_type(lpath)
if content_type is not None:
kwargs["ContentType"] = content_type
with open(lpath, "rb") as f0:
if size < min(5 * 2**30, 2 * chunksize):
await self._call_s3(
"put_object", Bucket=bucket, Key=key, Body=f0, **kwargs
)
callback.relative_update(size)
else:
mpu = await self._call_s3(
"create_multipart_upload", Bucket=bucket, Key=key, **kwargs
)
out = []
while True:
chunk = f0.read(chunksize)
if not chunk:
break
out.append(
await self._call_s3(
"upload_part",
Bucket=bucket,
PartNumber=len(out) + 1,
UploadId=mpu["UploadId"],
Body=chunk,
Key=key,