GitHub Repository: alist-org/alist
Path: blob/main/internal/op/archive.go
package op

import (
	"context"
	stderrors "errors"
	"fmt"
	"io"
	stdpath "path"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/archive/tool"
	"github.com/alist-org/alist/v3/internal/stream"

	"github.com/Xhofe/go-cache"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/singleflight"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
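
// GetArchiveMeta resolves the archive metadata for the object at path on the
// given storage. Cached entries are reused unless args.Refresh is set, and
// concurrent callers are deduplicated through archiveMetaG (the singleflight
// step is skipped for OnlyLocal storages).
//
// A minimal call sketch (illustrative only; it assumes the caller has already
// resolved a storage driver and an archive path on it):
//
//	meta, err := GetArchiveMeta(ctx, storage, "/a/b.zip", model.ArchiveMetaArgs{})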
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	key := Key(storage, path)
	if !args.Refresh {
		if meta, ok := archiveMetaCache.Get(key); ok {
			log.Debugf("use cache when get %s archive meta", path)
			return meta, nil
		}
	}
	fn := func() (*model.ArchiveMetaProvider, error) {
		_, m, err := getArchiveMeta(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get %s archive meta: %+v", path, err)
		}
		if m.Expiration != nil {
			archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
		}
		return m, nil
	}
	if storage.Config().OnlyLocal {
		meta, err := fn()
		return meta, err
	}
	meta, err, _ := archiveMetaG.Do(key, fn)
	return meta, err
}
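
// GetArchiveToolAndStream picks an archive tool by the object's file
// extension and opens a seekable stream over the archive. For multi-part
// archives (partExt != nil) it keeps resolving subsequent part files until a
// Link call fails.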
func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) {
	l, obj, err := Link(ctx, storage, path, args)
	if err != nil {
		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
	}
	baseName, ext, found := strings.Cut(obj.GetName(), ".")
	if !found {
		if l.MFile != nil {
			_ = l.MFile.Close()
		}
		if l.RangeReadCloser != nil {
			_ = l.RangeReadCloser.Close()
		}
		return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
	}
	partExt, t, err := tool.GetArchiveTool("." + ext)
	if err != nil {
		var e error
		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
		if e != nil {
			if l.MFile != nil {
				_ = l.MFile.Close()
			}
			if l.RangeReadCloser != nil {
				_ = l.RangeReadCloser.Close()
			}
			return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
		}
	}
	ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
	if err != nil {
		if l.MFile != nil {
			_ = l.MFile.Close()
		}
		if l.RangeReadCloser != nil {
			_ = l.RangeReadCloser.Close()
		}
		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
	}
	ret := []*stream.SeekableStream{ss}
	if partExt == nil {
		return obj, t, ret, nil
	} else {
		index := partExt.SecondPartIndex
		dir := stdpath.Dir(path)
		for {
			p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
			var o model.Obj
			l, o, err = Link(ctx, storage, p, args)
			if err != nil {
				break
			}
			ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
			if err != nil {
				if l.MFile != nil {
					_ = l.MFile.Close()
				}
				if l.RangeReadCloser != nil {
					_ = l.RangeReadCloser.Close()
				}
				for _, s := range ret {
					_ = s.Close()
				}
				return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
			}
			ret = append(ret, ss)
			index++
		}
		return obj, t, ret, nil
	}
}
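
// getArchiveMeta prefers the driver's own ArchiveReader implementation and
// falls back to reading the metadata through an archive tool over seekable
// streams.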
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if ok {
		obj, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed to get file")
		}
		if obj.IsDir() {
			return nil, nil, errors.WithStack(errs.NotFile)
		}
		meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
		if !errors.Is(err, errs.NotImplement) {
			archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
			if meta != nil && meta.GetTree() != nil {
				archiveMetaProvider.Sort = &storage.GetStorage().Sort
			}
			if !storage.Config().NoCache {
				Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
				archiveMetaProvider.Expiration = &Expiration
			}
			return obj, archiveMetaProvider, err
		}
	}
	obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		var e error
		for _, s := range ss {
			e = stderrors.Join(e, s.Close())
		}
		if e != nil {
			log.Errorf("failed to close file streamer, %v", e)
		}
	}()
	meta, err := t.GetMeta(ss, args.ArchiveArgs)
	if err != nil {
		return nil, nil, err
	}
	archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}
	if meta.GetTree() != nil {
		archiveMetaProvider.Sort = &storage.GetStorage().Sort
	}
	if !storage.Config().NoCache {
		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
		archiveMetaProvider.Expiration = &Expiration
	} else if ss[0].Link.MFile == nil {
		// alias and crypt drivers
		archiveMetaProvider.Expiration = ss[0].Link.Expiration
	}
	return obj, archiveMetaProvider, err
}

var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var archiveListG singleflight.Group[[]model.Obj]
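
// ListArchive lists the entries under args.InnerPath inside the archive at
// path. Listings are cached per (storage, path, inner path) key and
// concurrent calls are deduplicated via archiveListG.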
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	metaKey := Key(storage, path)
	key := stdpath.Join(metaKey, args.InnerPath)
	if !args.Refresh {
		if files, ok := archiveListCache.Get(key); ok {
			log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath)
			return files, nil
		}
		// if meta, ok := archiveMetaCache.Get(metaKey); ok {
		//	log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
		//	return getChildrenFromArchiveMeta(meta, args.InnerPath)
		// }
	}
	objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) {
		obj, files, err := listArchive(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to list archive [%s]%s: %+v", path, args.InnerPath, err)
		}
		// set path
		for _, f := range files {
			if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && obj.GetPath() != "" {
				s.SetPath(stdpath.Join(obj.GetPath(), args.InnerPath, f.GetName()))
			}
		}
		// wrap obj name
		model.WrapObjsName(files)
		// sort objs
		if storage.Config().LocalSort {
			model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
		}
		model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
		if !storage.Config().NoCache {
			if len(files) > 0 {
				log.Debugf("set cache: %s => %+v", key, files)
				archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
			} else {
				log.Debugf("del cache: %s", key)
				archiveListCache.Del(key)
			}
		}
		return files, nil
	})
	return objs, err
}
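
// _listArchive tries the driver's ArchiveReader implementation first and
// falls back to listing through an archive tool over seekable streams.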
func _listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if ok {
		obj, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed to get file")
		}
		if obj.IsDir() {
			return nil, nil, errors.WithStack(errs.NotFile)
		}
		files, err := storageAr.ListArchive(ctx, obj, args.ArchiveInnerArgs)
		if !errors.Is(err, errs.NotImplement) {
			return obj, files, err
		}
	}
	obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		var e error
		for _, s := range ss {
			e = stderrors.Join(e, s.Close())
		}
		if e != nil {
			log.Errorf("failed to close file streamer, %v", e)
		}
	}()
	files, err := t.List(ss, args.ArchiveInnerArgs)
	return obj, files, err
}
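
// listArchive wraps _listArchive and, when the tool reports errs.NotSupport,
// derives the children from the archive meta tree instead.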
func listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
	obj, files, err := _listArchive(ctx, storage, path, args)
	if errors.Is(err, errs.NotSupport) {
		var meta model.ArchiveMeta
		meta, err = GetArchiveMeta(ctx, storage, path, model.ArchiveMetaArgs{
			ArchiveArgs: args.ArchiveArgs,
			Refresh:     args.Refresh,
		})
		if err != nil {
			return nil, nil, err
		}
		files, err = getChildrenFromArchiveMeta(meta, args.InnerPath)
		if err != nil {
			return nil, nil, err
		}
	}
	if err == nil && obj == nil {
		obj, err = GetUnwrap(ctx, storage, path)
	}
	if err != nil {
		return nil, nil, err
	}
	return obj, files, err
}
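
// getChildrenFromArchiveMeta walks the meta tree along innerPath and returns
// the children of the directory it ends at.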
func getChildrenFromArchiveMeta(meta model.ArchiveMeta, innerPath string) ([]model.Obj, error) {
	obj := meta.GetTree()
	if obj == nil {
		return nil, errors.WithStack(errs.NotImplement)
	}
	dirs := splitPath(innerPath)
	for _, dir := range dirs {
		var next model.ObjTree
		for _, c := range obj {
			if c.GetName() == dir {
				next = c
				break
			}
		}
		if next == nil {
			return nil, errors.WithStack(errs.ObjectNotFound)
		}
		if !next.IsDir() || next.GetChildren() == nil {
			return nil, errors.WithStack(errs.NotFolder)
		}
		obj = next.GetChildren()
	}
	return utils.SliceConvert(obj, func(src model.ObjTree) (model.Obj, error) {
		return src, nil
	})
}
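
// splitPath splits a slash-separated inner path into its components,
// e.g. "a/b/c" -> ["a", "b", "c"].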
func splitPath(path string) []string {
	var parts []string
	for {
		dir, file := stdpath.Split(path)
		if file == "" {
			break
		}
		parts = append([]string{file}, parts...)
		path = strings.TrimSuffix(dir, "/")
	}
	return parts
}
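
// ArchiveGet returns the archive file object together with the object at
// args.InnerPath inside it, using the driver's ArchiveGetter if available
// and otherwise searching the listing of the parent inner directory.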
func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	af, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed to get file")
	}
	if af.IsDir() {
		return nil, nil, errors.WithStack(errs.NotFile)
	}
	if g, ok := storage.(driver.ArchiveGetter); ok {
		obj, err := g.ArchiveGet(ctx, af, args.ArchiveInnerArgs)
		if err == nil {
			return af, model.WrapObjName(obj), nil
		}
	}

	if utils.PathEqual(args.InnerPath, "/") {
		return af, &model.ObjWrapName{
			Name: RootName,
			Obj: &model.Object{
				Name:     af.GetName(),
				Path:     af.GetPath(),
				ID:       af.GetID(),
				Size:     af.GetSize(),
				Modified: af.ModTime(),
				IsFolder: true,
			},
		}, nil
	}

	innerDir, name := stdpath.Split(args.InnerPath)
	args.InnerPath = strings.TrimSuffix(innerDir, "/")
	files, err := ListArchive(ctx, storage, path, args)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get parent list")
	}
	for _, f := range files {
		if f.GetName() == name {
			return af, f, nil
		}
	}
	return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
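
// extractLink pairs a driver-produced link with the inner object it points to.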
type extractLink struct {
	Link *model.Link
	Obj  model.Obj
}

var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG singleflight.Group[*extractLink]
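
// DriverExtract returns a link to an inner archive entry produced by the
// driver itself. Links that carry an expiration are cached, optionally keyed
// by client IP when IPCacheKey is set.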
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	key := stdpath.Join(Key(storage, path), args.InnerPath)
	if link, ok := extractCache.Get(key); ok {
		return link.Link, link.Obj, nil
	} else if link, ok := extractCache.Get(key + ":" + args.IP); ok {
		return link.Link, link.Obj, nil
	}
	fn := func() (*extractLink, error) {
		link, err := driverExtract(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed extract archive")
		}
		if link.Link.Expiration != nil {
			if link.Link.IPCacheKey {
				key = key + ":" + args.IP
			}
			extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
		}
		return link, nil
	}
	if storage.Config().OnlyLocal {
		link, err := fn()
		if err != nil {
			return nil, nil, err
		}
		return link.Link, link.Obj, nil
	}
	link, err, _ := extractG.Do(key, fn)
	if err != nil {
		return nil, nil, err
	}
	return link.Link, link.Obj, err
}
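
// driverExtract locates the inner object via ArchiveGet and delegates the
// actual extraction to the storage's ArchiveReader implementation.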
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if !ok {
		return nil, errs.DriverExtractNotSupported
	}
	archiveFile, extracted, err := ArchiveGet(ctx, storage, path, model.ArchiveListArgs{
		ArchiveInnerArgs: args,
		Refresh:          false,
	})
	if err != nil {
		return nil, errors.WithMessage(err, "failed to get file")
	}
	if extracted.IsDir() {
		return nil, errors.WithStack(errs.NotFile)
	}
	link, err := storageAr.Extract(ctx, archiveFile, args)
	return &extractLink{Link: link, Obj: extracted}, err
}
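
// streamWithParent couples an extracted entry's reader with the underlying
// archive streams so that closing the reader also closes its parents.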
type streamWithParent struct {
	rc      io.ReadCloser
	parents []*stream.SeekableStream
}

func (s *streamWithParent) Read(p []byte) (int, error) {
	return s.rc.Read(p)
}

func (s *streamWithParent) Close() error {
	err := s.rc.Close()
	for _, ss := range s.parents {
		err = stderrors.Join(err, ss.Close())
	}
	return err
}
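
// InternalExtract extracts an inner archive entry with the matching archive
// tool and returns a reader whose Close also releases the underlying
// seekable streams.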
func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	_, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, 0, err
	}
	rc, size, err := t.Extract(ss, args)
	if err != nil {
		var e error
		for _, s := range ss {
			e = stderrors.Join(e, s.Close())
		}
		if e != nil {
			log.Errorf("failed to close file streamer, %v", e)
			err = stderrors.Join(err, e)
		}
		return nil, 0, err
	}
	return &streamWithParent{rc: rc, parents: ss}, size, nil
}
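
// ArchiveDecompress asks the storage driver to decompress the archive at
// srcPath into dstDirPath, then updates or clears the directory cache
// depending on whether the driver reports the newly created objects.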
func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	srcObj, err := GetUnwrap(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
	}

	switch s := storage.(type) {
	case driver.ArchiveDecompressResult:
		var newObjs []model.Obj
		newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
		if err == nil {
			if len(newObjs) > 0 {
				for _, newObj := range newObjs {
					addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				}
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	case driver.ArchiveDecompress:
		err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
		if err == nil && !utils.IsBool(lazyCache...) {
			ClearCache(storage, dstDirPath)
		}
	default:
		return errs.NotImplement
	}
	return errors.WithStack(err)
}