GitHub Repository: alist-org/alist
Path: blob/main/internal/driver/driver.go

package driver

import (
	"context"

	"github.com/alist-org/alist/v3/internal/model"
)

type Driver interface {
	Meta
	Reader
	//Writer
	//Other
}

type Meta interface {
	Config() Config
	// GetStorage returns the raw storage; there is no need to implement it, because model.Storage already does.
	GetStorage() *model.Storage
	SetStorage(model.Storage)
	// GetAddition returns the Additional config; it is used for JSON unmarshalling, so it must return a pointer.
	GetAddition() Additional
	// Init initializes the driver. If it is already initialized, drop it first.
	Init(ctx context.Context) error
	Drop(ctx context.Context) error
}

type Other interface {
	Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
}

type Reader interface {
	// List files in the path.
	// If files are identified by path, set the ID to the path, e.g. path.Join(dir.GetID(), obj.GetName()).
	// If files are identified by ID, set the ID to the corresponding ID.
	List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error)
	// Link returns the URL/filepath/reader of a file.
	Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error)
}
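
// The sketch below is illustrative only: a minimal read-only driver that satisfies
// Driver (Meta + Reader). ExampleDriver, ExampleAddition and their fields are
// hypothetical, and the empty method bodies only mark where a real backend call
// would go; GetStorage/SetStorage come from the embedded model.Storage, as the
// Meta comment above notes. A real driver lives in its own package under drivers/.
type ExampleAddition struct {
	RootFolderPath string `json:"root_folder_path"`
}

type ExampleDriver struct {
	model.Storage
	Addition ExampleAddition
}

func (d *ExampleDriver) Config() Config {
	// A real driver returns its static configuration (name, capabilities, ...).
	return Config{}
}

func (d *ExampleDriver) GetAddition() Additional {
	// Return a pointer so the JSON config can be unmarshalled into it.
	return &d.Addition
}

func (d *ExampleDriver) Init(ctx context.Context) error { return nil }
func (d *ExampleDriver) Drop(ctx context.Context) error { return nil }

func (d *ExampleDriver) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	// Query the backend for the children of dir. Since this sketch identifies
	// files by path, each returned obj's ID would be path.Join(dir.GetID(), name).
	return []model.Obj{}, nil
}

func (d *ExampleDriver) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	// Fill in a direct URL, a local file path, or a reader for the requested file.
	return &model.Link{}, nil
}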

type GetRooter interface {
	GetRoot(ctx context.Context) (model.Obj, error)
}

type Getter interface {
	// Get a file by path; the path has not been joined with the root path yet.
	Get(ctx context.Context, path string) (model.Obj, error)
}
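
// Hypothetical sketch of GetRooter for the ExampleDriver above: it exposes the
// configured root folder as the root obj. model.Object and its Name/Path/IsFolder
// fields are assumed here. A driver implementing Getter instead would first join
// the given path with its root path before querying the backend.
func (d *ExampleDriver) GetRoot(ctx context.Context) (model.Obj, error) {
	return &model.Object{
		Name:     "root",
		Path:     d.Addition.RootFolderPath,
		IsFolder: true,
	}, nil
}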

//type Writer interface {
//	Mkdir
//	Move
//	Rename
//	Copy
//	Remove
//	Put
//}

type Mkdir interface {
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error
}

type Move interface {
	Move(ctx context.Context, srcObj, dstDir model.Obj) error
}

type Rename interface {
	Rename(ctx context.Context, srcObj model.Obj, newName string) error
}

type Copy interface {
	Copy(ctx context.Context, srcObj, dstDir model.Obj) error
}

type Remove interface {
	Remove(ctx context.Context, obj model.Obj) error
}

type Put interface {
	// Put a file (provided as a FileStreamer) into the driver.
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Cancellation (when `<-ctx.Done()` returns), which can be supported in the following ways:
	//    (1) Use request methods that carry a context, such as:
	//        a. http.NewRequestWithContext
	//        b. resty.Request.SetContext
	//        c. s3manager.Uploader.UploadWithContext
	//        d. utils.CopyWithCtx
	//    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//    (3) Use `utils.IsCanceled` to check whether the upload has been canceled while it is in progress;
	//        this is typically applicable to chunked uploads.
	// 2. Report upload progress (via `up`) in real time. There are three recommended ways:
	//    (1) Use `utils.CopyWithCtx`
	//    (2) Use `driver.ReaderUpdatingProgress`
	//    (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down the upload speed (via `stream.ServerUploadLimit`). This requires wrapping the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Alternatively, call `driver.ServerUploadLimitWaitN` directly
	//    if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use an `errgroup.Group` to upload chunks in parallel, consider using a concurrency limiter such as
	// `semaphore.Weighted` to cap the number of upload goroutines, preventing excessive memory usage caused
	// by buffering too many file chunks awaiting upload.
	// A minimal sketch of this contract follows this interface.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
}
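
// Illustrative sketch of the Put contract described above, not a real driver's
// implementation. It assumes model.FileStreamer exposes the obj's size (GetSize)
// and an io.Reader, and that UpdateProgress is a callback taking a completed
// percentage; uploadChunk is a hypothetical helper that would issue a
// context-aware request (e.g. via http.NewRequestWithContext) and apply the
// server-side rate limit before sending each chunk. io.EOF comes from the
// standard library's io package.
func (d *ExampleDriver) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error {
	const chunkSize = 10 * 1024 * 1024
	total := file.GetSize()
	buf := make([]byte, chunkSize)
	var uploaded int64
	for {
		// 1. Cancellation: stop as soon as the context is done.
		if err := ctx.Err(); err != nil {
			return err
		}
		n, readErr := file.Read(buf)
		if n > 0 {
			if err := uploadChunk(ctx, d, dstDir, buf[:n]); err != nil { // hypothetical helper
				return err
			}
			uploaded += int64(n)
			// 2. Progress: report the completed percentage via up.
			if total > 0 {
				up(float64(uploaded) * 100 / float64(total))
			}
		}
		if readErr == io.EOF {
			return nil
		}
		if readErr != nil {
			return readErr
		}
	}
}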

type PutURL interface {
	// PutURL directly puts a URL into the storage.
	// Applicable to index-based drivers like URL-Tree, or drivers that support uploading files as URLs.
	// Called when SimpleHttp is used for offline downloading, skipping the creation of a download task.
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) error
}

//type WriteResult interface {
//	MkdirResult
//	MoveResult
//	RenameResult
//	CopyResult
//	PutResult
//	Remove
//}

type MkdirResult interface {
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error)
}

type MoveResult interface {
	Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}

type RenameResult interface {
	Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error)
}

type CopyResult interface {
	Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}

type PutResult interface {
	// Put a file (provided as a FileStreamer) into the driver and return the resulting obj.
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Cancellation (when `<-ctx.Done()` returns), which can be supported in the following ways:
	//    (1) Use request methods that carry a context, such as:
	//        a. http.NewRequestWithContext
	//        b. resty.Request.SetContext
	//        c. s3manager.Uploader.UploadWithContext
	//        d. utils.CopyWithCtx
	//    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//    (3) Use `utils.IsCanceled` to check whether the upload has been canceled while it is in progress;
	//        this is typically applicable to chunked uploads.
	// 2. Report upload progress (via `up`) in real time. There are three recommended ways:
	//    (1) Use `utils.CopyWithCtx`
	//    (2) Use `driver.ReaderUpdatingProgress`
	//    (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down the upload speed (via `stream.ServerUploadLimit`). This requires wrapping the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Alternatively, call `driver.ServerUploadLimitWaitN` directly
	//    if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use an `errgroup.Group` to upload chunks in parallel, consider using a concurrency limiter such as
	// `semaphore.Weighted` to cap the number of upload goroutines, preventing excessive memory usage caused
	// by buffering too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
}

type PutURLResult interface {
	// PutURL directly puts a URL into the storage and returns the resulting obj.
	// Applicable to index-based drivers like URL-Tree, or drivers that support uploading files as URLs.
	// Called when SimpleHttp is used for offline downloading, skipping the creation of a download task.
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error)
}

type ArchiveReader interface {
	// GetArchiveMeta gets the meta-info of an archive.
	// Return errs.WrongArchivePassword if the meta-info is itself encrypted but the provided password is wrong or empty.
	// Return errs.NotImplement to fall back to the internal archive tools for getting the meta-info, e.g. in the following cases:
	// 1. the driver does not support the archive's format, but an internal tool may
	// 2. handling archives is a VIP feature, but the driver does not have VIP access
	GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error)
	// ListArchive lists the children of model.ArchiveArgs.InnerPath in the archive.
	// Return errs.NotImplement to fall back to the internal archive tools for listing the children.
	// Return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree.
	ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error)
	// Extract gets the URL/filepath/reader of a file inside the archive.
	// Return errs.NotImplement to fall back to the internal archive tools for extraction.
	Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error)
}
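
// Hypothetical sketch of the fallback convention described above: a driver that can
// only obtain archive metadata for formats its backend understands returns
// errs.NotImplement for everything else, so the internal archive tools take over.
// errs is the package referenced in the comments above; the ".zip" check and the
// fetchArchiveMetaFromBackend helper are made up for illustration, and strings is
// the standard library package.
func (d *ExampleDriver) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	if !strings.HasSuffix(strings.ToLower(obj.GetName()), ".zip") {
		// Unsupported here; let the internal archive tools handle it.
		return nil, errs.NotImplement
	}
	return fetchArchiveMetaFromBackend(ctx, d, obj, args) // hypothetical helper
}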

type ArchiveGetter interface {
	// ArchiveGet gets a file inside the archive by its inner path.
	// Return errs.NotImplement to fall back to the internal archive tools.
	// Return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree.
	ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error)
}

type ArchiveDecompress interface {
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error
}

type ArchiveDecompressResult interface {
	// ArchiveDecompress decompresses an archive.
	// When args.PutIntoNewDir is true, the new sub-folder should be named the same as the archive but without
	// the extension, and only the newly created folder should be returned.
	// When args.PutIntoNewDir is false, return each decompressed obj from the root path of the archive.
	// Return errs.NotImplement to fall back to the internal archive tools for decompression.
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
}
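
// Hypothetical sketch of the return-value convention described above. Only the
// branching on args.PutIntoNewDir mirrors the documented behaviour; the
// decompressIntoNewDir and decompressIntoDir helpers are made up for illustration.
func (d *ExampleDriver) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	if args.PutIntoNewDir {
		// Decompress into a new sub-folder named after the archive (extension stripped)
		// and return only that folder.
		newFolder, err := decompressIntoNewDir(ctx, d, srcObj, dstDir) // hypothetical helper
		if err != nil {
			return nil, err
		}
		return []model.Obj{newFolder}, nil
	}
	// Otherwise decompress into dstDir itself and return each top-level obj taken
	// from the root of the archive.
	return decompressIntoDir(ctx, d, srcObj, dstDir) // hypothetical helper
}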

type Reference interface {
	InitReference(storage Driver) error
}