GitHub Repository: alist-org/alist
Path: blob/main/drivers/115/util.go
package _115

import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/tls"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"

	cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115"
	crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115"
	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
	"github.com/pkg/errors"
)

// var UserAgent = driver115.UA115Browser
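// login builds the 115 client and authenticates with either a stored QR-code
// session token or a cookie string, then verifies the session via LoginCheck.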
func (d *Pan115) login() error {
	var err error
	opts := []driver115.Option{
		driver115.UA(d.getUA()),
		func(c *driver115.Pan115Client) {
			c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
		},
	}
	d.client = driver115.New(opts...)
	cr := &driver115.Credential{}
	if d.QRCodeToken != "" {
		s := &driver115.QRCodeSession{
			UID: d.QRCodeToken,
		}
		if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
			return errors.Wrap(err, "failed to login by qrcode")
		}
		d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
		d.QRCodeToken = ""
	} else if d.Cookie != "" {
		if err = cr.FromCookie(d.Cookie); err != nil {
			return errors.Wrap(err, "failed to login by cookies")
		}
		d.client.ImportCredential(cr)
	} else {
		return errors.New("missing cookie or qrcode account")
	}
	return d.client.LoginCheck()
}

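// getFiles lists all files under the directory identified by fileId, using the
// configured PageSize (or the driver's default limit when unset).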
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
	res := make([]FileObj, 0)
	if d.PageSize <= 0 {
		d.PageSize = driver115.FileListLimit
	}
	files, err := d.client.ListWithLimit(fileId, d.PageSize, driver115.WithMultiUrls())
	if err != nil {
		return nil, err
	}
	for _, file := range *files {
		res = append(res, FileObj{file})
	}
	return res, nil
}

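// getNewFile fetches the metadata of a single file by its file ID.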
func (d *Pan115) getNewFile(fileId string) (*FileObj, error) {
	file, err := d.client.GetFile(fileId)
	if err != nil {
		return nil, err
	}
	return &FileObj{*file}, nil
}

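// getNewFileByPickCode fetches the metadata of a single file by its pick code
// through the file-info API.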
func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) {
	result := driver115.GetFileInfoResponse{}
	req := d.client.NewRequest().
		SetQueryParam("pick_code", pickCode).
		ForceContentType("application/json;charset=UTF-8").
		SetResult(&result)
	resp, err := req.Get(driver115.ApiFileInfo)
	if err := driver115.CheckErr(err, &result, resp); err != nil {
		return nil, err
	}
	if len(result.Files) == 0 {
		return nil, errors.New("failed to get file info")
	}
	fileInfo := result.Files[0]

	f := &FileObj{}
	f.From(fileInfo)
	return f, nil
}

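// getUA returns a 115Browser-style User-Agent built from the configured app version.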
func (d *Pan115) getUA() string {
	return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
}

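// DownloadWithUA requests a download URL for the given pick code via the
// m115-encrypted Android API, sending the stored cookie and the supplied
// User-Agent; the returned link is typically only usable when fetched with the
// same User-Agent.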
func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
	key := crypto.GenerateKey()
	result := driver115.DownloadResp{}
	params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode})
	if err != nil {
		return nil, err
	}

	data := crypto.Encode(params, key)

	bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode())
	reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String())
	req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Cookie", d.Cookie)
	req.Header.Set("User-Agent", ua)

	resp, err := d.client.Client.GetClient().Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if err := utils.Json.Unmarshal(body, &result); err != nil {
		return nil, err
	}

	if err = result.Err(string(body)); err != nil {
		return nil, err
	}

	b, err := crypto.Decode(string(result.EncodedData), key)
	if err != nil {
		return nil, err
	}

	downloadInfo := struct {
		Url string `json:"url"`
	}{}
	if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
		return nil, err
	}

	info := &driver115.DownloadInfo{}
	info.PickCode = pickCode
	info.Header = resp.Request.Header
	info.Url.Url = downloadInfo.Url
	return info, nil
}

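// GenerateToken builds the upload token expected by 115: the hex-encoded MD5 of
// md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp +
// md5(userID) + appVer.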
func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string {
	userID := strconv.FormatInt(c.client.UserID, 10)
	userIDMd5 := md5.Sum([]byte(userID))
	tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer))
	return hex.EncodeToString(tokenMd5[:])
}

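// rapidUpload attempts a hash-based instant upload: the request form is
// ECDH-encrypted and sent to the upload-init API; while the server answers with
// status 7, the request is re-signed with the SHA1 of the byte range named in
// sign_check (see UploadDigestRange) until the check passes.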
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
	var (
		ecdhCipher   *cipher.EcdhCipher
		encrypted    []byte
		decrypted    []byte
		encodedToken string
		err          error
		target       = "U_1_" + dirID
		bodyBytes    []byte
		result       = driver115.UploadInitResp{}
		fileSizeStr  = strconv.FormatInt(fileSize, 10)
	)
	if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
		return nil, err
	}

	userID := strconv.FormatInt(d.client.UserID, 10)
	form := url.Values{}
	form.Set("appid", "0")
	form.Set("appversion", appVer)
	form.Set("userid", userID)
	form.Set("filename", fileName)
	form.Set("filesize", fileSizeStr)
	form.Set("fileid", fileID)
	form.Set("target", target)
	form.Set("sig", d.client.GenerateSignature(fileID, target))

	signKey, signVal := "", ""
	for retry := true; retry; {
		t := driver115.NowMilli()

		if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
			return nil, err
		}

		params := map[string]string{
			"k_ec": encodedToken,
		}

		form.Set("t", t.String())
		form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
		if signKey != "" && signVal != "" {
			form.Set("sign_key", signKey)
			form.Set("sign_val", signVal)
		}
		if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
			return nil, err
		}

		req := d.client.NewRequest().
			SetQueryParams(params).
			SetBody(encrypted).
			SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
			SetDoNotParseResponse(true)
		resp, err := req.Post(driver115.ApiUploadInit)
		if err != nil {
			return nil, err
		}
		data := resp.RawBody()
		defer data.Close()
		if bodyBytes, err = io.ReadAll(data); err != nil {
			return nil, err
		}
		if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
			return nil, err
		}
		if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
			return nil, err
		}
		if result.Status == 7 {
			// Update signKey & signVal
			signKey = result.SignKey
			signVal, err = UploadDigestRange(stream, result.SignCheck)
			if err != nil {
				return nil, err
			}
		} else {
			retry = false
		}
		result.SHA1 = fileID
	}

	return &result, nil
}

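// UploadDigestRange reads the "start-end" byte range from the stream and
// returns the uppercase SHA1 of that range, as required by the sign_check
// challenge of the upload-init API.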
func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
	var start, end int64
	if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
		return
	}

	length := end - start + 1
	reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
	if err != nil {
		return "", err
	}
	hashStr, err := utils.HashReader(utils.SHA1, reader)
	if err != nil {
		return "", err
	}
	result = strings.ToUpper(hashStr)
	return
}

// UploadByOSS uploads the whole stream in a single PutObject call using the Aliyun OSS SDK.
func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
	ossToken, err := c.client.GetOSSToken()
	if err != nil {
		return nil, err
	}
	ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret)
	if err != nil {
		return nil, err
	}
	bucket, err := ossClient.Bucket(params.Bucket)
	if err != nil {
		return nil, err
	}

	var bodyBytes []byte
	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         s,
		UpdateProgress: up,
	})
	if err = bucket.PutObject(params.Object, r, append(
		driver115.OssOption(params, ossToken),
		oss.CallbackResult(&bodyBytes),
	)...); err != nil {
		return nil, err
	}

	var uploadResult UploadResult
	if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
		return nil, err
	}
	return &uploadResult, uploadResult.Err(string(bodyBytes))
}

// UploadByMultipart uploads the file in multipart chunks through the Aliyun OSS SDK.
func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
	dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
	var (
		chunks    []oss.FileChunk
		parts     []oss.UploadPart
		imur      oss.InitiateMultipartUploadResult
		ossClient *oss.Client
		bucket    *oss.Bucket
		ossToken  *driver115.UploadOSSTokenResp
		bodyBytes []byte
		err       error
	)

	tmpF, err := s.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}

	options := driver115.DefalutUploadMultipartOptions()
	if len(opts) > 0 {
		for _, f := range opts {
			f(options)
		}
	}
	// OSS requires parts to be uploaded in order when Sequential is enabled
	options.ThreadsNum = 1

	if ossToken, err = d.client.GetOSSToken(); err != nil {
		return nil, err
	}

	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil {
		return nil, err
	}

	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
		return nil, err
	}

	// the ossToken expires after one hour, so re-fetch it every 50 minutes
	ticker := time.NewTicker(options.TokenRefreshTime)
	defer ticker.Stop()
	// set the overall timeout
	timeout := time.NewTimer(options.Timeout)

	if chunks, err = SplitFile(fileSize); err != nil {
		return nil, err
	}

	if imur, err = bucket.InitiateMultipartUpload(params.Object,
		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
		oss.UserAgentHeader(driver115.OSSUserAgent),
		oss.EnableSha1(), oss.Sequential(),
	); err != nil {
		return nil, err
	}

	wg := sync.WaitGroup{}
	wg.Add(len(chunks))

	chunksCh := make(chan oss.FileChunk)
	errCh := make(chan error)
	UploadedPartsCh := make(chan oss.UploadPart)
	quit := make(chan struct{})

	// producer
	go chunksProducer(chunksCh, chunks)
	go func() {
		wg.Wait()
		quit <- struct{}{}
	}()

	completedNum := atomic.Int32{}
	// consumers
	for i := 0; i < options.ThreadsNum; i++ {
		go func(threadId int) {
			defer func() {
				if r := recover(); r != nil {
					errCh <- fmt.Errorf("recovered in %v", r)
				}
			}()
			for chunk := range chunksCh {
				var part oss.UploadPart // retry on error, up to 3 attempts in total
				for retry := 0; retry < 3; retry++ {
					select {
					case <-ctx.Done():
						break
					case <-ticker.C:
						if ossToken, err = d.client.GetOSSToken(); err != nil { // re-fetch the ossToken when the ticker fires
							errCh <- errors.Wrap(err, "error while refreshing ossToken")
						}
					default:
					}
					buf := make([]byte, chunk.Size)
					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
						continue
					}
					if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)),
						chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
						break
					}
				}
				if err != nil {
					errCh <- errors.Wrap(err, fmt.Sprintf("error uploading %s (chunk %d): %v", s.GetName(), chunk.Number, err))
				} else {
					num := completedNum.Add(1)
					up(float64(num) * 100.0 / float64(len(chunks)))
				}
				UploadedPartsCh <- part
			}
		}(i)
	}

	go func() {
		for part := range UploadedPartsCh {
			parts = append(parts, part)
			wg.Done()
		}
	}()
LOOP:
	for {
		select {
		case <-ticker.C:
			// re-fetch the ossToken when the ticker fires
			if ossToken, err = d.client.GetOSSToken(); err != nil {
				return nil, err
			}
		case <-quit:
			break LOOP
		case <-errCh:
			return nil, err
		case <-timeout.C:
			return nil, fmt.Errorf("time out")
		}
	}

	// For unknown reasons, OSS does not compute the SHA1 for multipart uploads, which makes the 115 server's verification fail
	// params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1)
	if _, err := bucket.CompleteMultipartUpload(imur, parts, append(
		driver115.OssOption(params, ossToken),
		oss.CallbackResult(&bodyBytes),
	)...); err != nil {
		return nil, err
	}

	var uploadResult UploadResult
	if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
		return nil, err
	}
	return &uploadResult, uploadResult.Err(string(bodyBytes))
}

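// chunksProducer feeds the precomputed chunks to the upload workers over ch.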
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
	for _, chunk := range chunks {
		ch <- chunk
	}
}

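// SplitFile chooses the multipart layout for a file: roughly i*1000 parts for
// files smaller than i GB, 10000 parts for files larger than 9 GB, and a
// re-split by size if a part would end up smaller than 100 KB.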
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
	for i := int64(1); i < 10; i++ {
		if fileSize < i*utils.GB { // split into i*1000 parts when the file is smaller than i GB
			if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
				return
			}
			break
		}
	}
	if fileSize > 9*utils.GB { // split into 10000 parts when the file is larger than 9 GB
		if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
			return
		}
	}
	// a single part must not be smaller than 100 KB
	if chunks[0].Size < 100*utils.KB {
		if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
			return
		}
	}
	return
}

// SplitFileByPartNum splits a big file into parts by the number of parts.
// Returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
	if chunkNum <= 0 || chunkNum > 10000 {
		return nil, errors.New("chunkNum invalid")
	}

	if int64(chunkNum) > fileSize {
		return nil, errors.New("oss: chunkNum invalid")
	}

	var chunks []oss.FileChunk
	chunk := oss.FileChunk{}
	chunkN := (int64)(chunkNum)
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * (fileSize / chunkN)
		if i == chunkN-1 {
			chunk.Size = fileSize/chunkN + fileSize%chunkN
		} else {
			chunk.Size = fileSize / chunkN
		}
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

// SplitFileByPartSize splits a big file into parts by the size of parts.
// Returns the FileChunk slice when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
	if chunkSize <= 0 {
		return nil, errors.New("chunkSize invalid")
	}

	chunkN := fileSize / chunkSize
	if chunkN >= 10000 {
		return nil, errors.New("Too many parts, please increase part size")
	}

	var chunks []oss.FileChunk
	chunk := oss.FileChunk{}
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * chunkSize
		chunk.Size = chunkSize
		chunks = append(chunks, chunk)
	}

	if fileSize%chunkSize > 0 {
		chunk.Number = len(chunks) + 1
		chunk.Offset = int64(len(chunks)) * chunkSize
		chunk.Size = fileSize % chunkSize
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}