package net
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/aws/aws-sdk-go/aws/awsutil"
log "github.com/sirupsen/logrus"
)
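// Defaults applied by Download when the corresponding Downloader fields are left zero.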
const DefaultDownloadPartSize = utils.MB * 10
const DefaultDownloadConcurrency = 2
const DefaultPartBodyMaxRetries = 3
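// DefaultConcurrencyLimit, when set to a non-nil value by the application, caps how
// many chunk downloads may run at the same time across all Downloaders created with
// NewDownloader. It is nil by default (no global limit).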
var DefaultConcurrencyLimit *ConcurrencyLimit
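// Downloader fetches a ranged HTTP resource by splitting it into PartSize chunks and
// downloading up to Concurrency chunks in parallel through HttpClient. Zero-valued
// fields fall back to the package defaults above.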
type Downloader struct {
PartSize int
PartBodyMaxRetries int
Concurrency int
HttpClient HttpRequestFunc
*ConcurrencyLimit
}
type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error)
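// NewDownloader builds a Downloader with the default retry policy and the
// package-level concurrency limit; functional options may override any field.
//
// A minimal usage sketch (the option literal and the fileURL/fileSize variables are
// illustrative, not part of this package):
//
//	d := NewDownloader(func(d *Downloader) { d.Concurrency = 4 })
//	rc, err := d.Download(ctx, &HttpRequestParams{
//		URL:   fileURL,
//		Size:  fileSize,
//		Range: http_range.Range{Start: 0, Length: -1}, // -1 = to the end of the file
//	})
//	// read from rc, then rc.Close() when done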
func NewDownloader(options ...func(*Downloader)) *Downloader {
d := &Downloader{
PartBodyMaxRetries: DefaultPartBodyMaxRetries,
ConcurrencyLimit: DefaultConcurrencyLimit,
}
for _, option := range options {
option(d)
}
return d
}
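// Download copies the request parameters, resolves a Range.Length of -1 to the
// remainder of the file, fills in the default part size, concurrency and HTTP
// client, and returns a ReadCloser streaming the requested range. The caller should
// Close the returned reader when done with it.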
func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readCloser io.ReadCloser, err error) {
var finalP HttpRequestParams
awsutil.Copy(&finalP, p)
if finalP.Range.Length == -1 {
finalP.Range.Length = finalP.Size - finalP.Range.Start
}
impl := downloader{params: &finalP, cfg: d, ctx: ctx}
if impl.cfg.Concurrency == 0 {
impl.cfg.Concurrency = DefaultDownloadConcurrency
}
if impl.cfg.PartSize == 0 {
impl.cfg.PartSize = DefaultDownloadPartSize
}
if impl.cfg.HttpClient == nil {
impl.cfg.HttpClient = DefaultHttpRequestFunc
}
return impl.download()
}
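// downloader holds the per-request state of a single Download call: the chunk
// queue, the reusable buffers, the current enqueue position and the first error
// encountered.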
type downloader struct {
ctx context.Context
cancel context.CancelCauseFunc
cfg Downloader
params *HttpRequestParams
chunkChannel chan chunk
m sync.Mutex
nextChunk int
bufs []*Buf
written int64
err error
concurrency int
maxPart int
pos int64
maxPos int64
m2 sync.Mutex
readingID int
}
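// ConcurrencyLimit is a small counting semaphore shared between downloaders: sub
// reserves a slot, failing with ErrExceedMaxConcurrency when none are left, and add
// returns it.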
type ConcurrencyLimit struct {
_m sync.Mutex
Limit int
}
var ErrExceedMaxConcurrency = fmt.Errorf("ExceedMaxConcurrency")
func (l *ConcurrencyLimit) sub() error {
l._m.Lock()
defer l._m.Unlock()
if l.Limit-1 < 0 {
return ErrExceedMaxConcurrency
}
l.Limit--
return nil
}
func (l *ConcurrencyLimit) add() {
l._m.Lock()
defer l._m.Unlock()
l.Limit++
}
func (d *downloader) concurrencyCheck() error {
if d.cfg.ConcurrencyLimit != nil {
return d.cfg.ConcurrencyLimit.sub()
}
return nil
}
func (d *downloader) concurrencyFinish() {
if d.cfg.ConcurrencyLimit != nil {
d.cfg.ConcurrencyLimit.add()
}
}
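// download starts the transfer. With an effective concurrency of 1 it issues a
// single ranged request and returns the response body directly; otherwise it
// prepares the chunk queue, starts the first worker via sendChunkTask and wraps the
// chunk buffers in a MultiReadCloser.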
func (d *downloader) download() (io.ReadCloser, error) {
if err := d.concurrencyCheck(); err != nil {
return nil, err
}
d.ctx, d.cancel = context.WithCancelCause(d.ctx)
maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize))
if d.params.Range.Length%int64(d.cfg.PartSize) > 0 {
maxPart++
}
if maxPart < d.cfg.Concurrency {
d.cfg.Concurrency = maxPart
}
log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency)
if d.cfg.Concurrency == 1 {
if d.cfg.ConcurrencyLimit != nil {
go func() {
<-d.ctx.Done()
d.concurrencyFinish()
}()
}
resp, err := d.cfg.HttpClient(d.ctx, d.params)
if err != nil {
return nil, err
}
return resp.Body, nil
}
d.chunkChannel = make(chan chunk, d.cfg.Concurrency)
d.maxPart = maxPart
d.pos = d.params.Range.Start
d.maxPos = d.params.Range.Start + d.params.Range.Length
d.concurrency = d.cfg.Concurrency
d.sendChunkTask(true)
var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf)
return rc, d.err
}
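// sendChunkTask enqueues the next chunk for a worker. When newConcurrency is true
// it also spawns another downloadPart worker, subject to the shared concurrency
// limit. While the worker pool is still growing a fresh buffer is appended;
// afterwards buffers are reused round-robin via getBuf.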
func (d *downloader) sendChunkTask(newConcurrency bool) error {
d.m.Lock()
defer d.m.Unlock()
isNewBuf := d.concurrency > 0
if newConcurrency {
if d.concurrency <= 0 {
return nil
}
if d.nextChunk > 0 {
if err := d.concurrencyCheck(); err != nil {
return err
}
}
d.concurrency--
go d.downloadPart()
}
var buf *Buf
if isNewBuf {
buf = NewBuf(d.ctx, d.cfg.PartSize)
d.bufs = append(d.bufs, buf)
} else {
buf = d.getBuf(d.nextChunk)
}
if d.pos < d.maxPos {
finalSize := int64(d.cfg.PartSize)
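// The first chunk takes the range remainder, rounded up to at least half a part;
// the second chunk gives back whatever the first borrowed, so all later chunks stay
// aligned to full parts.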
switch d.nextChunk {
case 0:
firstSize := d.params.Range.Length % finalSize
if firstSize > 0 {
minSize := finalSize / 2
if firstSize < minSize {
finalSize = minSize
} else {
finalSize = firstSize
}
}
case 1:
firstSize := d.params.Range.Length % finalSize
minSize := finalSize / 2
if firstSize > 0 && firstSize < minSize {
finalSize += firstSize - minSize
}
}
buf.Reset(int(finalSize))
ch := chunk{
start: d.pos,
size: finalSize,
id: d.nextChunk,
buf: buf,
newConcurrency: newConcurrency,
}
d.pos += finalSize
d.nextChunk++
d.chunkChannel <- ch
return nil
}
return nil
}
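// interrupt is invoked when the MultiReadCloser is closed. It records an error if
// the full range has not been written yet, cancels the context, and closes the
// chunk channel and every buffer.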
func (d *downloader) interrupt() error {
if d.written != d.params.Range.Length {
log.Debugf("Downloader interrupt before finish")
if d.getErr() == nil {
d.setErr(fmt.Errorf("interrupted"))
}
}
d.cancel(d.err)
defer func() {
close(d.chunkChannel)
for _, buf := range d.bufs {
buf.Close()
}
if d.concurrency > 0 {
d.concurrency = -d.concurrency
}
log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency)
}()
return d.err
}
func (d *downloader) getBuf(id int) (b *Buf) {
return d.bufs[id%len(d.bufs)]
}
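// finishBuf is called by the reader after it drains buffer id. It reports whether
// that was the last chunk and, if not, enqueues the next chunk and returns the
// buffer the reader should continue from.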
func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) {
id++
if id >= d.maxPart {
return true, nil
}
d.sendChunkTask(false)
d.readingID = id
return false, d.getBuf(id)
}
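// downloadPart is one worker goroutine: it keeps taking chunks off the channel
// until the channel is closed or an error has been recorded, then releases its
// concurrency slot.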
func (d *downloader) downloadPart() {
for {
c, ok := <-d.chunkChannel
if !ok {
break
}
if d.getErr() != nil {
break
}
if err := d.downloadChunk(&c); err != nil {
if err == errCancelConcurrency {
break
}
if err == context.Canceled {
if e := context.Cause(d.ctx); e != nil {
err = e
}
}
d.setErr(err)
d.cancel(err)
}
}
d.concurrencyFinish()
}
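// downloadChunk fetches a single chunk, retrying up to PartBodyMaxRetries times on
// retryable errors and resuming from the bytes already written.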
func (d *downloader) downloadChunk(ch *chunk) error {
log.Debugf("start chunk_%d, %+v", ch.id, ch)
params := d.getParamsFromChunk(ch)
var n int64
var err error
for retry := 0; retry <= d.cfg.PartBodyMaxRetries; retry++ {
if d.getErr() != nil {
return nil
}
n, err = d.tryDownloadChunk(params, ch)
if err == nil {
d.incrWritten(n)
log.Debugf("chunk_%d downloaded", ch.id)
break
}
if d.getErr() != nil {
return nil
}
if utils.IsCanceled(d.ctx) {
return d.ctx.Err()
}
if e, ok := err.(*errNeedRetry); ok {
err = e.Unwrap()
if n > 0 {
d.incrWritten(n)
ch.start += n
ch.size -= n
params.Range.Start = ch.start
params.Range.Length = ch.size
}
log.Warnf("err chunk_%d, object part download error %s, retrying attempt %d. %v",
ch.id, params.URL, retry, err)
} else if err == errInfiniteRetry {
retry--
continue
} else {
break
}
}
return err
}
var errCancelConcurrency = fmt.Errorf("cancel concurrency")
var errInfiniteRetry = fmt.Errorf("infinite retry")
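// tryDownloadChunk performs one attempt at fetching a chunk. On a failed request it
// either shrinks the worker pool and requeues the chunk, or signals an unlimited
// retry; on success it validates the remote size for the first chunk, triggers the
// next worker and copies the body into the chunk buffer.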
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
if resp == nil {
return 0, err
}
if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
return 0, err
}
if ch.id == 0 {
// Only transient server-side statuses are worth retrying for the first chunk;
// anything else fails the download immediately.
switch resp.StatusCode {
case http.StatusTooManyRequests,
http.StatusBadGateway,
http.StatusServiceUnavailable,
http.StatusGatewayTimeout:
default:
return 0, err
}
<-time.After(time.Millisecond * 200)
return 0, &errNeedRetry{err: fmt.Errorf("http request failure, status: %d", resp.StatusCode)}
}
log.Debugf("err chunk_%d, try downloading:%v", ch.id, err)
d.m.Lock()
isCancelConcurrency := ch.newConcurrency
if d.concurrency > 0 {
d.concurrency = -d.concurrency
isCancelConcurrency = true
}
if isCancelConcurrency {
d.concurrency--
d.chunkChannel <- *ch
d.m.Unlock()
return 0, errCancelConcurrency
}
d.m.Unlock()
if ch.id != d.readingID {
d.m2.Lock()
defer d.m2.Unlock()
<-time.After(time.Millisecond * 200)
}
return 0, errInfiniteRetry
}
defer resp.Body.Close()
if ch.id == 0 {
err = d.checkTotalBytes(resp)
if err != nil {
return 0, err
}
}
d.sendChunkTask(true)
n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
if err != nil {
return n, &errNeedRetry{err: err}
}
if n != ch.size {
err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n)
return n, &errNeedRetry{err: err}
}
return n, nil
}
func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams {
var params HttpRequestParams
awsutil.Copy(&params, d.params)
params.Range = http_range.Range{Start: ch.start, Length: ch.size}
return &params
}
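// checkTotalBytes compares the size reported by the remote (Content-Range, or
// Content-Length when no Content-Range is present) with the expected size and
// aborts the download on a mismatch.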
func (d *downloader) checkTotalBytes(resp *http.Response) error {
var err error
totalBytes := int64(-1)
contentRange := resp.Header.Get("Content-Range")
if len(contentRange) == 0 {
if resp.ContentLength > 0 {
totalBytes = resp.ContentLength
}
} else {
parts := strings.Split(contentRange, "/")
total := int64(-1)
totalStr := parts[len(parts)-1]
if totalStr != "*" {
total, err = strconv.ParseInt(totalStr, 10, 64)
if err != nil {
err = fmt.Errorf("failed extracting file size")
}
} else {
err = fmt.Errorf("file size unknown")
}
totalBytes = total
}
if totalBytes != d.params.Size && err == nil {
err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes)
}
if err != nil {
d.setErr(err)
d.cancel(err)
}
return err
}
func (d *downloader) incrWritten(n int64) {
d.m.Lock()
defer d.m.Unlock()
d.written += n
}
func (d *downloader) getErr() error {
d.m.Lock()
defer d.m.Unlock()
return d.err
}
func (d *downloader) setErr(e error) {
d.m.Lock()
defer d.m.Unlock()
d.err = e
}
type chunk struct {
start int64
size int64
buf *Buf
id int
newConcurrency bool
}
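// DefaultHttpRequestFunc issues a plain GET with the Range header applied; it is
// used when no custom HttpClient is configured on the Downloader.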
func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef)
return RequestHttp(ctx, "GET", header, params.URL)
}
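// HttpRequestParams describes one ranged request: the target URL, the byte range to
// fetch, the headers to send the request with, and the total size of the remote file.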
type HttpRequestParams struct {
URL string
Range http_range.Range
HeaderRef http.Header
Size int64
}
type errNeedRetry struct {
err error
}
func (e *errNeedRetry) Error() string {
return e.err.Error()
}
func (e *errNeedRetry) Unwrap() error {
return e.err
}
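// MultiReadCloser stitches the per-chunk buffers back into one sequential stream:
// when the current buffer is exhausted it asks finish for the next one, and Close
// delegates to the downloader's interrupt.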
type MultiReadCloser struct {
cfg *cfg
closer closerFunc
finish finishBufFUnc
}
type cfg struct {
rPos int
curBuf *Buf
}
type closerFunc func() error
type finishBufFUnc func(id int) (isLast bool, buf *Buf)
func NewMultiReadCloser(buf *Buf, c closerFunc, fb finishBufFUnc) *MultiReadCloser {
return &MultiReadCloser{closer: c, finish: fb, cfg: &cfg{curBuf: buf}}
}
func (mr MultiReadCloser) Read(p []byte) (n int, err error) {
if mr.cfg.curBuf == nil {
return 0, io.EOF
}
n, err = mr.cfg.curBuf.Read(p)
if err == io.EOF {
log.Debugf("read_%d finished current buffer", mr.cfg.rPos)
isLast, next := mr.finish(mr.cfg.rPos)
if isLast {
return n, io.EOF
}
mr.cfg.curBuf = next
mr.cfg.rPos++
return n, nil
}
if err == context.Canceled {
if e := context.Cause(mr.cfg.curBuf.ctx); e != nil {
err = e
}
}
return n, err
}
func (mr MultiReadCloser) Close() error {
return mr.closer()
}
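// Buf is a fixed-size buffer written by one download worker and read by the
// consumer. Read reports io.EOF once size bytes have been consumed; while the
// buffer is empty but not yet complete it waits up to 200ms and returns (0, nil) so
// the caller retries.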
type Buf struct {
buffer *bytes.Buffer
size int
ctx context.Context
off int
rw sync.Mutex
}
func NewBuf(ctx context.Context, maxSize int) *Buf {
return &Buf{
ctx: ctx,
buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
size: maxSize,
}
}
func (br *Buf) Reset(size int) {
br.buffer.Reset()
br.size = size
br.off = 0
}
func (br *Buf) Read(p []byte) (n int, err error) {
if err := br.ctx.Err(); err != nil {
return 0, err
}
if len(p) == 0 {
return 0, nil
}
if br.off >= br.size {
return 0, io.EOF
}
br.rw.Lock()
n, err = br.buffer.Read(p)
br.rw.Unlock()
if err == nil {
br.off += n
return n, err
}
if err != io.EOF {
return n, err
}
if n != 0 {
br.off += n
return n, nil
}
select {
case <-br.ctx.Done():
return 0, br.ctx.Err()
case <-time.After(time.Millisecond * 200):
return 0, nil
}
}
func (br *Buf) Write(p []byte) (n int, err error) {
if err := br.ctx.Err(); err != nil {
return 0, err
}
br.rw.Lock()
defer br.rw.Unlock()
n, err = br.buffer.Write(p)
return
}
func (br *Buf) Close() {
br.buffer = nil
}