package net
import (
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// ServeHTTP streams content of the given name/size from RangeReadCloser to the
// client, honoring conditional-request preconditions, HEAD semantics, and HTTP
// Range requests (both single-range and multipart/byteranges responses).
// size must be the total content length; negative sizes are rejected with 500.
// The reader is always closed before returning.
func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) error {
	defer RangeReadCloser.Close()
	setLastModified(w, modTime)
	done, rangeReq := checkPreconditions(w, r, modTime)
	if done {
		// Precondition handling already wrote the response (e.g. 304).
		return nil
	}
	if size < 0 {
		http.Error(w, "negative content size not supported", http.StatusInternalServerError)
		return nil
	}
	code := http.StatusOK

	// Content-Type: respect a pre-set header; otherwise guess from the file
	// extension, falling back to a generic binary type.
	contentTypes, haveType := w.Header()["Content-Type"]
	var contentType string
	if !haveType {
		contentType = mime.TypeByExtension(filepath.Ext(name))
		if contentType == "" {
			contentType = "application/octet-stream"
		}
		w.Header().Set("Content-Type", contentType)
	} else if len(contentTypes) > 0 {
		contentType = contentTypes[0]
	}

	sendSize := size
	var sendContent io.ReadCloser
	ranges, err := http_range.ParseRange(rangeReq, size)
	switch {
	case err == nil:
	case errors.Is(err, http_range.ErrNoOverlap):
		if size == 0 {
			// An unsatisfiable range on an empty file is served as the full
			// (empty) content rather than a 416.
			ranges = nil
			break
		}
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
		fallthrough
	default:
		http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
		return nil
	}
	if sumRangesSize(ranges) > size {
		// The ranges together exceed the content size; the request is likely
		// malformed or crafted — ignore the ranges and serve everything.
		ranges = nil
	}

	// Expose the client's headers to the range-reader implementation.
	// NOTE(review): a string context key triggers go vet; kept as-is because
	// downstream readers look up this exact key.
	ctx := context.WithValue(r.Context(), "request_header", r.Header)
	switch {
	case len(ranges) == 0:
		// Full-content response.
		reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
		if err != nil {
			code = http.StatusRequestedRangeNotSatisfiable
			if errors.Is(err, ErrExceedMaxConcurrency) {
				code = http.StatusTooManyRequests
			}
			http.Error(w, err.Error(), code)
			return nil
		}
		sendContent = reader
	case len(ranges) == 1:
		// Single range: 206 with a Content-Range header.
		ra := ranges[0]
		sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
		if err != nil {
			code = http.StatusRequestedRangeNotSatisfiable
			if errors.Is(err, ErrExceedMaxConcurrency) {
				code = http.StatusTooManyRequests
			}
			http.Error(w, err.Error(), code)
			return nil
		}
		sendSize = ra.Length
		code = http.StatusPartialContent
		w.Header().Set("Content-Range", ra.ContentRange(size))
	case len(ranges) > 1:
		// Multiple ranges: 206 multipart/byteranges, streamed through a pipe.
		sendSize, err = rangesMIMESize(ranges, contentType, size)
		if err != nil {
			http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
			// BUG FIX: previously execution fell through after writing the
			// error and built a multipart response on top of it.
			return nil
		}
		code = http.StatusPartialContent
		pr, pw := io.Pipe()
		mw := multipart.NewWriter(pw)
		w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
		sendContent = pr
		defer pr.Close()
		go func() {
			for _, ra := range ranges {
				part, err := mw.CreatePart(ra.MimeHeader(contentType, size))
				if err != nil {
					pw.CloseWithError(err)
					return
				}
				reader, err := RangeReadCloser.RangeRead(ctx, ra)
				if err != nil {
					pw.CloseWithError(err)
					return
				}
				if _, err := utils.CopyWithBufferN(part, reader, ra.Length); err != nil {
					pw.CloseWithError(err)
					return
				}
			}
			mw.Close()
			pw.Close()
		}()
	}

	w.Header().Set("Accept-Ranges", "bytes")
	// Only advertise Content-Length when the body is not content-encoded.
	if w.Header().Get("Content-Encoding") == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
	}
	w.WriteHeader(code)
	if r.Method != "HEAD" {
		written, err := utils.CopyWithBufferN(w, sendContent, sendSize)
		if err != nil {
			log.Warnf("ServeHttp error. err: %s ", err)
			if written != sendSize {
				log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
			}
			code = http.StatusInternalServerError
			if errors.Is(err, ErrExceedMaxConcurrency) {
				code = http.StatusTooManyRequests
			}
			// Headers were already sent above, so net/http ignores this second
			// WriteHeader on the wire; kept from the original for its logging
			// side effect.
			w.WriteHeader(code)
			return err
		}
	}
	return nil
}
func ProcessHeader(origin, override http.Header) http.Header {
result := http.Header{}
for h, val := range origin {
if utils.SliceContains(conf.SlicesMap[conf.ProxyIgnoreHeaders], strings.ToLower(h)) {
continue
}
result[h] = val
}
for h, val := range override {
result[h] = val
}
return result
}
// RequestHttp issues httpMethod against URL with the given headers using the
// shared client. Set-Cookie from the upstream is stripped. For status >= 400
// the body is read (gunzipped when Content-Encoding is gzip), the body is
// closed, and the (non-nil) response is returned together with an error that
// carries the status and the decoded message. Callers must close the body of
// a successful response themselves.
func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, httpMethod, URL, nil)
	if err != nil {
		return nil, err
	}
	req.Header = headerOverride
	res, err := HttpClient().Do(req)
	if err != nil {
		return nil, err
	}
	// Don't let upstream cookies leak through to our client.
	res.Header.Del("set-cookie")
	if res.StatusCode < 400 {
		return res, nil
	}
	// Error response: decode the body for the error message.
	var reader io.Reader = res.Body
	if res.Header.Get("Content-Encoding") == "gzip" {
		// BUG FIX: the gzip.NewReader error was discarded, leaving reader as a
		// nil interface and panicking in the deferred Close and io.ReadAll.
		// On failure we now fall back to the raw (compressed) body.
		if gz, gzErr := gzip.NewReader(res.Body); gzErr == nil {
			defer gz.Close()
			reader = gz
		}
	}
	all, _ := io.ReadAll(reader) // best effort: the message is diagnostic only
	_ = res.Body.Close()
	msg := string(all)
	log.Debugln(msg)
	return res, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg)
}
var (
	once       sync.Once
	httpClient *http.Client
)

// HttpClient returns the lazily-initialized, process-wide HTTP client.
// The client stops after 10 redirects and removes the Referer header
// before following each redirect.
func HttpClient() *http.Client {
	once.Do(func() {
		c := NewHttpClient()
		c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			req.Header.Del("Referer")
			return nil
		}
		httpClient = c
	})
	return httpClient
}
// NewHttpClient builds a fresh *http.Client with a 48-hour timeout (long
// enough for very large transfers), proxy settings taken from the
// environment, and TLS certificate verification controlled by the
// TlsInsecureSkipVerify configuration flag.
func NewHttpClient() *http.Client {
	transport := &http.Transport{
		Proxy:           http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
	}
	return &http.Client{
		Timeout:   48 * time.Hour,
		Transport: transport,
	}
}