tusgo

package module
v0.1.1
Published: Feb 12, 2023 License: Apache-2.0 Imports: 13 Imported by: 2

README

tusgo


Full-featured Go client for TUS, a protocol for resumable uploads built on HTTP.

Documentation is available at pkg.go.dev

Features

  • Resumable Upload writer with chunked and streamed mode support. Conforms to io.Writer/io.ReaderFrom, which allows using standard utilities such as io.Copy
  • Client for Upload manipulation such as creation, deletion, concatenation, etc.
  • Intermediate data store (for chunked Uploads) is currently in-memory only
  • Server extensions are supported:
    • creation extension -- upload creation
    • creation-defer-length -- upload creation without size. Its size is set on the first data transfer
    • creation-with-upload -- upload creation and data transferring in one HTTP request
    • expiration -- parsing the upload expiration info
    • checksum -- data integrity verification for chunked uploads. Many checksum algorithms from Go stdlib are supported
    • checksum-trailer -- data integrity verification for streamed uploads. The checksum hash is calculated over all data in the stream and is put into an HTTP trailer
    • termination -- deleting uploads from server
    • concatenation -- merge finished uploads into one
    • concatenation-unfinished -- merge unfinished uploads (data streams) into one upload

Installation

go get github.com/bdragon300/tusgo

Go v1.18 or newer is required

Examples

Minimal file transfer example
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	// Assume that the Upload has been created on the server earlier with a size of 1 MiB
	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 1024 * 1024}
	// Open a file we want to upload
	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	s := tusgo.NewUploadStream(cl, &u)
	// Set stream and file pointers to be equal to the remote pointer
	if _, err = s.Sync(); err != nil {
		panic(err)
	}
	if _, err = f.Seek(s.Tell(), io.SeekStart); err != nil {
		panic(err)
	}
	
	written, err := io.Copy(s, f)
	if err != nil {
		panic(fmt.Sprintf("Written %d bytes, error: %s, last response: %v", written, err, s.LastResponse))
	}
	fmt.Printf("Written %d bytes\n", written)
}
Create an Upload and transfer the file, retrying on errors
package main

import (
	"errors"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {
	// Set stream and file pointer to be equal to the remote pointer
	// (if we resume the upload that was interrupted earlier)
	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err // Permanent error, no luck
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src) // Try to resume the transfer again
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func CreateUploadFromFile(f *os.File, cl *tusgo.Client) *tusgo.Upload {
	finfo, err := f.Stat()
	if err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	if _, err = cl.CreateUpload(&u, finfo.Size(), false, nil); err != nil {
		panic(err)
	}
	return &u
}

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	u := CreateUploadFromFile(f, cl)

	stream := tusgo.NewUploadStream(cl, u)
	if err = UploadWithRetry(stream, f); err != nil {
		panic(err)
	}
}

Documentation

Overview

Example (Checksum)
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {

	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src)
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	// Open a file to be transferred
	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	finfo, err := f.Stat()
	if err != nil {
		panic(err)
	}
	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: finfo.Size()}

	// We want to use sha1
	stream := tusgo.NewUploadStream(cl, &u).WithChecksumAlgorithm("sha1")
	if err = UploadWithRetry(stream, f); err != nil {
		panic(err)
	}
	fmt.Println("Uploading complete")
}
Output:

Example (CreationAndTransfer)
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {

	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src)
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func CreateUploadFromFile(file *os.File, cl *tusgo.Client, partial bool) *tusgo.Upload {

	finfo, err := file.Stat()
	if err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	if _, err := cl.CreateUpload(&u, finfo.Size(), partial, nil); err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s\n", u.Location)
	return &u
}

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	u := CreateUploadFromFile(f, cl, false)

	stream := tusgo.NewUploadStream(cl, u)
	if err = UploadWithRetry(stream, f); err != nil {
		panic(err)
	}
	fmt.Printf("Uploading complete. Offset: %d, Size: %d\n", u.RemoteOffset, u.RemoteSize)
}
Output:

Example (CreationAndTransferWithDeferredSize)
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {

	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src)
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	if _, err = cl.CreateUpload(&u, tusgo.SizeUnknown, false, nil); err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s\n", u.Location)

	// Open a file to be transferred
	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	finfo, err := f.Stat()
	if err != nil {
		panic(err)
	}
	u.RemoteSize = finfo.Size() // Set size after the upload has been created on server

	stream := tusgo.NewUploadStream(cl, &u)
	stream.SetUploadSize = true
	if err = UploadWithRetry(stream, f); err != nil {
		panic(err)
	}
	fmt.Printf("Uploading complete. Offset: %d, Size: %d\n", u.RemoteOffset, u.RemoteSize)
}
Output:

Example (Minimal)
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	// Assume that the Upload has been created on the server earlier with a size of 1 MiB
	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 1024 * 1024}
	// Open a file we want to upload
	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	s := tusgo.NewUploadStream(cl, &u)
	// Set stream and file pointers to be equal to the remote pointer
	if _, err := s.Sync(); err != nil {
		panic(err)
	}
	if _, err := f.Seek(s.Tell(), io.SeekStart); err != nil {
		panic(err)
	}

	written, err := io.Copy(s, f)
	if err != nil {
		panic(fmt.Sprintf("Written %d bytes, error: %s, last response: %v", written, err, s.LastResponse))
	}
	fmt.Printf("Written %d bytes\n", written)
}
Output:

Example (TransferWithProgressWatch)
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {

	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src)
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func CreateUploadFromFile(file *os.File, cl *tusgo.Client, partial bool) *tusgo.Upload {

	finfo, err := file.Stat()
	if err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	if _, err := cl.CreateUpload(&u, finfo.Size(), partial, nil); err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s\n", u.Location)
	return &u
}

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	// Open a file to be transferred
	f, err := os.Open("/tmp/file1.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	u := CreateUploadFromFile(f, cl, false)

	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			fmt.Printf("Progress: %d/%d (%.1f%%)\n", u.RemoteOffset, u.RemoteSize, float64(u.RemoteOffset)/float64(u.RemoteSize)*100)
			if u.RemoteOffset == u.RemoteSize {
				return
			}
		}
	}()

	stream := tusgo.NewUploadStream(cl, u)
	if err = UploadWithRetry(stream, f); err != nil {
		panic(err)
	}
	fmt.Printf("Uploading complete. Offset: %d, Size: %d\n", u.RemoteOffset, u.RemoteSize)
}
Output:


Constants

const (
	// SizeUnknown passed to the `remoteSize` parameter of Client.CreateUpload means that the upload size will be
	// set later, during data uploading. UploadStream.SetUploadSize must be set to true before the data transfer starts.
	// The server must support the "creation-defer-length" extension for this feature.
	SizeUnknown = -1

	// OffsetUnknown is a special value for Upload.RemoteOffset meaning that concatenation is still in progress
	// on the server. It is set by the Client.GetUpload method when fetching an upload previously created by the
	// Client.Concatenate* methods. Once the server finishes concatenation, Client.GetUpload will set the offset to a concrete value.
	OffsetUnknown = -1
)
const NoChunked = 0

Assigning NoChunked to UploadStream.ChunkSize disables chunking during the upload process
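
For illustration, a minimal sketch of streaming (non-chunked) mode; the upload location, size and file path are placeholders, and the upload is assumed to already exist on the server:

package main

import (
	"net/http"
	"net/url"
	"os"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 1024 * 1024}
	s := tusgo.NewUploadStream(cl, &u)
	// Disable chunking: data is streamed directly into the request body, no dirty buffer is used
	s.ChunkSize = tusgo.NoChunked

	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err = s.ReadFrom(f); err != nil {
		panic(err)
	}
}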

Variables

var (
	ErrUnsupportedFeature = TusError{/* contains filtered or unexported fields */}
	ErrUploadTooLarge     = TusError{/* contains filtered or unexported fields */}
	ErrUploadDoesNotExist = TusError{/* contains filtered or unexported fields */}
	ErrOffsetsNotSynced   = TusError{/* contains filtered or unexported fields */}
	ErrChecksumMismatch   = TusError{/* contains filtered or unexported fields */}
	ErrProtocol           = TusError{/* contains filtered or unexported fields */}
	ErrCannotUpload       = TusError{/* contains filtered or unexported fields */}
	ErrUnexpectedResponse = TusError{/* contains filtered or unexported fields */}
)

Functions

func DecodeMetadata

func DecodeMetadata(raw string) (map[string]string, error)

DecodeMetadata decodes metadata in Tus Upload-Metadata header format

func EncodeMetadata

func EncodeMetadata(metadata map[string]string) (string, error)

EncodeMetadata converts a map of values to the Tus Upload-Metadata header format
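
A small sketch of a metadata round trip (the key names below are arbitrary):

package main

import (
	"fmt"

	"github.com/bdragon300/tusgo"
)

func main() {
	meta := map[string]string{"filename": "file.txt", "content-type": "text/plain"}
	// Encode the map into an Upload-Metadata header value
	encoded, err := tusgo.EncodeMetadata(meta)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded)

	// Decode it back into a map
	decoded, err := tusgo.DecodeMetadata(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded["filename"]) // file.txt
}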

Types

type Client

type Client struct {
	// BaseURL is the base URL the client makes requests to. For example, "http://example.com/files"
	BaseURL *url.URL

	// ProtocolVersion is the TUS protocol version to be used in requests. Default is "1.0.0"
	ProtocolVersion string

	// Server capabilities and settings. Use UpdateCapabilities to query the capabilities from a server
	Capabilities *ServerCapabilities

	// GetRequest is a callback function that is called by the library to get a new request object.
	// By default it returns a new empty http.Request
	GetRequest GetRequestFunc
	// contains filtered or unexported fields
}

Client contains methods to manipulate server uploads, except for uploading data. This includes creating, deleting, getting information about, and concatenating uploads from partial ones. For uploading the data, see UploadStream.

The methods may return the following errors (the sketch after this list shows how they can be matched):

  • ErrProtocol -- an unexpected condition was detected in a successful server response

  • ErrUnsupportedFeature -- the requested action requires an extension that the server did not advertise in its capabilities

  • ErrUploadTooLarge -- the requested upload size is larger than the server is ready to accept. See ServerCapabilities.MaxSize

  • ErrUploadDoesNotExist -- the requested upload does not exist or access is denied

  • ErrUnexpectedResponse -- unexpected server response code
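
Since these are sentinel TusError values (TusError implements Is), they can be matched with errors.Is. A sketch, with a placeholder server URL:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{}
	if _, err := cl.CreateUpload(&u, 1024*1024, false, nil); err != nil {
		switch {
		case errors.Is(err, tusgo.ErrUnsupportedFeature):
			fmt.Println("server does not advertise the creation extension")
		case errors.Is(err, tusgo.ErrUploadTooLarge):
			fmt.Println("requested size exceeds ServerCapabilities.MaxSize")
		default:
			panic(err)
		}
	}
}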

func NewClient

func NewClient(client *http.Client, baseURL *url.URL) *Client

NewClient returns a new Client instance with the given underlying HTTP client and the base URL the requests will be sent to

func (*Client) ConcatenateStreams

func (c *Client) ConcatenateStreams(final *Upload, streams []*UploadStream, meta map[string]string) (response *http.Response, err error)

ConcatenateStreams makes a request to concatenate partial uploads from the given streams into one final upload. The final Upload object will be filled with the location of the created final upload. Returns the http response from the server (with closed body) and an error (if any).

Server must support the "concatenation" extension for this feature. Streams whose pointers do not point to the end of the stream are treated as unfinished -- the server must support "concatenation-unfinished" in this case.

This method may return ErrUnsupportedFeature if the server doesn't support the extension, or ErrUnexpectedResponse if an unexpected response has been received from the server.
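
A hedged sketch of how this might look; the base URL is a placeholder, and the server is assumed to support "concatenation" (and "concatenation-unfinished" for streams that are not fully uploaded):

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	// Create two partial uploads of 1 KiB each
	p1, p2 := tusgo.Upload{}, tusgo.Upload{}
	if _, err := cl.CreateUpload(&p1, 1024, true, nil); err != nil {
		panic(err)
	}
	if _, err := cl.CreateUpload(&p2, 1024, true, nil); err != nil {
		panic(err)
	}

	// Wrap them into streams; streams not pointing to the end are treated as unfinished
	s1 := tusgo.NewUploadStream(cl, &p1)
	s2 := tusgo.NewUploadStream(cl, &p2)

	final := tusgo.Upload{}
	if _, err := cl.ConcatenateStreams(&final, []*tusgo.UploadStream{s1, s2}, nil); err != nil {
		panic(err)
	}
	fmt.Println(final.Location)
}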

func (*Client) ConcatenateUploads

func (c *Client) ConcatenateUploads(final *Upload, partials []Upload, meta map[string]string) (response *http.Response, err error)

ConcatenateUploads makes a request to concatenate previously created partial uploads into one final upload. Fills `final` with the upload that was created. Returns the http response from the server (with closed body) and an error (if any).

Server must support the "concatenation" extension for this feature. Typically, partial uploads must be fully uploaded to the server, but if the server supports the "concatenation-unfinished" extension, it may accept unfinished uploads.

This method may return ErrUnsupportedFeature if the server doesn't support the extension, or ErrUnexpectedResponse if an unexpected response has been received from the server.

Example (WithCreation)
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"sync"
	"time"

	"github.com/bdragon300/tusgo"
)

func UploadWithRetry(dst *tusgo.UploadStream, src *os.File) error {

	if _, err := dst.Sync(); err != nil {
		return err
	}
	if _, err := src.Seek(dst.Tell(), io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(dst, src)
	attempts := 10
	for err != nil && attempts > 0 {
		if _, ok := err.(net.Error); !ok && !errors.Is(err, tusgo.ErrChecksumMismatch) {
			return err
		}
		time.Sleep(5 * time.Second)
		attempts--
		_, err = io.Copy(dst, src)
	}
	if attempts == 0 {
		return errors.New("too many attempts to upload the data")
	}
	return nil
}

func CreateUploadFromFile(file *os.File, cl *tusgo.Client, partial bool) *tusgo.Upload {

	finfo, err := file.Stat()
	if err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	if _, err := cl.CreateUpload(&u, finfo.Size(), partial, nil); err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s\n", u.Location)
	return &u
}

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	wg := &sync.WaitGroup{}
	fileNames := []string{"/tmp/file1.txt", "/tmp/file2.txt"}
	// Partial uploads will be created and filled in by the goroutines below
	uploads := make([]*tusgo.Upload, 2)
	wg.Add(len(fileNames))

	// Transfer partial uploads in parallel
	for ind, fn := range fileNames {
		fn := fn
		ind := ind
		go func() {
			defer wg.Done()

			f, err := os.Open(fn)
			if err != nil {
				panic(err)
			}
			defer f.Close()
			uploads[ind] = CreateUploadFromFile(f, cl, true)
			fmt.Printf("Upload #%d: Location: %s", ind, uploads[ind].Location)

			fmt.Printf("Upload #%d: transferring file %s to %s\n", ind, fn, uploads[ind].Location)
			stream := tusgo.NewUploadStream(cl, uploads[ind])
			if err = UploadWithRetry(stream, f); err != nil {
				panic(err)
			}
		}()
	}

	wg.Wait()
	fmt.Println("Uploading complete, starting concatenation...")

	// Concatenate partial uploads into a final upload
	final := tusgo.Upload{}
	if _, err = cl.ConcatenateUploads(&final, []tusgo.Upload{*uploads[0], *uploads[1]}, nil); err != nil {
		panic(err)
	}

	fmt.Printf("Final upload location: %s\n", final.Location)

	// Get file info
	u := tusgo.Upload{RemoteOffset: tusgo.OffsetUnknown}
	for {
		if _, err = cl.GetUpload(&u, final.Location); err != nil {
			panic(err)
		}
		// While concatenation is still in progress, the offset can be either OffsetUnknown or a value less than
		// the size, depending on the server implementation
		if u.RemoteOffset != tusgo.OffsetUnknown && u.RemoteOffset == u.RemoteSize {
			break
		}
		fmt.Println("Waiting concatenation to be finished")
		time.Sleep(2 * time.Second)
	}

	fmt.Printf("Concatenation finished. Offset: %d, Size: %d\n", u.RemoteOffset, u.RemoteSize)
}
Output:

func (*Client) CreateUpload

func (c *Client) CreateUpload(u *Upload, remoteSize int64, partial bool, meta map[string]string) (response *http.Response, err error)

CreateUpload creates an upload on the server. Fills `u` with the upload that was created. Returns the http response from the server (with closed body) and an error (if any).

Server must support the "creation" extension. We create an upload with the given size and metadata. If the `partial` flag is true, we create a partial upload. Metadata map keys must not contain spaces.

If `remoteSize` is equal to SizeUnknown, we create an upload with deferred size, i.e. an upload whose size is unknown at the moment, but must be known once the data transfer starts. The server must also support the "creation-defer-length" extension for this feature.

This method may return ErrUploadTooLarge if the upload size exceeds the maximum size (ServerCapabilities.MaxSize) the server is capable of accepting. If another unexpected response has been received from the server, the method returns ErrUnexpectedResponse.

Example
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, err := url.Parse("http://example.com/files")
	if err != nil {
		panic(err)
	}
	cl := tusgo.NewClient(http.DefaultClient, baseURL)
	if _, err = cl.UpdateCapabilities(); err != nil {
		panic(err)
	}

	u := tusgo.Upload{}
	// Create an upload with 2 MiB size
	if _, err = cl.CreateUpload(&u, 1024*1024*2, false, nil); err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s\n", u.Location)
}
Output:

func (*Client) CreateUploadWithData

func (c *Client) CreateUploadWithData(u *Upload, data []byte, remoteSize int64, partial bool, meta map[string]string) (uploadedBytes int64, response *http.Response, err error)

CreateUploadWithData creates an upload on the server and sends its data in the same HTTP request. Fills `u` with the upload that was created. Returns the count of uploaded bytes, the http response from the server, and an error (if any).

Server must support the "creation-with-upload" extension for this feature.

This method may return ErrUnsupportedFeature if the server doesn't support the extension. It may also return any of the errors the UploadStream methods may return.
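
A small sketch, with a placeholder base URL and assuming the server supports "creation-with-upload":

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	data := []byte("hello, tus")
	u := tusgo.Upload{}
	// Create the upload and transfer its data in a single request
	uploaded, _, err := cl.CreateUploadWithData(&u, data, int64(len(data)), false, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Location: %s, uploaded %d bytes\n", u.Location, uploaded)
}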

func (*Client) DeleteUpload

func (c *Client) DeleteUpload(u Upload) (response *http.Response, err error)

DeleteUpload deletes an upload. Receives `u` with the upload to be deleted. Returns the http response from the server (with closed body) and an error (if any).

Server must support the "termination" extension to be able to delete uploads.

This method may return ErrUploadDoesNotExist if such an upload was not found on the server, or ErrUnsupportedFeature if the server doesn't support the "termination" extension. If an unexpected response has been received from the server, the method returns ErrUnexpectedResponse.
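
A small sketch; the upload location is a placeholder and the server is assumed to support "termination":

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{Location: "http://example.com/files/foo/bar"}
	if _, err := cl.DeleteUpload(u); err != nil {
		if errors.Is(err, tusgo.ErrUploadDoesNotExist) {
			fmt.Println("upload is already gone")
			return
		}
		panic(err)
	}
	fmt.Println("upload deleted")
}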

func (*Client) GetUpload

func (c *Client) GetUpload(u *Upload, location string) (response *http.Response, err error)

GetUpload obtains an upload by location. Fills the `u` variable with the upload info. Returns the http response from the server (with closed body) and an error (if any).

For a regular upload we fill in just the remote offset and the Partial flag. For final concatenated uploads we may also set the upload size (if the server provides it). We may also set the remote offset to OffsetUnknown for concatenated final uploads if the concatenation is still in progress on the server side.

This method may return ErrUploadDoesNotExist if an upload with such a location was not found on the server. If another unexpected response has been received from the server, the method returns ErrUnexpectedResponse.
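
A small sketch of fetching upload info by location (the location is a placeholder):

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{}
	if _, err := cl.GetUpload(&u, "http://example.com/files/foo/bar"); err != nil {
		panic(err)
	}
	fmt.Printf("Offset: %d, Partial: %v\n", u.RemoteOffset, u.Partial)
}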

func (*Client) UpdateCapabilities

func (c *Client) UpdateCapabilities() (response *http.Response, err error)

UpdateCapabilities gathers server capabilities and updates the client's Capabilities field. Returns the http response from the server (with closed body) and an error (if any).

func (*Client) WithContext

func (c *Client) WithContext(ctx context.Context) *Client

WithContext returns a copy of the client with the given context object assigned to it
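
A small sketch of bounding requests with a timeout via a context (URL and size are placeholders):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"time"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	u := tusgo.Upload{}
	// Requests made through this client copy are cancelled when the context expires
	if _, err := cl.WithContext(ctx).CreateUpload(&u, 1024, false, nil); err != nil {
		panic(err)
	}
	fmt.Println(u.Location)
}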

type GetRequestFunc

type GetRequestFunc func(method, url string, body io.Reader, tusClient *Client, httpClient *http.Client) (*http.Request, error)

type ServerCapabilities

type ServerCapabilities struct {
	// Tus protocol extensions the server supports. For the full extensions list see the Tus protocol description.
	// Some of them are: creation, creation-defer-length, creation-with-upload, termination, concatenation,
	// concatenation-unfinished, checksum, checksum-trailer, expiration
	Extensions []string

	// Maximum size of an upload the server is capable of accepting. 0 means that the server does not set such a limit.
	MaxSize int64

	// Tus protocol versions the server supports. A client must select one of these versions by setting
	// Client.ProtocolVersion
	ProtocolVersions []string

	// Checksum algorithms the server supports. For this feature the server must expose at least the "checksum" extension.
	// See also checksum.Algorithms for the list of hashes tusgo can use.
	ChecksumAlgorithms []string
}

ServerCapabilities contains the features and limits of a Tus server. These features are exposed by the server itself via the OPTIONS endpoint and may be fetched by the Client.UpdateCapabilities method.
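
A small sketch of checking a capability after fetching it (the extension checked here is "termination", the URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	if _, err := cl.UpdateCapabilities(); err != nil {
		panic(err)
	}
	supported := false
	for _, ext := range cl.Capabilities.Extensions {
		if ext == "termination" {
			supported = true
			break
		}
	}
	fmt.Printf("termination supported: %v, max size: %d\n", supported, cl.Capabilities.MaxSize)
}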

type TusError

type TusError struct {
	// contains filtered or unexported fields
}

func (TusError) Error

func (te TusError) Error() string

func (TusError) Is

func (te TusError) Is(e error) bool

func (TusError) Unwrap

func (te TusError) Unwrap() error

func (TusError) WithErr

func (te TusError) WithErr(err error) TusError

func (TusError) WithResponse

func (te TusError) WithResponse(r *http.Response) TusError

func (TusError) WithText

func (te TusError) WithText(s string) TusError

type Upload

type Upload struct {
	// Location is the upload location. Can be either a path or URL
	Location string

	// RemoteSize is the remote upload size in bytes. The value SizeUnknown here means that the upload was created with
	// deferred size, and its size must be determined before the first data transfer.
	RemoteSize int64

	// RemoteOffset reflects the offset of remote upload. This field is continuously updated by UploadStream
	// while transferring the data.
	RemoteOffset int64

	// Metadata is additional data assigned to the upload when it was created on the server.
	Metadata map[string]string

	// UploadExpired represents the time when the upload expires on the server and becomes unavailable after that. A nil
	// value means that the upload does not expire.
	UploadExpired *time.Time

	// Partial set to true denotes that the upload is "partial" and is meant to be concatenated into a "final" upload later.
	Partial bool
}

Upload represents an upload on the server.

type UploadStream

type UploadStream struct {
	// ChunkSize determines the chunk size and the dirty buffer size for chunked uploading. You can set
	// this value to NoChunked to disable chunking, which avoids using the dirty buffer. Default is 2 MiB
	ChunkSize int64

	// LastResponse is a read-only field that contains the last server response received by this UploadStream.
	// This is useful, for example, when you need the response that caused an error.
	LastResponse *http.Response

	// SetUploadSize relates to the "Deferred length" TUS protocol feature. When using this feature, we create an upload
	// with unknown size, and the server expects that we will tell it the size on the first upload request.
	//
	// If SetUploadSize is true, then the very first request for an upload (i.e. when RemoteOffset == 0) will also
	// contain the upload size, which is taken from Upload.RemoteSize field.
	SetUploadSize bool

	Upload *Upload
	// contains filtered or unexported fields
}

UploadStream is a write-only stream implemented on top of TUS requests. During creation, the UploadStream receives a pointer to an Upload object, where it keeps the current server offset to write data to. This offset is continuously updated while data is uploaded to the server. Note that the stream takes ownership of the upload, so the upload should be treated as read-only.

By default, we upload data in chunks, whose size is defined by the ChunkSize field. To disable chunking, set it to NoChunked -- the dirty buffer will not be used, and the data will be written to the request body directly.

The approach to working with this stream is described in the appropriate methods, but in general it is the following:

  1. Create a stream with an Upload with the offset we want to start writing from

  2. Write the data to stream

  3. If some error has interrupted uploading, call the same method again to continue from the last successful offset

The TUS server generally expects that we write the data at the concrete offset it manages. We use the Upload.RemoteOffset field to construct a request. If the UploadStream local offset and the server remote offset are not equal, then this stream is considered "not synced". To sync it with the remote offset, use the Sync method.

To use the checksum data verification feature, use the WithChecksumAlgorithm method. Note that the server must support at least the 'checksum' extension and the hash algorithm you're using. If ChunkSize is set to NoChunked, the server must also support 'checksum-trailer', since the hash is calculated only after all the data has been read and is put into an HTTP trailer.

To use "Deferred length" feature, before the first write, set the Upload.RemoteSize to the particular size and set SetUploadSize field to true. Generally, when using "Deferred length" feature, we create an upload with unknown size, and the server expects that we will tell it the size on the first upload request. So the very first write to UploadStream for a concrete upload (i.e. when RemoteOffset == 0) generates a request with the upload size included.

The errors the stream methods may return, in addition to those of the Client methods, are:

  • ErrOffsetsNotSynced -- the local offset and the server offset are not equal. Call the Sync method to adjust the local offset.

  • ErrChecksumMismatch -- the server detected data corruption (when the checksum verification feature is used)

  • ErrCannotUpload -- unable to write the data to the existing upload. Generally, it means that the upload is full, or it is a concatenated upload, or it does not accept the data for some reason

func NewUploadStream

func NewUploadStream(client *Client, upload *Upload) *UploadStream

NewUploadStream constructs a new upload stream. Receives a Client that will be used to make requests, and an upload object. During the upload process the given upload is modified, first of all its RemoteOffset field.

func (*UploadStream) Dirty

func (us *UploadStream) Dirty() bool

Dirty returns true if the stream has been marked "dirty". This means it contains a data chunk that failed to upload to the server.

func (*UploadStream) ForceClean

func (us *UploadStream) ForceClean()

ForceClean marks the stream as "clean". It erases the data from the dirty buffer.
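
A sketch of how Dirty and ForceClean might be combined when a failed chunk should be discarded rather than retried; the stream setup mirrors the examples above and the location, size and path are placeholders:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 1024 * 1024}
	s := tusgo.NewUploadStream(cl, &u)

	f, err := os.Open("/tmp/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err = io.Copy(s, f); err != nil {
		if s.Dirty() {
			// Discard the chunk that failed to upload instead of retrying it on the next call
			s.ForceClean()
		}
		fmt.Println("upload failed:", err)
	}
}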

func (*UploadStream) Len

func (us *UploadStream) Len() int64

Len returns the upload size

func (*UploadStream) ReadFrom

func (us *UploadStream) ReadFrom(r io.Reader) (n int64, err error)

ReadFrom uploads the data read from r, starting at offset Upload.RemoteOffset. Uploading stops when r is fully drained or the upload becomes full, whichever comes first. Upload.RemoteOffset is continuously updated with the current offset during the process. The return value n is the number of bytes read from r.

Here we read r into the dirty buffer in chunks. When the reading has started, the stream becomes "dirty". If an error occurs in the middle, we keep the failed chunk in the dirty buffer and return an error. The stream remains "dirty". On repeated ReadFrom calls, we try to upload the dirty buffer first before reading r further. If an error occurs again, the dirty buffer is kept as it was.

After the uploading has finished successfully, we clear the dirty buffer, and the stream becomes "clean".

If ChunkSize is set to NoChunked, we copy data from r directly to the request body. We don't use the dirty buffer in this case, so the stream never becomes "dirty". Also, if the checksum feature is used in this case, we put the hash into the HTTP trailer, so the "checksum-trailer" server extension is required.

func (*UploadStream) Seek

func (us *UploadStream) Seek(offset int64, whence int) (int64, error)

Seek moves Upload.RemoteOffset to the requested position. Returns the new offset.
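
A small sketch of the Seek/Tell mechanics; note that Seek only moves the local offset, which must still match the server offset before any write (location and size are placeholders):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 1024 * 1024}
	s := tusgo.NewUploadStream(cl, &u)

	// Move the local offset to the beginning of the upload; Tell reports the same position
	if _, err := s.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	fmt.Println(s.Tell()) // 0
}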

func (*UploadStream) Sync

func (us *UploadStream) Sync() (response *http.Response, err error)

Sync sets the stream offset to be equal to the server offset. Usually this method has to be called before starting the transfer, or when an ErrOffsetsNotSynced error was returned by the UploadStream.

func (*UploadStream) Tell

func (us *UploadStream) Tell() int64

Tell returns the current offset

func (*UploadStream) WithChecksumAlgorithm

func (us *UploadStream) WithChecksumAlgorithm(name string) *UploadStream

WithChecksumAlgorithm sets the checksum algorithm on a copy of the stream and returns it

func (*UploadStream) WithContext

func (us *UploadStream) WithContext(ctx context.Context) *UploadStream

WithContext assigns the given context to a copy of the stream and returns it

func (*UploadStream) Write

func (us *UploadStream) Write(p []byte) (n int, err error)

Write uploads bytes starting at offset Upload.RemoteOffset. Upload.RemoteOffset is continuously updated with the current offset during the process. The return value n is the number of bytes successfully uploaded to the server.

Here we write p to the dirty buffer in chunks. When the writing has started, the stream becomes "dirty". Whether an error occurred in the middle or not, the stream becomes "clean" after the call. If the stream is already "dirty" before the call, we ignore this and clear the dirty buffer.

If ChunkSize is set to NoChunked, we copy the whole given byte slice to the request body. We don't use the dirty buffer in this case, so the stream never becomes "dirty". Also, if the checksum feature is used in this case, we put the hash into the HTTP trailer, so the "checksum-trailer" server extension is required.

If the bytes to be uploaded don't fit into the space left in the upload, we upload what we can and return io.ErrShortWrite.
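
A sketch of handling the short-write case; the upload location and size are placeholders, and the buffer here is deliberately larger than the space left:

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/bdragon300/tusgo"
)

func main() {
	baseURL, _ := url.Parse("http://example.com/files")
	cl := tusgo.NewClient(http.DefaultClient, baseURL)

	u := tusgo.Upload{Location: "http://example.com/files/foo/bar", RemoteSize: 16}
	s := tusgo.NewUploadStream(cl, &u)

	buf := make([]byte, 32) // More data than the upload can hold
	n, err := s.Write(buf)
	switch {
	case errors.Is(err, io.ErrShortWrite):
		fmt.Printf("upload is full, only %d of %d bytes were accepted\n", n, len(buf))
	case err != nil:
		panic(err)
	}
}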

