Commit

Allow a custom working dir to be configured
mgomes committed Feb 18, 2022
1 parent 43a64e9 commit 97052be
Showing 2 changed files with 35 additions and 20 deletions.
10 changes: 10 additions & 0 deletions README.md
@@ -36,3 +36,13 @@ The boost will set the concurrency level. In typical concurrency scenarios you w
```
dl <file url> -boost 8
```

### Custom Working Directory

By default, `dl` will use the current working directory to store the temporary downloaded parts. If you are downloading to an external disk, you may want to use an internal disk as the working directory. This will speed up the concatenation process after the download is complete.

To override this and set your own working directory, run:

```
dl <file url> -workdir ~/Somewhere/Else
```
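
Both flags can be combined in a single run. As a hypothetical example (the URL and directory are placeholders; the flags are placed before the URL because Go's standard `flag` package stops parsing options at the first non-flag argument):

```
dl -boost 8 -workdir /mnt/fast-ssd <file url>
```
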
45 changes: 25 additions & 20 deletions dl.go
@@ -4,9 +4,11 @@ import (
"flag"
"fmt"
"io"
"log"
"mime"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
@@ -17,13 +19,15 @@ import (
func main() {
filenamePtr := flag.String("filename", "", "custom filename")
boostPtr := flag.Int("boost", 8, "number of concurrent downloads")
workingDirPtr := flag.String("workdir", "", "working directory for downloads")

flag.Parse()

file_uris := flag.Args()

var filesize uint64
var filename string
var workingDir string
var err error

for _, uri := range file_uris {
@@ -37,13 +41,20 @@ func main() {
filename = *filenamePtr
}

if *workingDirPtr != "" {
workingDir = *workingDirPtr
} else {
workingDir, err = os.Getwd()
if err != nil {
log.Println(err)
}
}

fmt.Println(filename)

- fetch(uri, filesize, *boostPtr)
- concatFiles(filename, filesize, *boostPtr)
+ fetch(uri, filesize, workingDir, *boostPtr)
+ concatFiles(filename, filesize, *boostPtr, workingDir)
}

return
}

func fetchMetadata(uri string) (filesize uint64, filename string, err error) {
@@ -75,7 +86,7 @@ func fetchMetadata(uri string) (filesize uint64, filename string, err error) {
return
}

- func fetch(uri string, filesize uint64, boost int) {
+ func fetch(uri string, filesize uint64, dir string, boost int) {
var wg sync.WaitGroup

bar := progressbar.DefaultBytes(
@@ -86,15 +97,13 @@ func fetch(uri string, filesize uint64, boost int) {
for part := 0; part < boost; part++ {
start, end := calculatePartBoundary(filesize, boost, part)
wg.Add(1)
- go fetchPart(&wg, part, uri, start, end, bar)
+ go fetchPart(&wg, part, uri, dir, start, end, bar)
}

wg.Wait()

return
}

- func fetchPart(wg *sync.WaitGroup, part int, uri string, startByte uint64, endByte uint64, bar *progressbar.ProgressBar) {
+ func fetchPart(wg *sync.WaitGroup, part int, uri string, dir string, startByte uint64, endByte uint64, bar *progressbar.ProgressBar) {
defer wg.Done()

byteRange := fmt.Sprintf("bytes=%d-%d", startByte, endByte)
@@ -110,17 +119,15 @@ func fetchPart(wg *sync.WaitGroup, part int, uri string, startByte uint64, endBy
defer resp.Body.Close()

// Create the file
- filename := downloadPartFilename(part)
+ filename := downloadPartFilename(part, dir)
out, err := os.Create(filename)
if err != nil {
return
}
defer out.Close()

// Write the body to file
- _, err = io.Copy(io.MultiWriter(out, bar), resp.Body)
-
- return
+ _, _ = io.Copy(io.MultiWriter(out, bar), resp.Body)
}

func calculatePartBoundary(filesize uint64, totalParts int, part int) (startByte uint64, endByte uint64) {
@@ -145,16 +152,16 @@ func calculatePartBoundary(filesize uint64, totalParts int, part int) (startByte
return
}

- func downloadPartFilename(part int) string {
- return fmt.Sprintf("download.part%d", part)
+ func downloadPartFilename(part int, dir string) string {
+ return path.Join(dir, fmt.Sprintf("download.part%d", part))
}

func filenameFromURI(uri string) string {
splitURI := strings.Split(uri, "/")
return splitURI[len(splitURI)-1]
}

- func concatFiles(filename string, filesize uint64, parts int) {
+ func concatFiles(filename string, filesize uint64, parts int, dir string) {
var readers []io.Reader

bar := progressbar.DefaultBytes(
@@ -163,11 +170,11 @@ func concatFiles(filename string, filesize uint64, parts int) {
)

for part := 0; part < parts; part++ {
- downloadPart, err := os.Open(downloadPartFilename(part))
+ downloadPart, err := os.Open(downloadPartFilename(part, dir))
if err != nil {
panic(err)
}
- defer os.Remove(downloadPartFilename(part))
+ defer os.Remove(downloadPartFilename(part, dir))
defer downloadPart.Close()
readers = append(readers, downloadPart)
}
@@ -183,6 +190,4 @@ func concatFiles(filename string, filesize uint64, parts int) {
if err != nil {
panic(err)
}
-
- return
}
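
As a rough illustration of where the temporary parts end up when a working directory is configured (a minimal standalone sketch; the directory name is made up, and the filename pattern mirrors `downloadPartFilename` above):

```
package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical working directory, e.g. passed via -workdir.
	dir := "/mnt/fast-ssd"

	// With the default boost of 8, the temporary parts are
	// download.part0 ... download.part7 inside that directory.
	for part := 0; part < 8; part++ {
		fmt.Println(path.Join(dir, fmt.Sprintf("download.part%d", part)))
	}
}
```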
