Golang: uploading a whole directory returns "too many open files"

I am trying to upload a whole directory to the server. It works with small directories, but with 100+ pictures it returns the error "too many open files". I close each file right after it has been read. Any idea how to fix this?

This is my code:

    func uploadDir(path string) error {
        dir, err := os.Open(path)
        if err != nil {
            return err
        }

        files, err := dir.Readdirnames(-1)
        if err != nil {
            return err
        }
        dir.Close()

        errChan := make(chan error)
        resChan := make(chan *client.PutResult)
        remaining := len(files)
        for _, file := range files {
            file := file
            go func() {
                file, err := os.Open(path + "/" + file)
                if err != nil {
                    errChan <- err
                }
                c := client.NewClient(os.Getenv("DROPS_SERVER"))
                res, err := c.Upload(client.NewUploadHandleFromReader(file))
                file.Close()
                if err != nil {
                    errChan <- err
                }
                resChan <- res
            }()
        }

        for {
            select {
            case res := <-resChan:
                log.Println(res)
                remaining--
            case err := <-errChan:
                if err != nil {
                    return err
                }
            }
            if remaining == 0 {
                break
            }
        }
        return nil
    }

The original code does not limit the number of active goroutines and therefore does not limit the number of open file descriptors. Operating systems limit the number of file descriptors a process can have open at once. The fix is to create a fixed number of worker goroutines.

    func uploadDir(path string) error {

        // Read the directory and close it.

        dir, err := os.Open(path)
        if err != nil {
            return err
        }
        names, err := dir.Readdirnames(-1)
        if err != nil {
            return err
        }
        dir.Close()

        // Copy names to a channel for workers to consume. Close the
        // channel so that workers stop when all work is complete.

        namesChan := make(chan string, len(names))
        for _, name := range names {
            namesChan <- name
        }
        close(namesChan)

        // Create a maximum of 8 workers.

        workers := 8
        if len(names) < workers {
            workers = len(names)
        }

        errChan := make(chan error, 1)
        resChan := make(chan *client.PutResult, len(names))

        // Run workers.

        for i := 0; i < workers; i++ {
            go func() {
                // Consume work from namesChan. The loop ends when
                // namesChan is closed and drained.
                for name := range namesChan {
                    file, err := os.Open(filepath.Join(path, name))
                    if err != nil {
                        select {
                        case errChan <- err:
                            // will break the collection loop below
                        default:
                            // don't care, first error wins
                        }
                        return
                    }
                    c := client.NewClient(os.Getenv("DROPS_SERVER"))
                    res, err := c.Upload(client.NewUploadHandleFromReader(file))
                    file.Close()
                    if err != nil {
                        select {
                        case errChan <- err:
                            // will break the collection loop below
                        default:
                            // don't care, first error wins
                        }
                        return
                    }
                    resChan <- res
                }
            }()
        }

        // Collect results from workers.

        for i := 0; i < len(names); i++ {
            select {
            case res := <-resChan:
                log.Println(res)
            case err := <-errChan:
                return err
            }
        }
        return nil
    }

As a bonus, I sized the channels and guarded the send operations so that no goroutine is left blocked when there's an error.
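
For comparison, the same bounded-concurrency, first-error-wins pattern can be written with golang.org/x/sync/errgroup. This is a sketch only, assuming the same client package from the question and a recent errgroup (SetLimit is relatively new):

    g := new(errgroup.Group)
    g.SetLimit(8) // at most 8 uploads, hence 8 open files, at a time

    for _, name := range names {
        name := name // capture for the closure (needed before Go 1.22)
        g.Go(func() error {
            file, err := os.Open(filepath.Join(path, name))
            if err != nil {
                return err
            }
            defer file.Close()

            c := client.NewClient(os.Getenv("DROPS_SERVER"))
            res, err := c.Upload(client.NewUploadHandleFromReader(file))
            if err != nil {
                return err
            }
            log.Println(res)
            return nil
        })
    }
    return g.Wait() // returns the first non-nil error, if any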

To send a whole directory, I would simply archive/compress it locally and then upload the single resulting file.
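
A minimal sketch of that approach using only the standard library's archive/tar and compress/gzip (tarGzDir is a made-up name; regular files only, no symlinks or empty directories):

    package main

    import (
        "archive/tar"
        "compress/gzip"
        "io"
        "os"
        "path/filepath"
    )

    // tarGzDir writes a gzip-compressed tar of dir to w. Only one
    // file is open at a time, so the fd limit is never a problem.
    func tarGzDir(dir string, w io.Writer) error {
        gz := gzip.NewWriter(w)
        defer gz.Close()
        tw := tar.NewWriter(gz)
        defer tw.Close() // runs before gz.Close, flushing the tar first

        return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
            if err != nil || !info.Mode().IsRegular() {
                return err // skip directories, symlinks, etc.
            }
            hdr, err := tar.FileInfoHeader(info, "")
            if err != nil {
                return err
            }
            rel, err := filepath.Rel(dir, path)
            if err != nil {
                return err
            }
            hdr.Name = rel // store paths relative to dir
            if err := tw.WriteHeader(hdr); err != nil {
                return err
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(tw, f)
            return err
        })
    }

You could feed the write end of an io.Pipe to tarGzDir and pass the read end to client.NewUploadHandleFromReader to stream the archive without a temp file, provided the server knows how to unpack it.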

However, if you really want to upload the files individually, an easy trick is to cap the number of concurrent uploads (i.e. the number of open files).

On any system (OS X/Linux; not sure about Windows) there is a per-process limit on open file descriptors. You can raise it manually to allow more, but be careful about your memory consumption. If I recall correctly, the default soft limit is 1024.
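
The shell way to check or raise it is ulimit -n; to do the same from Go itself, the syscall package exposes getrlimit/setrlimit on Unix-like systems (a sketch only; not for Windows, and macOS caps what you can request):

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        var rl syscall.Rlimit
        if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
            panic(err)
        }
        fmt.Printf("soft=%d hard=%d\n", rl.Cur, rl.Max)

        // Raise the soft limit to the hard limit; going beyond the
        // hard limit would need elevated privileges.
        rl.Cur = rl.Max
        if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
            panic(err)
        }
    }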

Playground link for the rate-limited version below: http://play.golang.org/p/yp-vvxiJJx

    package main

    import (
        "log"
        "os"
        // plus the client package from the question
    )

    func uploadDir(path string, maxOpen int) error {
        dir, err := os.Open(path)
        if err != nil {
            return err
        }

        files, err := dir.Readdirnames(-1)
        if err != nil {
            return err
        }
        dir.Close()

        errChan := make(chan error)
        resChan := make(chan *client.PutResult)
        doneChan := make(chan bool)
        remaining := len(files)

        // A semaphore: fill the channel with maxOpen tokens. Each
        // goroutine takes a token before opening its file and puts
        // it back when the upload is finished.
        limit := make(chan struct{}, maxOpen)
        for i := 0; i < maxOpen; i++ {
            limit <- struct{}{}
        }
        for _, file := range files {
            file := file
            go func() {
                <-limit
                defer func() {
                    limit <- struct{}{}
                }()
                file, err := os.Open(path + "/" + file)
                if err != nil {
                    errChan <- err
                    return // don't touch the nil file
                }
                c := client.NewClient(os.Getenv("DROPS_SERVER"))
                res, err := c.Upload(client.NewUploadHandleFromReader(file))
                file.Close()
                if err != nil {
                    errChan <- err
                    return
                }
                resChan <- res
                doneChan <- true
            }()
        }

        for {
            select {
            case <-doneChan:
                remaining--
            case res := <-resChan:
                log.Println(res)
            case err := <-errChan:
                return err
            }
            if remaining == 0 {
                break
            }
        }
        return nil
    }
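
For completeness, a possible entry point (hypothetical values; pick a maxOpen comfortably below your fd limit):

    func main() {
        if err := uploadDir(os.Args[1], 100); err != nil {
            log.Fatal(err)
        }
    }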