Skip to main contentSkip to user menuSkip to navigation

Go Concurrency

Master Go concurrency: goroutines, channels, select statements, and communicating sequential processes.

30 min read · Intermediate
Not Started
Loading...

What is Go Concurrency?

Go's concurrency model is built around goroutines and channels, implementing Communicating Sequential Processes (CSP). Goroutines are lightweight threads managed by the Go runtime, while channels provide a way for goroutines to communicate and synchronize. This design makes concurrent programming safer and more intuitive than traditional thread-based approaches.

Goroutines

Lightweight threads with 2KB initial stack

Channels

Type-safe communication between goroutines

CSP Model

Don't communicate by sharing memory; share memory by communicating

Go Concurrency Performance Calculator

1,000 goroutines
100 buffer size
10 workers
2.34 MB
Memory Usage
1,000,000
Ops/Second
10%
Context Switch Overhead
800 B
Channel Overhead
50%
GC Pressure
Low
Deadlock Risk

Core Concurrency Concepts

Goroutines

package main

import (
    "fmt"
    "time"
)

// worker simulates a unit of work: it logs a start message, sleeps for
// one second to stand in for real processing, then logs completion.
// It is intended to be launched with the `go` keyword (see main).
func worker(id int) {
    fmt.Printf("Worker %d starting\n", id)
    time.Sleep(time.Second)
    fmt.Printf("Worker %d done\n", id)
}

// Launches three workers concurrently and waits for all of them to
// finish before exiting.
//
// Fix: the original "waited" with time.Sleep(2 * time.Second), which is
// racy — a slow worker would be cut off at exit, and a fast run
// over-waits. A done channel gives a deterministic wait for exactly
// three completions, using no new imports.
func main() {
    done := make(chan struct{})

    // Start 3 goroutines; each signals on done when its worker returns.
    for i := 1; i <= 3; i++ {
        go func(id int) {
            worker(id)
            done <- struct{}{}
        }(i)
    }

    // Wait for goroutines to complete — one signal per worker.
    for i := 0; i < 3; i++ {
        <-done
    }
    fmt.Println("All workers completed")
}

Channels

package main

import "fmt"

// Demonstrates both channel flavors: an unbuffered channel used as a
// rendezvous point, and a buffered channel that queues values FIFO.
func main() {
    // Unbuffered channel: a send blocks until a receiver is ready.
    ch := make(chan string)

    // Producer goroutine; its send completes only when main receives.
    go func() {
        ch <- "Hello from goroutine!"
    }()

    // This receive unblocks the sender above.
    message := <-ch
    fmt.Println(message)

    // Buffered channel: holds up to 3 values with no receiver waiting.
    buffered := make(chan int, 3)
    for v := 1; v <= 3; v++ {
        buffered <- v
    }

    // Values drain in FIFO order: 1, 2, 3.
    for n := 0; n < 3; n++ {
        fmt.Println(<-buffered)
    }
}

Advanced Concurrency Patterns

Worker Pool Pattern

package main

import (
    "fmt"
    "sync"
    "time"
)

// Job is one unit of work submitted to the worker pool. Each job carries
// its own Result channel so the submitter can collect that specific
// job's output directly, instead of demultiplexing a shared channel.
type Job struct {
    ID     int         // sequence number; appears in worker log lines
    Data   string      // payload; not read by the sample workers — presumably for real work, confirm intended use
    Result chan string // per-job reply channel (the submitter creates it buffered)
}

// worker consumes jobs from the shared channel until it is closed and
// drained, simulating 100ms of work per job and replying on the job's
// own Result channel. The WaitGroup is signalled when the worker exits.
func worker(id int, jobs <-chan Job, wg *sync.WaitGroup) {
    defer wg.Done()

    for {
        job, ok := <-jobs
        if !ok {
            // Channel closed and empty: this worker is finished.
            return
        }

        fmt.Printf("Worker %d processing job %d\n", id, job.ID)

        // Simulate work
        time.Sleep(time.Millisecond * 100)

        // Send result back
        job.Result <- fmt.Sprintf("Processed by worker %d", id)
    }
}

// Drives the worker pool: starts a fixed set of workers, submits jobs,
// and waits for BOTH the workers and the per-job result readers.
//
// Fix: the original waited only on the worker WaitGroup, so the
// result-printing goroutines could still be running (or never scheduled)
// when main returned, silently dropping output. A second WaitGroup now
// covers the result readers as well.
func main() {
    const numWorkers = 3
    const numJobs = 10

    jobs := make(chan Job, numJobs)
    var workerWG sync.WaitGroup

    // Start the fixed pool of workers.
    for i := 1; i <= numWorkers; i++ {
        workerWG.Add(1)
        go worker(i, jobs, &workerWG)
    }

    // Submit jobs; spawn one reader goroutine per job to collect its result.
    var resultWG sync.WaitGroup
    for i := 1; i <= numJobs; i++ {
        job := Job{
            ID:     i,
            Data:   fmt.Sprintf("job-%d", i),
            Result: make(chan string, 1), // buffered so workers never block sending
        }
        jobs <- job

        resultWG.Add(1)
        go func(j Job) {
            defer resultWG.Done()
            result := <-j.Result
            fmt.Printf("Job %d result: %s\n", j.ID, result)
        }(job)
    }

    close(jobs)     // no more jobs; workers drain the channel and exit
    workerWG.Wait() // every job processed, every result sent
    resultWG.Wait() // every result received and printed
}

Select Statement

// Waits on two channels at once with select; whichever producer is ready
// first gets handled first. The time.After case guards each wait with a
// 3-second timeout so the loop cannot hang forever.
func main() {
    ch1 := make(chan string)
    ch2 := make(chan string)

    go func() {
        time.Sleep(1 * time.Second)
        ch1 <- "Channel 1"
    }()

    go func() {
        time.Sleep(2 * time.Second)
        ch2 <- "Channel 2"
    }()

    // Expect exactly two messages (one per producer).
    handled := 0
    for handled < 2 {
        select {
        case msg1 := <-ch1:
            fmt.Println("Received:", msg1)
        case msg2 := <-ch2:
            fmt.Println("Received:", msg2)
        case <-time.After(3 * time.Second):
            fmt.Println("Timeout!")
        }
        handled++
    }
}

Context for Cancellation

import "context"

// worker loops until its context is cancelled, performing one unit of
// simulated work (a short sleep plus a log line) per iteration.
func worker(ctx context.Context, id int) {
    for {
        // Non-blocking cancellation check at the top of each pass.
        select {
        case <-ctx.Done():
            fmt.Printf("Worker %d cancelled\n", id)
            return
        default:
        }

        // Do work
        time.Sleep(100 * time.Millisecond)
        fmt.Printf("Worker %d working\n", id)
    }
}

// Launches three cancellable workers under a context with a 2-second
// deadline; main lingers one extra second so the workers' cancellation
// messages have time to print before the process exits.
func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    for id := 1; id <= 3; id++ {
        go worker(ctx, id)
    }

    time.Sleep(3 * time.Second)
}

Real-World Examples

Docker - Container Management

Docker uses Go's concurrency to manage thousands of containers simultaneously.

  • 10,000+ goroutines for container lifecycle management
  • Event-driven architecture using channels for container events
  • Worker pools for image builds and registry operations
  • Sub-millisecond container start coordination

Kubernetes - Orchestration

Kubernetes leverages Go concurrency for managing distributed container orchestration.

  • Controller pattern with goroutines for each resource type
  • Watch loops using channels for API server events
  • Workqueue pattern for reliable event processing
  • 100,000+ API calls/sec handled concurrently

Uber - Microservices

Uber's Go services handle millions of concurrent ride requests and matching.

  • 1M+ concurrent connections per service instance
  • Pub/sub patterns with channels for real-time updates
  • Circuit breaker pattern using goroutines and timeouts
  • Sub-second driver-rider matching at scale

Best Practices

✅ Do

  • Use channels for communication, mutexes for state
  • Always close channels when done sending
  • Use context for cancellation and timeouts
  • Prefer unbuffered channels for synchronization
  • Use sync.WaitGroup for waiting on multiple goroutines

❌ Don't

  • Create unbounded numbers of goroutines
  • Share memory without proper synchronization
  • Send on closed channels (causes panic)
  • Use goroutines for CPU-bound tasks without limits
  • Ignore goroutine leaks (always ensure cleanup)

Common Concurrency Patterns

Fan-out/Fan-in

// fanOut distributes values from a single input channel across the given
// number of worker goroutines. Each worker squares every value it pulls
// from the shared input and publishes the result on its own output
// channel, closing that channel once the input is exhausted.
func fanOut(input <-chan int, workers int) []<-chan int {
    outputs := make([]<-chan int, workers)

    for w := 0; w < workers; w++ {
        ch := make(chan int)
        outputs[w] = ch

        go func(sink chan<- int) {
            // Each input value lands in exactly one worker.
            for v := range input {
                sink <- v * v // square the number
            }
            close(sink)
        }(ch)
    }
    return outputs
}

// fanIn merges any number of input channels into a single output
// channel. A WaitGroup tracks the forwarding goroutines so the merged
// channel is closed exactly once, only after every input has drained.
func fanIn(inputs ...<-chan int) <-chan int {
    merged := make(chan int)
    var pending sync.WaitGroup

    pending.Add(len(inputs))
    for _, src := range inputs {
        go func(in <-chan int) {
            defer pending.Done()
            for v := range in {
                merged <- v
            }
        }(src)
    }

    // Closer goroutine: fires after all forwarders finish.
    go func() {
        pending.Wait()
        close(merged)
    }()

    return merged
}

Pipeline Pattern

// generate is the pipeline's source stage: it emits each argument on a
// fresh channel, then closes it so downstream stages know when to stop.
func generate(nums ...int) <-chan int {
    out := make(chan int)
    go func() {
        for _, v := range nums {
            out <- v
        }
        close(out)
    }()
    return out
}

// square is a pipeline transform stage: it reads ints from in, emits
// each value squared, and closes its output when the input closes.
func square(in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        for v := range in {
            out <- v * v
        }
        close(out)
    }()
    return out
}

// Pipeline usage
func main() {
    // Set up pipeline
    numbers := generate(2, 3, 4, 5)
    squared := square(numbers)
    
    // Consume results
    for result := range squared {
        fmt.Println(result)
    }
}
No quiz questions available
Quiz ID "go-concurrency" not found