Merge pull request #667 from kradalby/rerun-docker

Make integration tests retry on failure.
Juan Font, 2022-06-27 17:04:39 +02:00, committed by GitHub
commit e0b15c18ce
5 changed files with 146 additions and 45 deletions

.github/workflows/test-integration.yml

@@ -27,4 +27,9 @@ jobs:
       - name: Run Integration tests
         if: steps.changed-files.outputs.any_changed == 'true'
-        run: nix develop --command -- make test_integration
+        uses: nick-fields/retry@v2
+        with:
+          timeout_minutes: 240
+          max_attempts: 5
+          retry_on: error
+          command: nix develop --command -- make test_integration
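For context: nick-fields/retry@v2 re-runs the wrapped command when it exits non-zero, up to max_attempts times, giving up once timeout_minutes has elapsed. A rough Go sketch of those retry semantics (retryCommand is a hypothetical illustration, not part of this PR):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// retryCommand runs the command up to maxAttempts times, stopping early
// on success or once the overall deadline has passed.
func retryCommand(maxAttempts int, timeout time.Duration, name string, args ...string) error {
	deadline := time.Now().Add(timeout)

	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		cmd := exec.Command(name, args...)
		if err = cmd.Run(); err == nil {
			return nil // success: no retry needed
		}
		fmt.Printf("attempt %d/%d failed: %v\n", attempt, maxAttempts, err)

		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after attempt %d: %w", attempt, err)
		}
	}

	return fmt.Errorf("all %d attempts failed: %w", maxAttempts, err)
}

func main() {
	// Roughly what the workflow step above now does.
	err := retryCommand(5, 240*time.Minute,
		"nix", "develop", "--command", "--", "make", "test_integration")
	if err != nil {
		fmt.Println(err)
	}
}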

integration_cli_test.go

@@ -40,13 +40,13 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}
 
 	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
 		s.network = *pnetwork
 	} else {
-		log.Fatalf("Could not create network: %s", err)
+		s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 	}
 
 	headscaleBuildOptions := &dockertest.BuildOptions{
@@ -56,7 +56,7 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}
 
 	headscaleOptions := &dockertest.RunOptions{
@@ -68,11 +68,16 @@ func (s *IntegrationCLITestSuite) SetupTest() {
 		Cmd: []string{"headscale", "serve"},
 	}
 
+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	fmt.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	fmt.Println("Created headscale container")

integration_common_test.go

@@ -6,7 +6,10 @@ package headscale
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"strconv"
 	"strings"
 	"time"
@@ -16,9 +19,13 @@ import (
 	"inet.af/netaddr"
 )
 
-const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+const (
+	DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+)
 
 var (
+	errEnvVarEmpty = errors.New("getenv: environment variable empty")
+
 	IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
 	IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
@@ -283,3 +290,25 @@ func getMagicFQDN(
 	return hostnames, nil
 }
+
+func GetEnvStr(key string) (string, error) {
+	v := os.Getenv(key)
+	if v == "" {
+		return v, errEnvVarEmpty
+	}
+
+	return v, nil
+}
+
+func GetEnvBool(key string) (bool, error) {
+	s, err := GetEnvStr(key)
+	if err != nil {
+		return false, err
+	}
+
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return false, err
+	}
+
+	return v, nil
+}
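These helpers drive the new opt-in log saving: the callers treat any error, whether the variable is unset or the value is something strconv.ParseBool rejects, as false (ParseBool accepts 1, t, T, TRUE, true, True and the corresponding false spellings). A usage sketch mirroring the suite changes later in this commit (saveLogsEnabled is an illustrative wrapper, not part of the PR):

// saveLogsEnabled reports whether HEADSCALE_INTEGRATION_SAVE_LOG is set
// to a true value, defaulting to false on any error.
func saveLogsEnabled() bool {
	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
	if err != nil {
		return false
	}

	return saveLogs
}

Running the tests with HEADSCALE_INTEGRATION_SAVE_LOG=true keeps the containers alive long enough for their logs to be collected.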

integration_embedded_derp_test.go

@@ -40,41 +40,50 @@ type IntegrationDERPTestSuite struct {
 	pool      dockertest.Pool
 	networks  map[int]dockertest.Network // so we keep the containers isolated
 	headscale dockertest.Resource
+	saveLogs  bool
 
 	tailscales    map[string]dockertest.Resource
 	joinWaitGroup sync.WaitGroup
 }
 
 func TestDERPIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationDERPTestSuite)
 	s.tailscales = make(map[string]dockertest.Resource)
 	s.networks = make(map[int]dockertest.Network)
+	s.saveLogs = saveLogs
 
 	suite.Run(t, s)
 
 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range s.tailscales {
-		if err := s.pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
-		}
-	}
-
-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
-		}
-	}
-
-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
-
-	for _, network := range s.networks {
-		if err := network.Close(); err != nil {
-			log.Printf("Could not close network: %s\n", err)
+	if s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}
+
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
 		}
 	}
 }
@@ -83,14 +92,14 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}
 
 	for i := 0; i < totalContainers; i++ {
 		if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
 			s.networks[i] = *pnetwork
 		} else {
-			log.Fatalf("Could not create network: %s", err)
+			s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 		}
 	}
@@ -101,7 +110,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}
 
 	headscaleOptions := &dockertest.RunOptions{
@@ -120,11 +129,16 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
 		},
 	}
 
+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	log.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	log.Println("Created headscale container to test DERP")
@@ -290,6 +304,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer(
 }
 
 func (s *IntegrationDERPTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, tailscale := range s.tailscales {
+			if err := s.pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		for _, network := range s.networks {
+			if err := network.Close(); err != nil {
+				log.Printf("Could not close network: %s\n", err)
+			}
+		}
+	}
 }
 
 func (s *IntegrationDERPTestSuite) HandleStats(
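The split teardown is the heart of the log-saving change, and the same pattern is applied to IntegrationTestSuite below. As the comment in TestDERPIntegrationTestSuite notes, testify calls TearDownSuite before HandleStats, so a suite that wants to save logs must not purge its containers in TearDownSuite; cleanup is instead deferred until after suite.Run returns, by which point HandleStats has had its chance to read the container logs. When HEADSCALE_INTEGRATION_SAVE_LOG is off (the default), TearDownSuite purges everything immediately and the post-suite.Run cleanup block becomes a no-op.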

integration_test.go

@@ -36,6 +36,7 @@ type IntegrationTestSuite struct {
 	pool      dockertest.Pool
 	network   dockertest.Network
 	headscale dockertest.Resource
+	saveLogs  bool
 
 	namespaces map[string]TestNamespace
@@ -43,6 +44,11 @@ type IntegrationTestSuite struct {
 }
 
 func TestIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationTestSuite)
 
 	s.namespaces = map[string]TestNamespace{
@@ -55,32 +61,35 @@ func TestIntegrationTestSuite(t *testing.T) {
 			tailscales: make(map[string]dockertest.Resource),
 		},
 	}
+	s.saveLogs = saveLogs
 
 	suite.Run(t, s)
 
 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, scales := range s.namespaces {
-		for _, tailscale := range scales.tailscales {
-			if err := s.pool.Purge(&tailscale); err != nil {
-				log.Printf("Could not purge resource: %s\n", err)
+	if s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
 			}
 		}
-	}
 
-	if !s.stats.Passed() {
-		err := s.saveLog(&s.headscale, "test_output")
-		if err != nil {
-			log.Printf("Could not save log: %s\n", err)
+		if !s.stats.Passed() {
+			err := s.saveLog(&s.headscale, "test_output")
+			if err != nil {
+				log.Printf("Could not save log: %s\n", err)
+			}
 		}
-	}
 
-	if err := s.pool.Purge(&s.headscale); err != nil {
-		log.Printf("Could not purge resource: %s\n", err)
-	}
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
 
-	if err := s.network.Close(); err != nil {
-		log.Printf("Could not close network: %s\n", err)
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
 	}
 }
@@ -209,13 +218,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	if ppool, err := dockertest.NewPool(""); err == nil {
 		s.pool = *ppool
 	} else {
-		log.Fatalf("Could not connect to docker: %s", err)
+		s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
 	}
 
 	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
 		s.network = *pnetwork
 	} else {
-		log.Fatalf("Could not create network: %s", err)
+		s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
 	}
 
 	headscaleBuildOptions := &dockertest.BuildOptions{
@@ -225,7 +234,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	currentPath, err := os.Getwd()
 	if err != nil {
-		log.Fatalf("Could not determine current path: %s", err)
+		s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
 	}
 
 	headscaleOptions := &dockertest.RunOptions{
@@ -237,11 +246,16 @@ func (s *IntegrationTestSuite) SetupSuite() {
 		Cmd: []string{"headscale", "serve"},
 	}
 
+	err = s.pool.RemoveContainerByName(headscaleHostname)
+	if err != nil {
+		s.FailNow(fmt.Sprintf("Could not remove existing container before building test: %s", err), "")
+	}
+
 	log.Println("Creating headscale container")
 	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
 		s.headscale = *pheadscale
 	} else {
-		log.Fatalf("Could not start headscale container: %s", err)
+		s.FailNow(fmt.Sprintf("Could not start headscale container: %s", err), "")
 	}
 	log.Println("Created headscale container")
@@ -338,6 +352,23 @@ func (s *IntegrationTestSuite) SetupSuite() {
 }
 
 func (s *IntegrationTestSuite) TearDownSuite() {
+	if !s.saveLogs {
+		for _, scales := range s.namespaces {
+			for _, tailscale := range scales.tailscales {
+				if err := s.pool.Purge(&tailscale); err != nil {
+					log.Printf("Could not purge resource: %s\n", err)
+				}
+			}
+		}
+
+		if err := s.pool.Purge(&s.headscale); err != nil {
+			log.Printf("Could not purge resource: %s\n", err)
+		}
+
+		if err := s.network.Close(); err != nil {
+			log.Printf("Could not close network: %s\n", err)
+		}
+	}
 }
 
 func (s *IntegrationTestSuite) HandleStats(