Amd64 support #18

Merged · 6 commits · Dec 1, 2023
25 changes: 19 additions & 6 deletions README.md
@@ -77,12 +77,25 @@ _Dependencies_ steps above, followed by running `cbdinocluster init` again.
 Reinitializing dinocluster will maintain your existing configuration, but will
 apply the necessary colima configurations that were lost during the recreation.
 
+#### High Performance Virtualization
+
+macOS 13+ supports a built-in virtualization hypervisor which significantly
+improves performance compared to the typical QEMU emulation. It can be enabled
+by adding the options shown below to your `colima start` command. If you've
+previously run `colima start`, you will need to follow the
+_Resetting Colima_ steps above to change these options.
+
+```
+colima start --network-address --cpu 4 --memory 6 --arch aarch64 --vm-type=vz --vz-rosetta
+```
+
 #### x86_64 Images
 
 Prior to Couchbase Server 7.1, our docker containers were not built for
-arm64, making it impossible to run them inside an arm64 colima instance.
-You can add the `--arch x86_64` option to `colima start` in this case to
-force colima to run in a virtualized `x86_64` environment. This option will
-incur a performance penalty due to virtualization, but will enable the
-execution of these older versions. If you've previously run `colima start`,
-you will need to follow the _Resetting Colima_ steps above to change these options.
+arm64. On a typical colima instance, these images do not run properly due to
+the massive performance impact of emulating amd64. Using the method described
+in _High Performance Virtualization_ above, we enable Apple's Rosetta
+translation, which allows these instances to execute at nearly native speed.
+Note that due to a bug in Apple's hypervisor framework, some Couchbase Server
+images using old kernels will panic and fail to start; this is fixed in
+macOS 13.5+.
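Under the hood, forcing an amd64 image onto an arm64 host is just a platform-pinned pull. The sketch below shows the idea with the Docker Go SDK; it is illustrative only, the image tag is an example and this is not the project's code:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Request the amd64 variant explicitly, even on an arm64 host;
	// Rosetta (or QEMU) translates it at run time.
	rc, err := cli.ImagePull(context.Background(), "couchbase:6.6.0",
		types.ImagePullOptions{Platform: "linux/amd64"})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream pull progress to the terminal
}
```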
119 changes: 77 additions & 42 deletions cmd/init.go
@@ -2,8 +2,11 @@ package cmd
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
+	"net"
 	"os"
+	"os/exec"
 	"path"
 	"strings"
 	"time"
@@ -143,6 +146,26 @@ var initCmd = &cobra.Command{
 			return "unix://" + dockerSocketPath
 		}
 
+		getColimaAddress := func() string {
+			fmt.Printf("Attempting to fetch colima instance data.\n")
+			out, err := exec.Command("colima", "ls", "-j").Output()
+			if err != nil {
+				fmt.Printf("failed to execute colima: %s", err)
+				return ""
+			}
+
+			var instance struct {
+				Address string `json:"address"`
+			}
+			err = json.Unmarshal(out, &instance)
+			if err != nil {
+				fmt.Printf("failed to unmarshal colima response: %s", err)
+				return ""
+			}
+
+			return instance.Address
+		}
+
 		getGitHubUser := func(token string) string {
 			ts := oauth2.StaticTokenSource(
 				&oauth2.Token{AccessToken: token},
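For context, `colima ls -j` prints one JSON object per instance. A minimal sketch of the shape this helper relies on (field set abbreviated, values illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Example output of `colima ls -j` for a single running instance;
	// the real object carries more fields (name, status, arch, ...).
	out := []byte(`{"name":"default","status":"Running","arch":"aarch64","address":"192.168.106.2"}`)

	var instance struct {
		Address string `json:"address"`
	}
	if err := json.Unmarshal(out, &instance); err != nil {
		panic(err)
	}
	fmt.Println(instance.Address) // 192.168.106.2
}
```

Note that with multiple colima profiles the command emits one object per line, which a single `json.Unmarshal` would reject; the helper above appears to assume exactly one instance.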
@@ -366,56 +389,68 @@ var initCmd = &cobra.Command{
 			fmt.Printf(" %s\n", network.Name)
 		}
 
-		hasMacVlan0 := false
+		hasDinoNet := false
 		for _, network := range networks {
-			if network.Name == "macvlan0" {
-				hasMacVlan0 = true
+			if network.Name == "dinonet" {
+				hasDinoNet = true
 			}
 		}
 
-		if hasMacVlan0 {
-			fmt.Printf("Found a macvlan0 network, this is probably the one you want to use...\n")
+		if hasDinoNet {
+			fmt.Printf("Found a dinonet network, this is probably the one you want to use...\n")
 		} else {
-			var shouldCreateMacVlan0 bool
-			if strings.Contains(dockerHost, "colima") {
-				fmt.Printf("This appears to be colima, so auto-suggesting macvlan0 network.\n")
-				shouldCreateMacVlan0 = true
+			if !strings.Contains(dockerHost, "colima") {
+				fmt.Printf("This does not appear to be colima, cannot auto-create dinonet network.\n")
 			} else {
-				fmt.Printf("This does not appear to be colima, so not auto-suggesting macvlan0 network.\n")
-				shouldCreateMacVlan0 = false
-			}
+				fmt.Printf("This appears to be colima, attempting to identify network information.\n")
 
-			shouldCreateMacVlan0 = readBool("Should we auto-create a colima macvlan0 network?", shouldCreateMacVlan0)
-			if shouldCreateMacVlan0 {
-				fmt.Printf("Creating macvlan0 network.\n")
-				_, err := dockerCli.NetworkCreate(ctx, "macvlan0", types.NetworkCreate{
-					Driver: "macvlan",
-					IPAM: &network.IPAM{
-						Driver: "default",
-						Config: []network.IPAMConfig{
-							{
-								Subnet:  "192.168.106.0/24",
-								IPRange: "192.168.106.128/25",
-								Gateway: "192.168.106.1",
-							},
-						},
-					},
-					Options: map[string]string{
-						"parent": "col0",
-					},
-				})
-				if err != nil {
-					fmt.Printf("Looks like something went wrong creating that network:\n%s\n", err)
+				colimaAddress := getColimaAddress()
+				fmt.Printf("Identified colima address of `%s`\n", colimaAddress)
+
+				colimaIP := net.ParseIP(colimaAddress).To4()
+				if colimaIP == nil {
+					fmt.Printf("Network identification failed, cannot auto-create dinonet network...")
 				} else {
-					fmt.Printf("Autocreation of the network succeeded!\n")
-					hasMacVlan0 = true
+					subnet := fmt.Sprintf("%d.%d.%d.0/24", colimaIP[0], colimaIP[1], colimaIP[2])
+					ipRange := fmt.Sprintf("%d.%d.%d.128/25", colimaIP[0], colimaIP[1], colimaIP[2])
+					gateway := fmt.Sprintf("%d.%d.%d.1", colimaIP[0], colimaIP[1], colimaIP[2])
+
+					shouldCreateDinoNet := readBool("Should we auto-create a colima dinonet network?", true)
+					if shouldCreateDinoNet {
+						fmt.Printf("Creating dinonet network (subnet: %s, %s, %s).\n",
+							subnet, ipRange, gateway)
+
+						_, err := dockerCli.NetworkCreate(ctx, "dinonet", types.NetworkCreate{
+							Driver: "ipvlan",
+							IPAM: &network.IPAM{
+								Driver: "default",
+								Config: []network.IPAMConfig{
+									{
+										Subnet:  subnet,
+										IPRange: ipRange,
+										Gateway: gateway,
+									},
+								},
+							},
+							Options: map[string]string{
+								"parent": "col0",
+							},
+						})
+						if err != nil {
+							fmt.Printf("Looks like something went wrong creating that network:\n%s\n", err)
+						} else {
+							fmt.Printf("Autocreation of the network succeeded!\n")
+							hasDinoNet = true
+							dockerNetwork = "dinonet"
+						}
+					}
 				}
 			}
 		}
 
 		if dockerNetwork == "" {
-			if hasMacVlan0 {
-				dockerNetwork = "macvlan0"
+			if hasDinoNet {
+				dockerNetwork = "dinonet"
 			}
 		}
 		if dockerNetwork == "" {
@@ -949,13 +984,13 @@ func init() {
 	rootCmd.AddCommand(initCmd)
 
 	initCmd.Flags().Bool("auto", false, "Automatically setup without any interactivity")
-	initCmd.Flags().String("disable-docker", "", "Disable Docker")
+	initCmd.Flags().Bool("disable-docker", false, "Disable Docker")
 	initCmd.Flags().String("docker-host", "", "Docker host address to use")
 	initCmd.Flags().String("docker-network", "", "Docker network to use")
-	initCmd.Flags().String("disable-github", "", "Disable GitHub")
+	initCmd.Flags().Bool("disable-github", false, "Disable GitHub")
 	initCmd.Flags().String("github-token", "", "GitHub token to use")
 	initCmd.Flags().String("github-user", "", "GitHub user to use")
-	initCmd.Flags().String("disable-capella", "", "Disable Capella")
+	initCmd.Flags().Bool("disable-capella", false, "Disable Capella")
 	initCmd.Flags().String("capella-endpoint", "", "Capella endpoint to use")
 	initCmd.Flags().String("capella-user", "", "Capella user to use")
 	initCmd.Flags().String("capella-pass", "", "Capella pass to use")
@@ -964,7 +999,7 @@
 	initCmd.Flags().String("capella-aws-region", "", "Capella default AWS region to use")
 	initCmd.Flags().String("capella-azure-region", "", "Capella default Azure region to use")
 	initCmd.Flags().String("capella-gcp-region", "", "Capella default GCP region to use")
-	initCmd.Flags().String("disable-aws", "", "Disable AWS")
+	initCmd.Flags().Bool("disable-aws", false, "Disable AWS")
 	initCmd.Flags().String("aws-region", "", "AWS default region to use")
-	initCmd.Flags().String("disable-azure", "", "Disable Azure")
+	initCmd.Flags().Bool("disable-azure", false, "Disable Azure")
 }
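Switching the `disable-*` flags from `String` to `Bool` also changes how they are passed and read: a Bool flag can be given bare (`--disable-docker`) and retrieved with `GetBool`, which errors on a String flag of the same name. A minimal cobra sketch (hypothetical command, not from this repo):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// GetBool only succeeds for flags registered with Bool.
			disableDocker, _ := cmd.Flags().GetBool("disable-docker")
			fmt.Println("disable-docker:", disableDocker)
		},
	}
	cmd.Flags().Bool("disable-docker", false, "Disable Docker")
	cmd.SetArgs([]string{"--disable-docker"})
	_ = cmd.Execute()
}
```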
14 changes: 5 additions & 9 deletions deployment/dockerdeploy/dockerhubimageprovider.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
@@ -34,14 +33,11 @@ func (p *DockerHubImageProvider) GetImage(ctx context.Context, def *ImageDef) (*ImageRef, error) {
 	}
 
 	dhImagePath := fmt.Sprintf("couchbase:%s", serverVersion)
-	p.Logger.Debug("identified docker image to pull", zap.String("image", dhImagePath))
+	p.Logger.Debug("identified dockerhub image to pull", zap.String("image", dhImagePath))
 
-	err := dockerPullAndPipe(ctx, p.Logger, p.DockerCli, dhImagePath, types.ImagePullOptions{})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to pull from dockerhub registry")
-	}
-
-	return &ImageRef{
+	return MultiArchImagePuller{
+		Logger:    p.Logger,
+		DockerCli: p.DockerCli,
 		ImagePath: dhImagePath,
-	}, nil
+	}.Pull(ctx)
 }
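Both registry providers now funnel through the shared puller, so each `GetImage` reduces to building a `MultiArchImagePuller` and delegating. The contract implied by the signatures is sketched below; this restatement is ours and the repo's actual interface may differ:

```go
package dockerdeploy

import "context"

// imageProvider restates the provider contract implied by the GetImage
// signatures in this diff; illustrative only.
type imageProvider interface {
	GetImage(ctx context.Context, def *ImageDef) (*ImageRef, error)
}
```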
16 changes: 7 additions & 9 deletions deployment/dockerdeploy/ghcrimageprovider.go
@@ -51,14 +51,12 @@ func (p *GhcrImageProvider) GetImage(ctx context.Context, def *ImageDef) (*ImageRef, error) {
 	p.Logger.Debug("pulling image from ghcr")
 
 	ghcrImagePath := fmt.Sprintf("ghcr.io/cb-vanilla/server:%s", serverVersion)
-	err := dockerPullAndPipe(ctx, p.Logger, p.DockerCli, ghcrImagePath, types.ImagePullOptions{
-		RegistryAuth: p.genGhcrAuthStr(),
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to pull from ghcr registry")
-	}
+	p.Logger.Debug("identified ghcr image to pull", zap.String("image", ghcrImagePath))
 
-	return &ImageRef{
-		ImagePath: ghcrImagePath,
-	}, nil
+	return MultiArchImagePuller{
+		Logger:       p.Logger,
+		DockerCli:    p.DockerCli,
+		RegistryAuth: p.genGhcrAuthStr(),
+		ImagePath:    ghcrImagePath,
+	}.Pull(ctx)
 }
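`genGhcrAuthStr` is not shown in this diff; Docker's `RegistryAuth` field is conventionally the base64url-encoded JSON of an auth config, so a plausible sketch (a guess at the shape, not the repo's implementation) is:

```go
package dockerdeploy

import (
	"encoding/base64"
	"encoding/json"

	"github.com/docker/docker/api/types/registry"
)

// ghcrAuthStr builds a RegistryAuth value: base64url-encoded JSON of the
// registry credentials. GHCR accepts a GitHub username plus a token with
// read:packages scope.
func ghcrAuthStr(username, token string) string {
	authConfig := registry.AuthConfig{
		Username: username,
		Password: token,
	}
	buf, _ := json.Marshal(authConfig)
	return base64.URLEncoding.EncodeToString(buf)
}
```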
80 changes: 80 additions & 0 deletions deployment/dockerdeploy/multiarchimagepuller.go
@@ -0,0 +1,80 @@
+package dockerdeploy
+
+import (
+	"context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/client"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+type MultiArchImagePuller struct {
+	Logger       *zap.Logger
+	DockerCli    *client.Client
+	RegistryAuth string
+	ImagePath    string
+}
+
+func (p MultiArchImagePuller) Pull(ctx context.Context) (*ImageRef, error) {
+	images, err := p.DockerCli.ImageList(ctx, types.ImageListOptions{
+		Filters: filters.NewArgs(filters.Arg("reference", p.ImagePath)),
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list images")
+	}
+
+	if len(images) > 0 {
+		imageId := images[0].ID
+		p.Logger.Debug("identified image", zap.String("imageId", imageId))
+		return &ImageRef{ImagePath: imageId}, nil
+	}
+
+	p.Logger.Debug("image is not available locally, attempting to pull")
+
+	err = dockerPullAndPipe(ctx, p.Logger, p.DockerCli, p.ImagePath, types.ImagePullOptions{
+		RegistryAuth: p.RegistryAuth,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to pull from dockerhub registry")
+	}
+
+	images, err = p.DockerCli.ImageList(ctx, types.ImageListOptions{
+		Filters: filters.NewArgs(filters.Arg("reference", p.ImagePath)),
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list images after pull")
+	}
+
+	if len(images) > 0 {
+		imageId := images[0].ID
+		p.Logger.Debug("identified image", zap.String("imageId", imageId))
+		return &ImageRef{ImagePath: imageId}, nil
+	}
+
+	p.Logger.Debug("image is still not available locally, attempting to pull amd64 image")
+
+	err = dockerPullAndPipe(ctx, p.Logger, p.DockerCli, p.ImagePath, types.ImagePullOptions{
+		Platform:     "linux/amd64",
+		RegistryAuth: p.RegistryAuth,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to pull from dockerhub registry")
+	}
+
+	images, err = p.DockerCli.ImageList(ctx, types.ImageListOptions{
+		Filters: filters.NewArgs(filters.Arg("reference", p.ImagePath)),
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to list images after amd64 pull")
+	}
+
+	if len(images) > 0 {
+		imageId := images[0].ID
+		p.Logger.Debug("identified image", zap.String("imageId", imageId))
+		return &ImageRef{ImagePath: imageId}, nil
+	}
+
+	return nil, errors.New("could not find referenced image")
+}
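`Pull` is the core of the PR: try a locally cached image, then a normal pull, then a pull pinned to `linux/amd64` (which Rosetta can execute), returning the resolved image ID at whichever step succeeds. A hedged usage sketch in the style of the providers above (the tag and helper name are illustrative):

```go
package dockerdeploy

import (
	"context"

	"github.com/docker/docker/client"
	"github.com/pkg/errors"
	"go.uber.org/zap"
)

// acquireImage shows how a provider drives the puller; compare the
// dockerhub and ghcr providers earlier in this diff.
func acquireImage(ctx context.Context, logger *zap.Logger, cli *client.Client) (*ImageRef, error) {
	ref, err := MultiArchImagePuller{
		Logger:    logger,
		DockerCli: cli,
		ImagePath: "couchbase:7.2.0", // example tag
	}.Pull(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to acquire image")
	}
	logger.Debug("resolved image", zap.String("image", ref.ImagePath))
	return ref, nil
}
```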
21 changes: 13 additions & 8 deletions utils/clustercontrol/nodemanager.go
@@ -54,15 +54,20 @@ type SetupOneNodeClusterOptions struct {
 func (m *NodeManager) SetupOneNodeCluster(ctx context.Context, opts *SetupOneNodeClusterOptions) error {
 	c := m.Controller()
 
-	err := c.NodeInit(ctx, &NodeInitOptions{
-		Hostname: "127.0.0.1",
-		Afamily:  "ipv4",
-	})
-	if err != nil {
-		return errors.Wrap(err, "failed to setup services")
-	}
+	// While Couchbase Server 7.0+ seems to invoke this as part of cluster initialization,
+	// it does not appear to be necessary for a properly functioning cluster, and it is
+	// not supported on 6.6 and before, so it's just disabled here.
+	/*
+		err := c.NodeInit(ctx, &NodeInitOptions{
+			Hostname: "127.0.0.1",
+			Afamily:  "ipv4",
+		})
+		if err != nil {
+			return errors.Wrap(err, "failed to perform nodeInit")
+		}
+	*/
 
-	err = c.UpdateDefaultPool(ctx, &UpdateDefaultPoolOptions{
+	err := c.UpdateDefaultPool(ctx, &UpdateDefaultPoolOptions{
 		ClusterName:        "test-cluster",
 		KvMemoryQuotaMB:    opts.KvMemoryQuotaMB,
 		IndexMemoryQuotaMB: opts.IndexMemoryQuotaMB,
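For reference, a sketch of invoking this entry point; the quota values are illustrative and other option fields are omitted:

```go
package clustercontrol

import "context"

// setupExample shows a minimal SetupOneNodeCluster call; values are
// examples only.
func setupExample(ctx context.Context, m *NodeManager) error {
	return m.SetupOneNodeCluster(ctx, &SetupOneNodeClusterOptions{
		KvMemoryQuotaMB:    256,
		IndexMemoryQuotaMB: 256,
	})
}
```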