diff --git a/cmd/apiserver/main.go b/cmd/apiserver/main.go index 3deb64935f..d794945260 100644 --- a/cmd/apiserver/main.go +++ b/cmd/apiserver/main.go @@ -65,7 +65,14 @@ func main() { Namespace: namespace, Logger: logger, } - rt.Kubernikus, rt.Kubernetes = rest.NewKubeClients() + rt.Kubernikus, rt.Kubernetes, err = rest.NewKubeClients(logger) + if err != nil { + logger.Log( + "msg", "failed to create kubernetes clients", + "err", err) + os.Exit(1) + } + if err := rest.Configure(api, rt); err != nil { logger.Log( "msg", "failed to configure API server", diff --git a/pkg/api/auth/authorizer.go b/pkg/api/auth/authorizer.go index 4cf2b5b743..e0f3a848b2 100644 --- a/pkg/api/auth/authorizer.go +++ b/pkg/api/auth/authorizer.go @@ -58,7 +58,7 @@ func NewOsloPolicyAuthorizer(document *loads.Document, rules map[string]string) //Add this once go-openapi/spec includes this fix: https://github.com/go-openapi/spec/pull/40 //secSchemes := document.Analyzer.SecurityDefinitionsFor(operation) //if _, ok := rules[operation.ID]; !ok && len(secSchemes) > 0 { - // glog.Errorf("No policy found for %s. The api route will not be accessible", operation.ID) + // logger.Log("msg", "policy not found. The api route will not be accessible", "operation", operation.ID) //} } } diff --git a/pkg/api/handlers/create_cluster.go b/pkg/api/handlers/create_cluster.go index 9c1d86873b..dec21c15e8 100644 --- a/pkg/api/handlers/create_cluster.go +++ b/pkg/api/handlers/create_cluster.go @@ -3,7 +3,6 @@ package handlers import ( "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/validate" - "github.com/golang/glog" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,6 +23,7 @@ type createCluster struct { } func (d *createCluster) Handle(params operations.CreateClusterParams, principal *models.Principal) middleware.Responder { + logger := getTracingLogger(params.HTTPRequest) name := params.Body.Name spec := params.Body.Spec @@ -49,7 +49,12 @@ func (d *createCluster) Handle(params operations.CreateClusterParams, principal k8sutil.EnsureNamespace(d.Kubernetes, d.Namespace) kluster, err = d.Kubernikus.Kubernikus().Klusters(d.Namespace).Create(kluster) if err != nil { - glog.Errorf("Failed to create cluster: %s", err) + logger.Log( + "msg", "failed to create cluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "err", err) + if apierrors.IsAlreadyExists(err) { return NewErrorResponse(&operations.CreateClusterDefault{}, 409, "Cluster with name %s already exists", name) } diff --git a/pkg/api/rest/kubeclient.go b/pkg/api/rest/kubeclient.go index 1d005756be..741a9a36bd 100644 --- a/pkg/api/rest/kubeclient.go +++ b/pkg/api/rest/kubeclient.go @@ -1,7 +1,9 @@ package rest import ( - "github.com/golang/glog" + "fmt" + + kitlog "github.com/go-kit/kit/log" "github.com/spf13/pflag" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" kubernetes_clientset "k8s.io/client-go/kubernetes" @@ -19,30 +21,30 @@ func init() { pflag.StringVar(&context, "context", "", "Override context") } -func NewKubeClients() (kubernikus_clientset.Interface, kubernetes_clientset.Interface) { +func NewKubeClients(logger kitlog.Logger) (kubernikus_clientset.Interface, kubernetes_clientset.Interface, error) { client, err := kubernikus.NewClient(kubeconfig, context) if err != nil { - glog.Fatal("Failed to create kubernikus clients: %s", err) + return nil, nil, fmt.Errorf("Failed to create kubernikus clients: %s", err) } - kubernetesClient, err := 
kubernetes.NewClient(kubeconfig, context) + kubernetesClient, err := kubernetes.NewClient(kubeconfig, context, logger) if err != nil { - glog.Fatal("Failed to create kubernetes clients: %s", err) + return nil, nil, fmt.Errorf("Failed to create kubernetes clients: %s", err) } config, err := kubernetes.NewConfig(kubeconfig, context) if err != nil { - glog.Fatalf("Failed to create kubernetes config: %s", err) + return nil, nil, fmt.Errorf("Failed to create kubernetes config: %s", err) } apiextensionsclientset, err := apiextensionsclient.NewForConfig(config) if err != nil { - glog.Fatal("Failed to create apiextenstionsclient: %s", err) + return nil, nil, fmt.Errorf("Failed to create apiextenstionsclient: %s", err) } - if err := kubernetes.EnsureCRD(apiextensionsclientset); err != nil { - glog.Fatalf("Couldn't create CRD: %s", err) + if err := kubernetes.EnsureCRD(apiextensionsclientset, logger); err != nil { + return nil, nil, fmt.Errorf("Couldn't create CRD: %s", err) } - return client, kubernetesClient + return client, kubernetesClient, nil } diff --git a/pkg/client/helm/client.go b/pkg/client/helm/client.go index c5e2fc7da7..1a595900d1 100644 --- a/pkg/client/helm/client.go +++ b/pkg/client/helm/client.go @@ -5,7 +5,7 @@ import ( "os" "runtime" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/helm/pkg/helm" @@ -13,7 +13,8 @@ import ( "github.com/sapcc/kubernikus/pkg/client/helm/portforwarder" ) -func NewClient(kubeClient kubernetes.Interface, kubeConfig *rest.Config) (*helm.Client, error) { +func NewClient(kubeClient kubernetes.Interface, kubeConfig *rest.Config, logger log.Logger) (*helm.Client, error) { + logger = log.With(logger, "client", "helm") tillerHost := os.Getenv("TILLER_DEPLOY_SERVICE_HOST") if tillerHost == "" { @@ -26,7 +27,9 @@ func NewClient(kubeClient kubernetes.Interface, kubeConfig *rest.Config) (*helm. tillerHost = fmt.Sprintf("%s:%s", tillerHost, tillerPort) if _, err := rest.InClusterConfig(); err != nil { - glog.V(2).Info("We are not running inside the cluster. Creating tunnel to tiller pod.") + logger.Log( + "msg", "We are not running inside the cluster. Creating tunnel to tiller pod.", + "v", 2) tunnel, err := portforwarder.New("kube-system", kubeClient, kubeConfig) if err != nil { return nil, err @@ -35,7 +38,10 @@ func NewClient(kubeClient kubernetes.Interface, kubeConfig *rest.Config) (*helm. client := helm.NewClient(helm.Host(tillerHost)) //Lets see how this goes: We close the tunnel as soon as the client is GC'ed. 
runtime.SetFinalizer(client, func(_ *helm.Client) { - glog.V(2).Info("Tearing Down tunnel to tiller at %s", tillerHost) + logger.Log( + "msg", "tearing down tunnel to tiller", + "host", tillerHost, + "v", 2) tunnel.Close() }) return client, nil diff --git a/pkg/client/kubernetes/client.go b/pkg/client/kubernetes/client.go index 70dbfa0c87..acc2b800e9 100644 --- a/pkg/client/kubernetes/client.go +++ b/pkg/client/kubernetes/client.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/golang/glog" + kitlog "github.com/go-kit/kit/log" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -28,20 +28,27 @@ import ( type SharedClientFactory struct { clients *sync.Map secretsInterface typedv1.SecretInterface + Logger kitlog.Logger } -func NewSharedClientFactory(secrets typedv1.SecretInterface, klusterEvents cache.SharedIndexInformer) *SharedClientFactory { +func NewSharedClientFactory(secrets typedv1.SecretInterface, klusterEvents cache.SharedIndexInformer, logger kitlog.Logger) *SharedClientFactory { factory := &SharedClientFactory{ clients: new(sync.Map), secretsInterface: secrets, + Logger: kitlog.With(logger, "client", "kubernetes"), } if klusterEvents != nil { klusterEvents.AddEventHandler(cache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj interface{}) { if kluster, ok := obj.(*kubernikus_v1.Kluster); ok { - glog.V(5).Infof("Deleting shared kubernetes client for kluster %s", kluster.GetName()) factory.clients.Delete(kluster.GetUID()) + factory.Logger.Log( + "msg", "deleted shared kubernetes client", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "v", 2, + ) } }, }) @@ -50,11 +57,20 @@ func NewSharedClientFactory(secrets typedv1.SecretInterface, klusterEvents cache return factory } -func (f *SharedClientFactory) ClientFor(k *kubernikus_v1.Kluster) (kubernetes.Interface, error) { +func (f *SharedClientFactory) ClientFor(k *kubernikus_v1.Kluster) (clientset kubernetes.Interface, err error) { + defer func() { + f.Logger.Log( + "msg", "created shared kubernetes client", + "kluster", k.GetName(), + "project", k.Account(), + "v", 2, + "err", err, + ) + }() + if client, found := f.clients.Load(k.GetUID()); found { return client.(kubernetes.Interface), nil } - glog.V(5).Info("Creating new shared kubernetes client for kluster %s", k.GetName()) secret, err := f.secretsInterface.Get(k.GetName(), metav1.GetOptions{}) if err != nil { return nil, err } @@ -81,7 +97,7 @@ func (f *SharedClientFactory) ClientFor(k *kubernikus_v1.Kluster) (kubernetes.In }, } - clientset, err := kubernetes.NewForConfig(&c) + clientset, err = kubernetes.NewForConfig(&c) if err != nil { return nil, err } @@ -103,15 +119,10 @@ func NewConfig(kubeconfig, context string) (*rest.Config, error) { rules.ExplicitPath = kubeconfig } - config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig() - if err != nil { - glog.Fatalf("Couldn't get Kubernetes default config: %s", err) - } - - return config, nil + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig() } -func NewClient(kubeconfig, context string) (kubernetes.Interface, error) { +func NewClient(kubeconfig, context string, logger kitlog.Logger) (kubernetes.Interface, error) { config, err := NewConfig(kubeconfig, context) if err != nil { return nil, err } @@ -122,7 +133,11 @@ func NewClient(kubeconfig, context string) 
(kubernetes.Interface, error) { return nil, err } - glog.V(3).Infof("Using Kubernetes Api at %s", config.Host) + logger.Log( + "msg", "created new kubernetes client", + "host", config.Host, + "v", 3, + ) return clientset, nil } @@ -162,7 +177,7 @@ func NewClientConfigV1(name, user, url string, key, cert, ca []byte) clientcmdap } } -func EnsureCRD(clientset apiextensionsclient.Interface) error { +func EnsureCRD(clientset apiextensionsclient.Interface, logger kitlog.Logger) error { klusterCRDName := kubernikus_v1.KlusterResourcePlural + "." + kubernikus_v1.GroupName crd := &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ @@ -197,7 +212,10 @@ func EnsureCRD(clientset apiextensionsclient.Interface) error { } case apiextensionsv1beta1.NamesAccepted: if cond.Status == apiextensionsv1beta1.ConditionFalse { - glog.Errorf("Name conflict: %v\n", cond.Reason) + logger.Log( + "msg", "name conflict while ensuring CRD", + "reason", cond.Reason, + ) } } } @@ -213,14 +231,16 @@ func EnsureCRD(clientset apiextensionsclient.Interface) error { return nil } -func WaitForServer(client kubernetes.Interface, stopCh <-chan struct{}) error { +func WaitForServer(client kubernetes.Interface, stopCh <-chan struct{}, logger kitlog.Logger) error { var healthzContent string err := wait.PollUntil(time.Second, func() (bool, error) { healthStatus := 0 resp := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus) if healthStatus != http.StatusOK { - glog.Errorf("Server isn't healthy yet. Waiting a little while.") + logger.Log( + "msg", "server isn't healthy yet. Waiting a little while.", + ) return false, nil } content, _ := resp.Raw() diff --git a/pkg/client/openstack/client.go b/pkg/client/openstack/client.go index d49152509c..63aa49f65b 100644 --- a/pkg/client/openstack/client.go +++ b/pkg/client/openstack/client.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" @@ -49,6 +49,8 @@ type client struct { domainNameToID sync.Map roleNameToID sync.Map + + Logger log.Logger } type Client interface { @@ -165,7 +167,7 @@ func (r *StateExt) UnmarshalJSON(b []byte) error { return nil } -func NewClient(secrets typedv1.SecretInterface, klusterEvents cache.SharedIndexInformer, authURL, username, password, domain, project, projectDomain string) Client { +func NewClient(secrets typedv1.SecretInterface, klusterEvents cache.SharedIndexInformer, authURL, username, password, domain, project, projectDomain string, logger log.Logger) Client { c := &client{ authURL: authURL, @@ -175,12 +177,16 @@ func NewClient(secrets typedv1.SecretInterface, klusterEvents cache.SharedIndexI authProject: project, authProjectDomain: projectDomain, secrets: secrets, + Logger: log.With(logger, "client", "openstack"), } klusterEvents.AddEventHandler(cache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj interface{}) { if kluster, ok := obj.(*kubernikus_v1.Kluster); ok { - glog.V(5).Infof("Deleting shared openstack client for kluster %s", kluster.Name) + c.Logger.Log( + "msg", "deleting shared openstack client", + "kluster", kluster.Name, + "v", 5) c.klusterClients.Delete(kluster.GetUID()) } }, @@ -248,7 +254,13 @@ func (c *client) klusterClientFor(kluster *kubernikus_v1.Kluster) (*gophercloud. 
}, } - glog.V(5).Infof("AuthOptions: %#v", authOptions) + c.Logger.Log( + "msg", "using authOptions from secret", + "identity_endpoint", authOptions.IdentityEndpoint, + "username", authOptions.Username, + "domain_name", authOptions.DomainName, + "project_id", authOptions.Scope.ProjectID, + "v", 5) err = openstack.AuthenticateV3(provider, authOptions, gophercloud.EndpointOpts{}) if err != nil { @@ -526,7 +538,6 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *models.NodePool) err = servers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) { nodes, err = ExtractServers(page) if err != nil { - glog.V(5).Infof("Couldn't extract server %v", err) return false, err } @@ -539,7 +550,22 @@ func (c *client) GetNodes(kluster *kubernikus_v1.Kluster, pool *models.NodePool) return nodes, nil } -func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *models.NodePool, userData []byte) (string, error) { +func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *models.NodePool, userData []byte) (id string, err error) { + var name string + + defer func() { + c.Logger.Log( + "msg", "created node", + "kluster", kluster.Name, + "project", kluster.Account(), + "name", name, + "id", id, + "v", 5, + "err", err) + }() + + name = v1.SimpleNameGenerator.GenerateName(fmt.Sprintf("%v-%v-", kluster.Spec.Name, pool.Name)) + provider, err := c.klusterClientFor(kluster) if err != nil { return "", err @@ -550,8 +576,7 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *models.NodePoo return "", err } - name := v1.SimpleNameGenerator.GenerateName(fmt.Sprintf("%v-%v-", kluster.Spec.Name, pool.Name)) - glog.V(5).Infof("Creating node %v", name) + name = v1.SimpleNameGenerator.GenerateName(fmt.Sprintf("%v-%v-", kluster.Spec.Name, pool.Name)) server, err := servers.Create(client, compute.CreateOpts{ CreateOpts: servers.CreateOpts{ @@ -566,13 +591,23 @@ func (c *client) CreateNode(kluster *kubernikus_v1.Kluster, pool *models.NodePoo }).Extract() if err != nil { - glog.V(5).Infof("Couldn't create node %v: %v", name, err) return "", err } + return server.ID, nil } -func (c *client) DeleteNode(kluster *kubernikus_v1.Kluster, ID string) error { +func (c *client) DeleteNode(kluster *kubernikus_v1.Kluster, ID string) (err error) { + defer func() { + c.Logger.Log( + "msg", "deleted node", + "kluster", kluster.Name, + "project", kluster.Account(), + "id", ID, + "v", 5, + "err", err) + }() + provider, err := c.klusterClientFor(kluster) if err != nil { return err @@ -585,7 +620,6 @@ func (c *client) DeleteNode(kluster *kubernikus_v1.Kluster, ID string) error { err = servers.Delete(client, ID).ExtractErr() if err != nil { - glog.V(5).Infof("Couldn't delete node %v: %v", kluster.Name, err) return err } diff --git a/pkg/cmd/kubernikus/certificates.go b/pkg/cmd/kubernikus/certificates.go index 67e2354979..c75fc36625 100644 --- a/pkg/cmd/kubernikus/certificates.go +++ b/pkg/cmd/kubernikus/certificates.go @@ -1,12 +1,21 @@ package kubernikus import ( + "os" + + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/sapcc/kubernikus/pkg/cmd/kubernikus/certificates" + logutil "github.com/sapcc/kubernikus/pkg/util/log" ) func NewCertificatesCommand() *cobra.Command { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = logutil.NewTrailingNilFilter(logger) + logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", Caller(3)) + c := &cobra.Command{ Use: "certificates", Short: "Debug certificates", @@ -15,7 +24,7 @@ 
func NewCertificatesCommand() *cobra.Command { c.AddCommand( certificates.NewFilesCommand(), certificates.NewPlainCommand(), - certificates.NewSignCommand(), + certificates.NewSignCommand(logger), ) return c diff --git a/pkg/cmd/kubernikus/certificates/sign.go b/pkg/cmd/kubernikus/certificates/sign.go index 32e424f91f..2ef4ff54c7 100644 --- a/pkg/cmd/kubernikus/certificates/sign.go +++ b/pkg/cmd/kubernikus/certificates/sign.go @@ -7,6 +7,7 @@ import ( "os" "github.com/ghodss/yaml" + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,8 +18,8 @@ import ( "github.com/sapcc/kubernikus/pkg/util" ) -func NewSignCommand() *cobra.Command { - o := NewSignOptions() +func NewSignCommand(logger log.Logger) *cobra.Command { + o := NewSignOptions(logger) c := &cobra.Command{ Use: "sign KLUSTER", @@ -41,14 +42,17 @@ type SignOptions struct { CA string Organization string ApiURL string + + Logger log.Logger } -func NewSignOptions() *SignOptions { +func NewSignOptions(logger log.Logger) *SignOptions { return &SignOptions{ Namespace: "kubernikus", CA: "apiserver-clients-ca", CN: os.Getenv("USER"), Organization: "system:masters", + Logger: logger, } } @@ -81,7 +85,7 @@ func (o *SignOptions) Complete(args []string) error { } func (o *SignOptions) Run(c *cobra.Command) error { - client, err := kubernetes.NewClient(o.KubeConfig, "") + client, err := kubernetes.NewClient(o.KubeConfig, "", o.Logger) if err != nil { return err } diff --git a/pkg/cmd/kubernikus/operator.go b/pkg/cmd/kubernikus/operator.go index acb3290578..384585506d 100644 --- a/pkg/cmd/kubernikus/operator.go +++ b/pkg/cmd/kubernikus/operator.go @@ -101,8 +101,13 @@ func (o *Options) Run(c *cobra.Command) error { signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) // Push signals into channel wg := &sync.WaitGroup{} // Goroutines can add themselves to this to be waited on - go controller.NewKubernikusOperator(&o.KubernikusOperatorOptions, logger).Run(stop, wg) - go metrics.ExposeMetrics(o.MetricPort, stop, wg) + operator, err := controller.NewKubernikusOperator(&o.KubernikusOperatorOptions, logger) + if err != nil { + return err + } + + go operator.Run(stop, wg) + go metrics.ExposeMetrics("0.0.0.0", o.MetricPort, stop, wg, logger) <-sigs // Wait for signals (this hangs until a signal arrives) logger.Log("msg", "shutting down", "v", 1) diff --git a/pkg/cmd/kubernikus/seed.go b/pkg/cmd/kubernikus/seed.go index ec4f625ac1..384146067d 100644 --- a/pkg/cmd/kubernikus/seed.go +++ b/pkg/cmd/kubernikus/seed.go @@ -1,19 +1,28 @@ package kubernikus import ( + "os" + + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/sapcc/kubernikus/pkg/cmd/kubernikus/seed" + logutil "github.com/sapcc/kubernikus/pkg/util/log" ) func NewSeedCommand() *cobra.Command { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = logutil.NewTrailingNilFilter(logger) + logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", Caller(3)) + c := &cobra.Command{ Use: "seed", Short: "Seed stuff", } c.AddCommand( - seed.NewKubeDNSCommand(), + seed.NewKubeDNSCommand(logger), ) return c diff --git a/pkg/cmd/kubernikus/seed/dns.go b/pkg/cmd/kubernikus/seed/dns.go index d5c8fa98cb..0c51406ae7 100644 --- a/pkg/cmd/kubernikus/seed/dns.go +++ b/pkg/cmd/kubernikus/seed/dns.go @@ -3,6 +3,7 @@ package seed import ( "errors" + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -11,8 +12,8 @@ import ( 
"github.com/sapcc/kubernikus/pkg/controller/ground/bootstrap/dns" ) -func NewKubeDNSCommand() *cobra.Command { - o := NewKubeDNSOptions() +func NewKubeDNSCommand(logger log.Logger) *cobra.Command { + o := NewKubeDNSOptions(logger) c := &cobra.Command{ Use: "dns", @@ -36,13 +37,16 @@ type KubeDNSOptions struct { version string domain string clusterIP string + + Logger log.Logger } -func NewKubeDNSOptions() *KubeDNSOptions { +func NewKubeDNSOptions(logger log.Logger) *KubeDNSOptions { return &KubeDNSOptions{ repository: dns.DEFAULT_REPOSITORY, version: dns.DEFAULT_VERSION, domain: dns.DEFAULT_DOMAIN, + Logger: logger, } } @@ -67,7 +71,7 @@ func (o *KubeDNSOptions) Complete(args []string) error { } func (o *KubeDNSOptions) Run(c *cobra.Command) error { - client, err := kubernetes.NewClient(o.kubeConfig, o.context) + client, err := kubernetes.NewClient(o.kubeConfig, o.context, o.Logger) if err != nil { return err } diff --git a/pkg/cmd/wormhole/client.go b/pkg/cmd/wormhole/client.go index c4caa84db3..c9e2889404 100644 --- a/pkg/cmd/wormhole/client.go +++ b/pkg/cmd/wormhole/client.go @@ -12,7 +12,7 @@ import ( "syscall" "github.com/databus23/guttle" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/client-go/tools/clientcmd" @@ -21,8 +21,8 @@ import ( "github.com/sapcc/kubernikus/pkg/cmd" ) -func NewClientCommand() *cobra.Command { - o := NewClientOptions() +func NewClientCommand(logger log.Logger) *cobra.Command { + o := NewClientOptions(log.With(logger, "wormhole", "client")) c := &cobra.Command{ Use: "client", @@ -44,11 +44,14 @@ type ClientOptions struct { Server string Context string ListenAddr string + + Logger log.Logger } -func NewClientOptions() *ClientOptions { +func NewClientOptions(logger log.Logger) *ClientOptions { return &ClientOptions{ ListenAddr: "198.18.128.1:6443", + Logger: logger, } } @@ -139,7 +142,10 @@ func (o *ClientOptions) Run(c *cobra.Command) error { Certificates: []tls.Certificate{certificate}, }) if err != nil { - glog.Warningf("Failed to connect to %s: %s", address, err) + o.Logger.Log( + "msg", "failed to open connection", + "address", address, + "err", err) } return conn, err }, @@ -149,7 +155,7 @@ func (o *ClientOptions) Run(c *cobra.Command) error { go func() { <-sigs - glog.Info("Shutting down...") + o.Logger.Log("msg", "Shutting down...") client.Stop() }() return client.Start() diff --git a/pkg/cmd/wormhole/server.go b/pkg/cmd/wormhole/server.go index d3d7a5873a..f0318d4c07 100644 --- a/pkg/cmd/wormhole/server.go +++ b/pkg/cmd/wormhole/server.go @@ -8,7 +8,7 @@ import ( "sync" "syscall" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -16,8 +16,8 @@ import ( "github.com/sapcc/kubernikus/pkg/wormhole" ) -func NewServerCommand() *cobra.Command { - o := NewServerOptions() +func NewServerCommand(logger log.Logger) *cobra.Command { + o := NewServerOptions(log.With(logger, "wormhole", "server")) c := &cobra.Command{ Use: "server", @@ -38,8 +38,10 @@ type ServerOptions struct { wormhole.ServerOptions } -func NewServerOptions() *ServerOptions { - return &ServerOptions{} +func NewServerOptions(logger log.Logger) *ServerOptions { + o := &ServerOptions{} + o.Logger = logger + return o } func (o *ServerOptions) BindFlags(flags *pflag.FlagSet) { @@ -76,7 +78,7 @@ func (o *ServerOptions) Run(c *cobra.Command) error { go server.Run(stop, wg) <-sigs // Wait for signals (this hangs until a signal arrives) - glog.Info("Shutting down...") + 
o.Logger.Log("msg", "Shutting down...") close(stop) // Tell goroutines to stop themselves wg.Wait() // Wait for all to be stopped diff --git a/pkg/cmd/wormhole/wormhole.go b/pkg/cmd/wormhole/wormhole.go index fc773bfa58..04efc4b24d 100644 --- a/pkg/cmd/wormhole/wormhole.go +++ b/pkg/cmd/wormhole/wormhole.go @@ -2,11 +2,21 @@ package wormhole import ( "flag" + "os" + "github.com/go-kit/kit/log" "github.com/spf13/cobra" + + "github.com/sapcc/kubernikus/pkg/cmd/kubernikus" + logutil "github.com/sapcc/kubernikus/pkg/util/log" ) func NewCommand(name string) *cobra.Command { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = logutil.NewTrailingNilFilter(logger) + logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", kubernikus.Caller(3)) + c := &cobra.Command{ Use: name, Short: "Wormhole as a Service", @@ -14,8 +24,8 @@ func NewCommand(name string) *cobra.Command { } c.AddCommand( - NewServerCommand(), - NewClientCommand(), + NewServerCommand(logger), + NewClientCommand(logger), ) c.PersistentFlags().AddGoFlagSet(flag.CommandLine) diff --git a/pkg/controller/ground.go b/pkg/controller/ground.go index 3f4891d032..95e52181c7 100644 --- a/pkg/controller/ground.go +++ b/pkg/controller/ground.go @@ -9,7 +9,7 @@ import ( "time" "github.com/Masterminds/goutils" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "google.golang.org/grpc" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,9 +49,14 @@ type GroundControl struct { queue workqueue.RateLimitingInterface klusterInformer cache.SharedIndexInformer podInformer cache.SharedIndexInformer + + Logger log.Logger } -func NewGroundController(factories config.Factories, clients config.Clients, recorder record.EventRecorder, config config.Config) *GroundControl { +func NewGroundController(factories config.Factories, clients config.Clients, recorder record.EventRecorder, config config.Config, logger log.Logger) *GroundControl { + logger = log.With(logger, + "controller", "ground") + operator := &GroundControl{ Clients: clients, Factories: factories, @@ -60,6 +65,7 @@ func NewGroundController(factories config.Factories, clients config.Clients, rec queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)), klusterInformer: factories.Kubernikus.Kubernikus().V1().Klusters().Informer(), podInformer: factories.Kubernetes.Core().V1().Pods().Informer(), + Logger: logger, } operator.klusterInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -81,7 +87,9 @@ func (op *GroundControl) Run(threadiness int, stopCh <-chan struct{}, wg *sync.W defer op.queue.ShutDown() defer wg.Done() wg.Add(1) - glog.Infof(`Starting GroundControl with %d "threads"`, threadiness) + op.Logger.Log( + "msg", "starting GroundControl", + "threadiness", threadiness) for i := 0; i < threadiness; i++ { go wait.Until(op.runWorker, time.Second, stopCh) @@ -92,7 +100,10 @@ func (op *GroundControl) Run(threadiness int, stopCh <-chan struct{}, wg *sync.W for { select { case <-ticker.C: - glog.V(2).Infof("I now would do reconciliation if its was implemented. 
Next run in %v", KLUSTER_RECHECK_INTERVAL) + op.Logger.Log( + "msg", "I now would do reconciliation if its was implemented", + "kluster_recheck_interval", KLUSTER_RECHECK_INTERVAL, + "v", 2) //op.queue.Add(true) case <-stopCh: ticker.Stop() @@ -122,7 +133,9 @@ func (op *GroundControl) processNextWorkItem() bool { return true } - glog.Warningf("Error running handler: %v", err) + op.Logger.Log( + "msg", "Error running handler", + "err", err) op.queue.AddRateLimited(key) return true @@ -134,10 +147,18 @@ func (op *GroundControl) handler(key string) error { return fmt.Errorf("Failed to fetch key %s from cache: %s", key, err) } if !exists { - glog.Infof("kluster resource %s deleted", key) + op.Logger.Log( + "msg", "kluster resource already deleted", + "kluster", key, + "v", 2) } else { kluster := obj.(*v1.Kluster) - glog.V(5).Infof("Handling kluster %v in phase %q", kluster.Name, kluster.Status.Phase) + op.Logger.Log( + "msg", "handling kluster", + "kluster", kluster.GetName(), + "phase", kluster.Status.Phase, + "project", kluster.Account(), + "v", 5) metrics.SetMetricKlusterInfo(kluster.GetNamespace(), kluster.GetName(), kluster.Status.Version, kluster.Spec.Openstack.ProjectID, kluster.GetAnnotations(), kluster.GetLabels()) metrics.SetMetricKlusterStatusPhase(kluster.GetName(), kluster.Status.Phase) @@ -160,15 +181,28 @@ func (op *GroundControl) handler(key string) error { return nil } - glog.Infof("Creating Kluster %s", kluster.GetName()) + op.Logger.Log( + "msg", "creating kluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "phase", kluster.Status.Phase) + if err := op.createKluster(kluster); err != nil { op.Recorder.Eventf(kluster, api_v1.EventTypeWarning, FailedCreate, "Failed to create cluster: %s", err) return err } if err := op.updatePhase(kluster, models.KlusterPhaseCreating, "Creating Cluster"); err != nil { - glog.Errorf("Failed to update status of kluster %s:%s", kluster.GetName(), err) + op.Logger.Log( + "msg", "failed to update status of kluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "err", err) } - glog.Infof("Kluster %s created", kluster.GetName()) + op.Logger.Log( + "msg", "created kluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "phase", kluster.Status.Phase) } case models.KlusterPhaseCreating: pods, err := op.podInformer.GetIndexer().ByIndex("kluster", kluster.GetName()) @@ -181,7 +215,12 @@ func (op *GroundControl) handler(key string) error { podsReady++ } } - glog.V(5).Infof("%d of %d pods ready for kluster %s", podsReady, len(pods), key) + op.Logger.Log( + "msg", "pod readiness", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "expected", len(pods), + "actual", podsReady) if podsReady == 4 { clientset, err := op.Clients.Satellites.ClientFor(kluster) if err != nil { @@ -191,21 +230,38 @@ func (op *GroundControl) handler(key string) error { return err } if err := op.updatePhase(kluster, models.KlusterPhaseRunning, ""); err != nil { - glog.Errorf("Failed to update status of kluster %s:%s", kluster.GetName(), err) + op.Logger.Log( + "msg", "failed to update status of kluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "err", err) } metrics.SetMetricBootDurationSummary(kluster.GetCreationTimestamp().Time, time.Now()) - glog.Infof("Kluster %s is ready!", kluster.GetName()) + op.Logger.Log( + "msg", "kluster is ready", + "kluster", kluster.GetName(), + "project", kluster.Account()) } case models.KlusterPhaseTerminating: { - glog.Infof("Terminating Kluster %s", 
kluster.GetName()) + op.Logger.Log( + "msg", "terminating kluster", + "kluster", kluster.GetName(), + "project", kluster.Account()) if err := op.terminateKluster(kluster); err != nil { op.Recorder.Eventf(kluster, api_v1.EventTypeWarning, "", "Failed to terminate cluster: %s", err) - glog.Errorf("Failed to terminate kluster %s: %s", kluster.Name, err) + op.Logger.Log( + "msg", "Failed to terminate kluster", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "err", err) return err } metrics.SetMetricKlusterTerminated(kluster.GetName()) - glog.Infof("Terminated kluster %s", kluster.GetName()) + op.Logger.Log( + "msg", "terminated kluster", + "kluster", kluster.GetName(), + "project", kluster.Account()) return nil } } @@ -219,7 +275,9 @@ func (op *GroundControl) klusterAdd(obj interface{}) { if err != nil { return } - glog.Infof("Added kluster resource %s", key) + op.Logger.Log( + "msg", "Added kluster resource", + "key", key) op.queue.Add(key) } @@ -229,7 +287,9 @@ func (op *GroundControl) klusterTerminate(obj interface{}) { if err != nil { return } - glog.Infof("Deleted kluster resource %s", key) + op.Logger.Log( + "msg", "Deleted kluster resource", + "key", key) op.queue.Add(key) } @@ -241,7 +301,9 @@ func (op *GroundControl) klusterUpdate(cur, old interface{}) { if err != nil { return } - glog.Infof("Updated kluster resource %s", key) + op.Logger.Log( + "msg", "Updated kluster resource", + "key", key) op.queue.Add(key) } } @@ -298,7 +360,12 @@ func (op *GroundControl) createKluster(kluster *v1.Kluster) error { return err } - glog.Infof("Creating service user %s", username) + op.Logger.Log( + "msg", "creating service user", + "username", username, + "kluster", kluster.GetName(), + "project", kluster.Account()) + if err := op.Clients.Openstack.CreateKlusterServiceUser( username, password, @@ -320,8 +387,16 @@ func (op *GroundControl) createKluster(kluster *v1.Kluster) error { if err != nil { return err } - glog.Infof("Installing helm release %s", kluster.GetName()) - glog.V(6).Infof("Chart values:\n%s", string(rawValues)) + + op.Logger.Log( + "msg", "Installing helm release", + "kluster", kluster.GetName(), + "project", kluster.Account()) + + op.Logger.Log( + "msg", "Debug Chart Values", + "values", string(rawValues), + "v", 6) _, err = op.Clients.Helm.InstallRelease(path.Join(op.Config.Helm.ChartDirectory, "kube-master"), kluster.Namespace, helm.ValueOverrides(rawValues), helm.ReleaseName(kluster.GetName())) return err @@ -335,13 +410,23 @@ func (op *GroundControl) terminateKluster(kluster *v1.Kluster) error { username := string(secret.Data["openstack-username"]) domain := string(secret.Data["openstack-domain-name"]) - glog.Infof("Deleting openstack user %s@%s", username, domain) + op.Logger.Log( + "msg", "Deleting openstack user", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "username", username, + "domain", domain) + if err := op.Clients.Openstack.DeleteUser(username, domain); err != nil { return err } } - glog.Infof("Deleting helm release %s", kluster.GetName()) + op.Logger.Log( + "msg", "Deleting helm release", + "kluster", kluster.GetName(), + "project", kluster.Account()) + _, err := op.Clients.Helm.DeleteRelease(kluster.GetName(), helm.DeletePurge(true)) if err != nil && !strings.Contains(grpc.ErrorDesc(err), fmt.Sprintf(`release: "%s" not found`, kluster.GetName())) { return err @@ -372,7 +457,11 @@ func (op *GroundControl) requiresKubernikusInfo(kluster *v1.Kluster) bool { } func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) 
error { - glog.V(5).Infof("[%v] Discovering KubernikusInfo", kluster.Name) + op.Logger.Log( + "msg", "discovering KubernikusInfo", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "v", 5) copy, err := op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Get(kluster.Name, metav1.GetOptions{}) if err != nil { @@ -381,12 +470,20 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error { if copy.Status.Apiserver == "" { copy.Status.Apiserver = fmt.Sprintf("https://%s.%s", kluster.GetName(), op.Config.Kubernikus.Domain) - glog.Infof("[%v] Setting ServerURL to %v", kluster.Name, copy.Status.Apiserver) + op.Logger.Log( + "msg", "discovered ServerURL", + "url", copy.Status.Apiserver, + "kluster", kluster.GetName(), + "project", kluster.Account()) } if copy.Status.Wormhole == "" { copy.Status.Wormhole = fmt.Sprintf("https://%s-wormhole.%s", kluster.GetName(), op.Config.Kubernikus.Domain) - glog.Infof("[%v] Setting WormholeURL to %v", kluster.Name, copy.Status.Wormhole) + op.Logger.Log( + "msg", "discovered WormholeURL", + "url", copy.Status.Wormhole, + "kluster", kluster.GetName(), + "project", kluster.Account()) } _, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy) @@ -394,7 +491,11 @@ func (op *GroundControl) discoverKubernikusInfo(kluster *v1.Kluster) error { } func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { - glog.V(5).Infof("[%v] Discovering OpenstackInfo", kluster.Name) + op.Logger.Log( + "msg", "discovering OpenstackInfo", + "kluster", kluster.GetName(), + "project", kluster.Account(), + "v", 5) routers, err := op.Clients.Openstack.GetRouters(kluster.Account()) if err != nil { @@ -408,7 +509,11 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { if copy.Spec.Openstack.ProjectID == "" { copy.Spec.Openstack.ProjectID = kluster.Account() - glog.Infof("[%v] Setting ProjectID to %v", kluster.Name, copy.Spec.Openstack.ProjectID) + op.Logger.Log( + "msg", "discovered ProjectID", + "id", copy.Spec.Openstack.ProjectID, + "kluster", kluster.GetName(), + "project", kluster.Account()) } var selectedRouter *openstack.Router @@ -426,7 +531,11 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { } else { if numRouters := len(routers); numRouters == 1 { selectedRouter = &routers[0] - glog.Infof("[%v] Setting RouterID to %v", kluster.Name, selectedRouter.ID) + op.Logger.Log( + "msg", "discovered RouterID", + "id", selectedRouter.ID, + "kluster", kluster.GetName(), + "project", kluster.Account()) copy.Spec.Openstack.RouterID = selectedRouter.ID } else { return fmt.Errorf("Found %d routers in project. Autoconfiguration not possible.", numRouters) @@ -450,7 +559,11 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { if numNetworks := len(selectedRouter.Networks); numNetworks == 1 { selectedNetwork = &selectedRouter.Networks[0] copy.Spec.Openstack.NetworkID = selectedNetwork.ID - glog.Infof("[%v] Setting NetworkID to %v", kluster.Name, selectedNetwork.ID) + op.Logger.Log( + "msg", "discovered NetworkID", + "id", selectedNetwork.ID, + "kluster", kluster.GetName(), + "project", kluster.Account()) } else { return fmt.Errorf("Found %d networks on router %s. Auto-configuration not possible. 
Please choose one.", numNetworks, selectedRouter.ID) @@ -471,7 +584,11 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { } else { if numSubnets := len(selectedNetwork.Subnets); numSubnets == 1 { copy.Spec.Openstack.LBSubnetID = selectedNetwork.Subnets[0].ID - glog.V(5).Infof("[%v] Setting LBSubnetID to %v", kluster.Name, copy.Spec.Openstack.LBSubnetID) + op.Logger.Log( + "msg", "discovered LBSubnetID", + "id", copy.Spec.Openstack.LBSubnetID, + "kluster", kluster.GetName(), + "project", kluster.Account()) } else { return fmt.Errorf("Found %d subnets for network %s. Auto-configuration not possible. Please choose one.", numSubnets, selectedNetwork.ID) } @@ -485,8 +602,12 @@ func (op *GroundControl) discoverOpenstackInfo(kluster *v1.Kluster) error { if err != nil { return fmt.Errorf("Failed to get id for default securitygroup in project %s: %s", err, kluster.Account()) } - glog.V(5).Infof("[%v] Setting SecurityGroupID to %v", kluster.Name, copy.Spec.Openstack.SecurityGroupID) copy.Spec.Openstack.SecurityGroupID = id + op.Logger.Log( + "msg", "discovered SecurityGroupID", + "id", copy.Spec.Openstack.SecurityGroupID, + "kluster", kluster.GetName(), + "project", kluster.Account()) } _, err = op.Clients.Kubernikus.Kubernikus().Klusters(kluster.Namespace).Update(copy) @@ -498,7 +619,11 @@ func (op *GroundControl) podAdd(obj interface{}) { if klusterName, found := pod.GetLabels()["release"]; found && len(klusterName) > 0 { klusterKey := pod.GetNamespace() + "/" + klusterName - glog.V(5).Infof("Pod %s added for kluster %s", pod.GetName(), klusterKey) + op.Logger.Log( + "msg", "pod added", + "name", pod.GetName(), + "kluster", klusterKey, + "v", 5) op.queue.Add(klusterKey) } @@ -508,7 +633,11 @@ func (op *GroundControl) podDelete(obj interface{}) { pod := obj.(*api_v1.Pod) if klusterName, found := pod.GetLabels()["release"]; found && len(klusterName) > 0 { klusterKey := pod.GetNamespace() + "/" + klusterName - glog.V(5).Infof("Pod %s deleted for kluster %s", pod.GetName(), klusterKey) + op.Logger.Log( + "msg", "pod deleted", + "name", pod.GetName(), + "kluster", klusterKey, + "v", 5) op.queue.Add(klusterKey) } } @@ -519,7 +648,11 @@ func (op *GroundControl) podUpdate(cur, old interface{}) { if klusterName, found := pod.GetLabels()["release"]; found && len(klusterName) > 0 { if !reflect.DeepEqual(oldPod, pod) { klusterKey := pod.GetNamespace() + "/" + klusterName - glog.V(5).Infof("Pod %s updated for kluster %s", pod.GetName(), klusterKey) + op.Logger.Log( + "msg", "pod updated", + "name", pod.GetName(), + "kluster", klusterKey, + "v", 5) op.queue.Add(klusterKey) } } diff --git a/pkg/controller/launch/pool_manager.go b/pkg/controller/launch/pool_manager.go index 97ceec9aca..0581ca444a 100644 --- a/pkg/controller/launch/pool_manager.go +++ b/pkg/controller/launch/pool_manager.go @@ -108,7 +108,7 @@ func (cpm *ConcretePoolManager) CreateNode() (id string, err error) { return "", err } - userdata, err := templates.Ignition.GenerateNode(cpm.Kluster, secret) + userdata, err := templates.Ignition.GenerateNode(cpm.Kluster, secret, cpm.Logger) if err != nil { return "", err } diff --git a/pkg/controller/metrics/metrics.go b/pkg/controller/metrics/metrics.go index ebb56317f1..f2fdfdfa2a 100644 --- a/pkg/controller/metrics/metrics.go +++ b/pkg/controller/metrics/metrics.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -180,8 
+180,12 @@ func init() { ) } -func ExposeMetrics(metricPort int, stopCh <-chan struct{}, wg *sync.WaitGroup) error { - glog.Infof("Exposing metrics on localhost:%v/metrics ", metricPort) +func ExposeMetrics(host string, metricPort int, stopCh <-chan struct{}, wg *sync.WaitGroup, logger log.Logger) error { + logger.Log( + "msg", "Exposing metrics", + "host", host, + "port", metricPort, + "path", "/metrics") defer wg.Done() wg.Add(1) for { @@ -191,7 +195,7 @@ func ExposeMetrics(metricPort int, stopCh <-chan struct{}, wg *sync.WaitGroup) e default: http.Handle("/metrics", promhttp.Handler()) return http.ListenAndServe( - fmt.Sprintf("0.0.0.0:%v", metricPort), + fmt.Sprintf("%v:%v", host, metricPort), nil, ) } diff --git a/pkg/controller/operator.go b/pkg/controller/operator.go index a8e7ad76bf..54e6c2fb5c 100644 --- a/pkg/controller/operator.go +++ b/pkg/controller/operator.go @@ -7,7 +7,6 @@ import ( "time" "github.com/go-kit/kit/log" - "github.com/golang/glog" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,7 +58,7 @@ type KubernikusOperator struct { config.Config config.Factories config.Clients - logger log.Logger + Logger log.Logger } const ( @@ -75,7 +74,7 @@ var ( } ) -func NewKubernikusOperator(options *KubernikusOperatorOptions, logger log.Logger) *KubernikusOperator { +func NewKubernikusOperator(options *KubernikusOperatorOptions, logger log.Logger) (*KubernikusOperator, error) { var err error o := &KubernikusOperator{ @@ -98,36 +97,36 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions, logger log.Logger Controllers: make(map[string]config.Controller), }, }, - logger: logger, + Logger: logger, } - o.Clients.Kubernetes, err = kube.NewClient(options.KubeConfig, options.Context) + o.Clients.Kubernetes, err = kube.NewClient(options.KubeConfig, options.Context, logger) if err != nil { - glog.Fatalf("Failed to create kubernetes clients: %s", err) + return nil, fmt.Errorf("Failed to create kubernetes clients: %s", err) } o.Clients.Kubernikus, err = kubernikus.NewClient(options.KubeConfig, options.Context) if err != nil { - glog.Fatalf("Failed to create kubernikus clients: %s", err) + return nil, fmt.Errorf("Failed to create kubernikus clients: %s", err) } config, err := kube.NewConfig(options.KubeConfig, options.Context) if err != nil { - glog.Fatalf("Failed to create kubernetes config: %s", err) + return nil, fmt.Errorf("Failed to create kubernetes config: %s", err) } - o.Clients.Helm, err = helmutil.NewClient(o.Clients.Kubernetes, config) + o.Clients.Helm, err = helmutil.NewClient(o.Clients.Kubernetes, config, logger) if err != nil { - glog.Fatalf("Failed to create helm client: %s", err) + return nil, fmt.Errorf("Failed to create helm client: %s", err) } apiextensionsclientset, err := apiextensionsclient.NewForConfig(config) if err != nil { - glog.Fatal("Failed to create apiextenstionsclient: %s", err) + return nil, fmt.Errorf("Failed to create apiextenstionsclient: %s", err) } - if err := kube.EnsureCRD(apiextensionsclientset); err != nil { - glog.Fatalf("Couldn't create CRD: %s", err) + if err := kube.EnsureCRD(apiextensionsclientset, logger); err != nil { + return nil, fmt.Errorf("Couldn't create CRD: %s", err) } o.Factories.Kubernikus = kubernikus_informers.NewSharedInformerFactory(o.Clients.Kubernikus, DEFAULT_RECONCILIATION) @@ -144,34 +143,50 @@ func NewKubernikusOperator(options *KubernikusOperatorOptions, logger log.Logger options.AuthDomain, 
options.AuthProject, options.AuthProjectDomain, + logger, ) - o.Clients.Satellites = kube.NewSharedClientFactory(secrets, klusters) + o.Clients.Satellites = kube.NewSharedClientFactory(secrets, klusters, logger) // Add kubernikus types to the default Kubernetes Scheme so events can be // logged for those types. v1.AddToScheme(scheme.Scheme) eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartEventWatcher(func(e *api_v1.Event) { + logger.Log( + "controller", "operator", + "resource", "event", + "msg", e.Message, + "reason", e.Reason, + "type", e.Type, + "kind", e.InvolvedObject.Kind, + "namespace", e.InvolvedObject.Namespace, + "name", e.InvolvedObject.Name, + "v", 2) + }) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: o.Clients.Kubernetes.CoreV1().Events(o.Config.Kubernikus.Namespace)}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, api_v1.EventSource{Component: "operator"}) for _, k := range options.Controllers { switch k { case "groundctl": - o.Config.Kubernikus.Controllers["groundctl"] = NewGroundController(o.Factories, o.Clients, recorder, o.Config) + o.Config.Kubernikus.Controllers["groundctl"] = NewGroundController(o.Factories, o.Clients, recorder, o.Config, logger) case "launchctl": o.Config.Kubernikus.Controllers["launchctl"] = launch.NewController(o.Factories, o.Clients, recorder, logger) } } - return o + return o, err } func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { - glog.Infof("Starting kubernikus operator v%v. Using namespace %s\n", version.GitCommit, o.Config.Kubernikus.Namespace) + o.Logger.Log( + "msg", "starting kubernikus operator", + "namespace", o.Config.Kubernikus.Namespace, + "version", version.GitCommit) - kube.WaitForServer(o.Clients.Kubernetes, stopCh) + kube.WaitForServer(o.Clients.Kubernetes, stopCh, o.Logger) o.Factories.Kubernikus.Start(stopCh) o.Factories.Kubernetes.Start(stopCh) @@ -179,7 +194,7 @@ func (o *KubernikusOperator) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { o.Factories.Kubernikus.WaitForCacheSync(stopCh) o.Factories.Kubernetes.WaitForCacheSync(stopCh) - glog.Info("Cache primed. Ready for Action!") + o.Logger.Log("msg", "Cache primed. 
Ready for Action!") for name, controller := range o.Config.Kubernikus.Controllers { go controller.Run(CONTROLLER_OPTIONS[name], stopCh, wg) @@ -193,10 +208,8 @@ func MetaLabelReleaseIndexFunc(obj interface{}) ([]string, error) { return []string{""}, fmt.Errorf("object has no meta: %v", err) } if release, found := meta.GetLabels()["release"]; found { - glog.V(6).Infof("Found release %v for pod %v", release, meta.GetName()) return []string{release}, nil } - glog.V(6).Infof("meta labels: %v", meta.GetLabels()) return []string{""}, errors.New("object has no release label") } diff --git a/pkg/templates/ignition.go b/pkg/templates/ignition.go index 2d58a5a9d0..2b2356eacd 100644 --- a/pkg/templates/ignition.go +++ b/pkg/templates/ignition.go @@ -9,7 +9,8 @@ import ( "github.com/Masterminds/sprig" "github.com/coreos/container-linux-config-transpiler/config" "github.com/coreos/container-linux-config-transpiler/config/platform" - "github.com/golang/glog" + "github.com/coreos/ignition/config/validate/report" + "github.com/go-kit/kit/log" "k8s.io/client-go/pkg/api/v1" kubernikusv1 "github.com/sapcc/kubernikus/pkg/apis/kubernikus/v1" @@ -35,8 +36,7 @@ var Ignition = &ignition{ }, } -func (i *ignition) GenerateNode(kluster *kubernikusv1.Kluster, secret *v1.Secret) ([]byte, error) { - +func (i *ignition) GenerateNode(kluster *kubernikusv1.Kluster, secret *v1.Secret, logger log.Logger) ([]byte, error) { for _, field := range i.requiredNodeSecrets { if _, ok := secret.Data[field]; !ok { return nil, fmt.Errorf("Field %s missing in secret", field) @@ -90,18 +90,28 @@ func (i *ignition) GenerateNode(kluster *kubernikusv1.Kluster, secret *v1.Secret KubernikusImageTag: version.GitCommit, } + var dataOut []byte var buffer bytes.Buffer + var report report.Report + + defer func() { + logger.Log( + "msg", "ignition debug", + "data", data, + "yaml", string(buffer.Bytes()), + "json", string(dataOut), + "report", report.String(), + "v", 6, + "err", err) + }() + err = tmpl.Execute(&buffer, data) if err != nil { return nil, err } - glog.V(6).Infof("IgnitionData: %v", data) - glog.V(6).Infof("IgnitionYAML: %v", string(buffer.Bytes())) - ignitionConfig, ast, report := config.Parse(buffer.Bytes()) if len(report.Entries) > 0 { - glog.V(2).Infof("Something odd while transpiling ignition file: %v", report.String()) if report.IsFatal() { return nil, fmt.Errorf("Couldn't transpile ignition file: %v", report.String()) } @@ -109,18 +119,14 @@ func (i *ignition) GenerateNode(kluster *kubernikusv1.Kluster, secret *v1.Secret ignitionConfig2_0, report := config.ConvertAs2_0(ignitionConfig, platform.OpenStackMetadata, ast) if len(report.Entries) > 0 { - glog.V(2).Infof("Something odd while convertion ignition config: %v", report.String()) if report.IsFatal() { return nil, fmt.Errorf("Couldn't convert ignition config: %v", report.String()) } } - var dataOut []byte dataOut, err = json.MarshalIndent(&ignitionConfig2_0, "", " ") dataOut = append(dataOut, '\n') - glog.V(6).Infof("IgnitionJSON: %v", string(dataOut)) - if err != nil { return nil, err } diff --git a/pkg/templates/ignition_test.go b/pkg/templates/ignition_test.go index 047afd40b2..9c12b91f79 100644 --- a/pkg/templates/ignition_test.go +++ b/pkg/templates/ignition_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/go-kit/kit/log" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/pkg/api/v1" @@ -42,6 +43,6 @@ func TestGenerateNode(t *testing.T) { ObjectMeta: kluster.ObjectMeta, Data: secretData, } - _, err := 
Ignition.GenerateNode(&kluster, &secret) + _, err := Ignition.GenerateNode(&kluster, &secret, log.NewNopLogger()) assert.NoError(t, err) } diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go index f08069b800..c7de058d5c 100644 --- a/pkg/util/iptables/iptables.go +++ b/pkg/util/iptables/iptables.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "k8s.io/apimachinery/pkg/util/sets" utilexec "k8s.io/utils/exec" @@ -121,16 +121,20 @@ type runner struct { waitFlag []string restoreWaitFlag []string lockfilePath string + logger log.Logger reloadFuncs []func() } // newInternal returns a new Interface which will exec iptables, and allows the // caller to change the iptables-restore lockfile path -func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath string) Interface { +func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath string, logger log.Logger) Interface { vstring, err := getIPTablesVersionString(exec) if err != nil { - glog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) + logger.Log( + "msg", "Error checking iptables version", + "min_version", MinCheckVersion, + "err", err) vstring = MinCheckVersion } @@ -141,18 +145,19 @@ func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath string runner := &runner{ exec: exec, protocol: protocol, - hasCheck: getIPTablesHasCheckCommand(vstring), - waitFlag: getIPTablesWaitFlag(vstring), - restoreWaitFlag: getIPTablesRestoreWaitFlag(exec), + hasCheck: getIPTablesHasCheckCommand(vstring, logger), + waitFlag: getIPTablesWaitFlag(vstring, logger), + restoreWaitFlag: getIPTablesRestoreWaitFlag(exec, logger), lockfilePath: lockfilePath, + logger: logger, } return runner } // New returns a new Interface which will exec iptables. -func New(exec utilexec.Interface, protocol Protocol) Interface { - return newInternal(exec, protocol, "") +func New(exec utilexec.Interface, protocol Protocol, logger log.Logger) Interface { + return newInternal(exec, protocol, "", logger) } // Destroy is part of Interface. @@ -265,7 +270,10 @@ func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error { // run and return args := []string{"-t", string(table)} - glog.V(4).Infof("running iptables-save %v", args) + runner.logger.Log( + "msg", "running iptables-save", + "args", args, + "v", 4) cmd := runner.exec.Command(cmdIPTablesSave, args...) // Since CombinedOutput() doesn't support redirecting it to a buffer, // we need to workaround it by redirecting stdout and stderr to buffer @@ -317,14 +325,19 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla } defer func(locker iptablesLocker) { if err := locker.Close(); err != nil { - glog.Errorf("Failed to close iptables locks: %v", err) + runner.logger.Log( + "msg", "Failed to close iptables locks", + "err", err) } }(locker) } // run the command and return the output or an error including the output and error fullArgs := append(runner.restoreWaitFlag, args...) - glog.V(4).Infof("running iptables-restore %v", fullArgs) + runner.logger.Log( + "msg", "running iptables-restore", + "args", fullArgs, + "v", 4) cmd := runner.exec.Command(cmdIPTablesRestore, fullArgs...) cmd.SetStdin(bytes.NewBuffer(data)) b, err := cmd.CombinedOutput() @@ -347,7 +360,11 @@ func (runner *runner) run(op operation, args []string) ([]byte, error) { fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) 
- glog.V(5).Infof("running iptables %s %v", string(op), args) + runner.logger.Log( + "msg", "running iptables", + "op", string(op), + "args", args, + "v", 5) return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput() // Don't log err here - callers might not think it is an error. } @@ -372,7 +389,9 @@ func trimhex(s string) string { // Present for compatibility with <1.4.11 versions of iptables. This is full // of hack and half-measures. We should nix this ASAP. func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) { - glog.V(1).Infof("running iptables-save -t %s", string(table)) + runner.logger.Log( + "msg", "running iptables-sav", + "table", string(table)) out, err := runner.exec.Command(cmdIPTablesSave, "-t", string(table)).CombinedOutput() if err != nil { return false, fmt.Errorf("error checking rule: %v", err) @@ -411,7 +430,11 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st if sets.NewString(fields...).IsSuperset(argset) { return true, nil } - glog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) + runner.logger.Log( + "msg", "DBG: fields is not a superset of args", + "fields", fields, + "args", args, + "v", 5) } return false, nil @@ -449,31 +472,43 @@ func makeFullArgs(table Table, chain Chain, args ...string) []string { } // Checks if iptables has the "-C" flag -func getIPTablesHasCheckCommand(vstring string) bool { +func getIPTablesHasCheckCommand(vstring string, logger log.Logger) bool { minVersion, err := utilversion.ParseGeneric(MinCheckVersion) if err != nil { - glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err) + logger.Log( + "msg", "MinCheckVersion is not a valid version string", + "min_version", MinCheckVersion, + "err", err) return true } version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + logger.Log( + "msg", "vstring is not a valid version string", + "vstring", vstring, + "err", err) return true } return version.AtLeast(minVersion) } // Checks if iptables version has a "wait" flag -func getIPTablesWaitFlag(vstring string) []string { +func getIPTablesWaitFlag(vstring string, logger log.Logger) []string { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + logger.Log( + "msg", "vstring is not a valid version string", + "vstring", vstring, + "err", err) return nil } minVersion, err := utilversion.ParseGeneric(MinWaitVersion) if err != nil { - glog.Errorf("MinWaitVersion (%s) is not a valid version string: %v", MinWaitVersion, err) + logger.Log( + "msg", "MinWaitVersion is not a valid version string", + "min_wait_version", MinWaitVersion, + "err", err) return nil } if version.LessThan(minVersion) { @@ -482,7 +517,10 @@ func getIPTablesWaitFlag(vstring string) []string { minVersion, err = utilversion.ParseGeneric(MinWait2Version) if err != nil { - glog.Errorf("MinWait2Version (%s) is not a valid version string: %v", MinWait2Version, err) + logger.Log( + "msg", "MinWait2Version is not a valid version string", + "min_wait2_version", MinWait2Version, + "err", err) return nil } if version.LessThan(minVersion) { @@ -512,14 +550,20 @@ func getIPTablesVersionString(exec utilexec.Interface) (string, error) { // --wait support landed in v1.6.1+ right before --version support, so // any version of iptables-restore that supports 
--version will also // support --wait -func getIPTablesRestoreWaitFlag(exec utilexec.Interface) []string { +func getIPTablesRestoreWaitFlag(exec utilexec.Interface, logger log.Logger) []string { vstring, err := getIPTablesRestoreVersionString(exec) if err != nil || vstring == "" { - glog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") + logger.Log( + "msg", "couldn't get iptables-restore version; assuming it doesn't support --wait", + "v", 3, + "err", err) return nil } if _, err := utilversion.ParseGeneric(vstring); err != nil { - glog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait") + logger.Log( + "msg", "couldn't parse iptables-restore version; assuming it doesn't support --wait", + "v", 3, + "err", err) return nil } @@ -555,8 +599,8 @@ func (runner *runner) AddReloadFunc(reloadFunc func()) { // runs all reload funcs to re-sync iptables rules func (runner *runner) reload() { - glog.V(1).Infof("reloading iptables rules") - + runner.logger.Log( + "msg", "reloading iptables rules") for _, f := range runner.reloadFuncs { f() } diff --git a/pkg/wormhole/server.go b/pkg/wormhole/server.go index 8c24b9fc99..c0fa464561 100644 --- a/pkg/wormhole/server.go +++ b/pkg/wormhole/server.go @@ -1,11 +1,10 @@ package wormhole import ( - "fmt" "sync" "time" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" @@ -23,6 +22,8 @@ type ServerOptions struct { Context string ServiceCIDR string server.TunnelOptions + + Logger log.Logger } type Server struct { @@ -30,12 +31,14 @@ type Server struct { client kubernetes.Interface controller *server.Controller tunnel *server.Tunnel + + Logger log.Logger } func NewServer(options *ServerOptions) (*Server, error) { - s := &Server{} + s := &Server{Logger: log.With(options.Logger, "wormhole", "server")} - client, err := kube.NewClient(options.KubeConfig, options.Context) + client, err := kube.NewClient(options.KubeConfig, options.Context, options.Logger) if err != nil { return nil, err } @@ -46,20 +49,25 @@ func NewServer(options *ServerOptions) (*Server, error) { if err != nil { return nil, err } - s.controller = server.NewController(s.factory.Core().V1().Nodes(), options.ServiceCIDR, s.tunnel.Server) + s.controller = server.NewController(s.factory.Core().V1().Nodes(), options.ServiceCIDR, s.tunnel.Server, options.Logger) return s, nil } func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { - fmt.Printf("Welcome to Wormhole %v\n", version.GitCommit) + s.Logger.Log( + "msg", "powering up wormhole generator", + "version", version.GitCommit, + ) - kube.WaitForServer(s.client, stopCh) + kube.WaitForServer(s.client, stopCh, s.Logger) s.factory.Start(stopCh) s.factory.WaitForCacheSync(stopCh) - glog.Info("Cache primed. Ready for Action!") + s.Logger.Log( + "msg", "Cache primed. 
Ready for Action!", + ) go s.controller.Run(1, stopCh, wg) go s.tunnel.Run(stopCh, wg) diff --git a/pkg/wormhole/server/controller.go b/pkg/wormhole/server/controller.go index c4f824be91..c9f8569203 100644 --- a/pkg/wormhole/server/controller.go +++ b/pkg/wormhole/server/controller.go @@ -10,7 +10,7 @@ import ( "time" "github.com/databus23/guttle" - "github.com/golang/glog" + "github.com/go-kit/kit/log" "k8s.io/apimachinery/pkg/util/wait" informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/pkg/api/v1" @@ -33,6 +33,8 @@ type Controller struct { iptables iptables.Interface hijackPort int serviceCIDR string + + Logger log.Logger } type route struct { @@ -40,15 +42,17 @@ type route struct { identifier string } -func NewController(informer informers.NodeInformer, serviceCIDR string, tunnel *guttle.Server) *Controller { +func NewController(informer informers.NodeInformer, serviceCIDR string, tunnel *guttle.Server, logger log.Logger) *Controller { + logger = log.With(logger, "controller", "tunnel") c := &Controller{ nodes: informer, tunnel: tunnel, queue: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 300*time.Second)), store: make(map[string][]route), - iptables: iptables.New(utilexec.New(), iptables.ProtocolIpv4), + iptables: iptables.New(utilexec.New(), iptables.ProtocolIpv4, logger), hijackPort: 9191, serviceCIDR: serviceCIDR, + Logger: logger, } c.nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -73,7 +77,9 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitG defer c.queue.ShutDown() defer wg.Done() wg.Add(1) - glog.Infof(`Starting WormholeGenerator with %d workers`, threadiness) + c.Logger.Log( + "msg", "starting WormholeGenerator", + "threadiness", threadiness) for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) @@ -84,7 +90,9 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}, wg *sync.WaitG for { select { case <-ticker.C: - glog.V(5).Infof("Running periodic recheck. Queuing all known nodes...") + c.Logger.Log( + "msg", "Running periodic recheck. Queuing all known nodes...", + "v", 5) for key, _ := range c.store { c.queue.Add(key) } @@ -124,7 +132,10 @@ func (c *Controller) handleErr(err error, key interface{}) { c.queue.Forget(key) return } - glog.Errorf("Requeuing %v: %v", key, err) + c.Logger.Log( + "msg", "requeuing because of error", + "key", key, + "err", err) // This controller retries 5 times if something goes wrong. After that, it stops trying. if c.queue.NumRequeues(key) < 5 { @@ -134,7 +145,9 @@ func (c *Controller) handleErr(err error, key interface{}) { return } - glog.Infof("Dropping %v. 
Too many errors", key) + c.Logger.Log( + "msg", "dropping because of too many error", + "key", key) c.queue.Forget(key) } @@ -154,7 +167,9 @@ func (c *Controller) reconcile(key string) error { func (c *Controller) addNode(key string, node *v1.Node) error { identifier := fmt.Sprintf("system:node:%v", node.GetName()) - glog.Infof("Adding tunnel routes for node %v", identifier) + c.Logger.Log( + "msg", "adding tunnel routes", + "node", identifier) podCIDR := node.Spec.PodCIDR @@ -195,13 +210,22 @@ func (c *Controller) redoIPTablesSpratz() error { table := iptables.TableNAT if _, err := c.iptables.EnsureChain(table, KUBERNIKUS_TUNNELS); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", table, KUBERNIKUS_TUNNELS, err) + c.Logger.Log( + "msg", "failed to ensure that chain exists", + "table", table, + "chain", KUBERNIKUS_TUNNELS, + "err", err) return err } args := []string{"-m", "comment", "--comment", "kubernikus tunnels", "-j", string(KUBERNIKUS_TUNNELS)} if _, err := c.iptables.EnsureRule(iptables.Append, table, iptables.ChainOutput, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", table, iptables.ChainOutput, KUBERNIKUS_TUNNELS, err) + c.Logger.Log( + "msg", "failed to ensure jump", + "table", table, + "target", iptables.ChainOutput, + "chain", KUBERNIKUS_TUNNELS, + "err", err) return err } @@ -209,7 +233,9 @@ func (c *Controller) redoIPTablesSpratz() error { existingNatChains := make(map[iptables.Chain]string) err := c.iptables.SaveInto(table, iptablesSaveRaw) if err != nil { - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + c.Logger.Log( + "msg", "failed to execute iptables-save, syncing all rules", + "err", err) } else { existingNatChains = iptables.GetChainLines(table, iptablesSaveRaw.Bytes()) } @@ -242,10 +268,16 @@ func (c *Controller) redoIPTablesSpratz() error { writeLine(natRules, "COMMIT") lines := append(natChains.Bytes(), natRules.Bytes()...) 
- glog.V(6).Infof("Restoring iptables rules: %s", lines) + c.Logger.Log( + "msg", "Restoring iptables rules", + "rules", lines, + "v", 6) + err = c.iptables.RestoreAll(lines, iptables.NoFlushTables, iptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v", err) + c.Logger.Log( + "msg", "Failed to execute iptables-restore", + "err", err) return err } diff --git a/pkg/wormhole/server/tunnel.go b/pkg/wormhole/server/tunnel.go index c962f9a1fc..7397f54af4 100644 --- a/pkg/wormhole/server/tunnel.go +++ b/pkg/wormhole/server/tunnel.go @@ -9,20 +9,26 @@ import ( "sync" "github.com/databus23/guttle" - "github.com/golang/glog" + "github.com/go-kit/kit/log" ) type Tunnel struct { Server *guttle.Server + + Logger log.Logger } type TunnelOptions struct { ClientCA string Certificate string PrivateKey string + + Logger log.Logger } func NewTunnel(options *TunnelOptions) (*Tunnel, error) { + logger := log.With(options.Logger, "wormhole", "tunnel") + var listener net.Listener if options.Certificate != "" { tlsConfig, err := newTLSConfig(options.Certificate, options.PrivateKey) @@ -46,7 +52,9 @@ func NewTunnel(options *TunnelOptions) (*Tunnel, error) { return nil, fmt.Errorf("Failed to listen to 127.0.0.1:8080: %s", err) } } - glog.Infof("Listening for tunnel clients on %s", listener.Addr()) + logger.Log( + "msg", "Listening for tunnel clients", + "addr", listener.Addr()) opts := guttle.ServerOptions{ Listener: listener, @@ -54,7 +62,7 @@ func NewTunnel(options *TunnelOptions) (*Tunnel, error) { ProxyFunc: guttle.StaticProxy("127.0.0.1:6443"), } - return &Tunnel{Server: guttle.NewServer(&opts)}, nil + return &Tunnel{Server: guttle.NewServer(&opts), Logger: logger}, nil } func (t *Tunnel) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { @@ -68,7 +76,7 @@ func (t *Tunnel) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { err := t.Server.Start() if err != nil { - glog.Error(err) + t.Logger.Log("err", err) } }