dashboards_controller.go
// Copyright (c) 2024 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dashboards
import (
"context"
"fmt"
"net/url"
esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1"
v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
"github.com/tigera/operator/pkg/common"
"github.com/tigera/operator/pkg/controller/certificatemanager"
"github.com/tigera/operator/pkg/controller/utils/imageset"
"github.com/tigera/operator/pkg/render/logstorage"
"github.com/tigera/operator/pkg/render/logstorage/dashboards"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
operatorv1 "github.com/tigera/operator/api/v1"
"github.com/tigera/operator/pkg/controller/options"
"github.com/tigera/operator/pkg/controller/status"
"github.com/tigera/operator/pkg/controller/utils"
"github.com/tigera/operator/pkg/render"
"github.com/tigera/operator/pkg/render/common/networkpolicy"
"github.com/tigera/operator/pkg/tls/certificatemanagement"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller_logstorage_dashboards")
const (
tigeraStatusName = "log-storage-dashboards"
)
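// DashboardsSubController reconciles the Kibana dashboards installer job for LogStorage and
// reports its state through the log-storage-dashboards TigeraStatus.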
type DashboardsSubController struct {
client client.Client
scheme *runtime.Scheme
status status.StatusManager
provider operatorv1.Provider
clusterDomain string
usePSP bool
multiTenant bool
elasticExternal bool
tierWatchReady *utils.ReadyFlag
}
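// Add creates the log-storage-dashboards controller, registers it with the manager, and sets up
// watches on the resources the reconciler depends on. It is a no-op when the Enterprise CRDs are
// not present in the cluster.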
func Add(mgr manager.Manager, opts options.AddOptions) error {
if !opts.EnterpriseCRDExists {
return nil
}
r := &DashboardsSubController{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
status: status.New(mgr.GetClient(), tigeraStatusName, opts.KubernetesVersion),
clusterDomain: opts.ClusterDomain,
provider: opts.DetectedProvider,
tierWatchReady: &utils.ReadyFlag{},
}
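// Start the status manager so that it can report TigeraStatus conditions for this controller.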
r.status.Run(opts.ShutdownContext)
// Create a controller using the reconciler and register it with the manager to receive reconcile calls.
c, err := controller.New("log-storage-dashboards-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Determine how to handle watch events for cluster-scoped resources. For multi-tenant clusters,
// we should update all tenants whenever one changes. For single-tenant clusters, we can just queue the object.
var eventHandler handler.EventHandler = &handler.EnqueueRequestForObject{}
if opts.MultiTenant {
eventHandler = utils.EnqueueAllTenants(mgr.GetClient())
}
// Configure watches for operator.tigera.io APIs this controller cares about.
if err = c.Watch(&source.Kind{Type: &operatorv1.LogStorage{}}, eventHandler); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch LogStorage resource: %w", err)
}
if err = c.Watch(&source.Kind{Type: &operatorv1.Installation{}}, eventHandler); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch Installation resource: %w", err)
}
if err = c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, eventHandler); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch ManagementClusterConnection resource: %w", err)
}
if err = utils.AddTigeraStatusWatch(c, tigeraStatusName); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch log-storage TigeraStatus: %w", err)
}
if opts.MultiTenant {
if err = c.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch Tenant resource: %w", err)
}
}
// The namespace(s) we need to monitor depend upon what tenancy mode we're running in.
// For single-tenant, everything is installed in the tigera-elasticsearch namespace.
// Make a helper for determining which namespaces to use based on tenancy mode.
helper := utils.NewNamespaceHelper(opts.MultiTenant, render.ElasticsearchNamespace, "")
// Watch secrets this controller cares about.
secretsToWatch := []string{
dashboards.ElasticCredentialsSecret,
}
// Determine namespaces to watch.
namespacesToWatch := []string{helper.TruthNamespace(), helper.InstallNamespace()}
if helper.TruthNamespace() == helper.InstallNamespace() {
namespacesToWatch = []string{helper.InstallNamespace()}
}
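// Register a watch for each credentials secret in every namespace we care about.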
for _, ns := range namespacesToWatch {
for _, name := range secretsToWatch {
if err := utils.AddSecretsWatch(c, name, ns); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch Secret: %w", err)
}
}
}
// Catch if something modifies the resources that this controller consumes.
if err := utils.AddServiceWatch(c, render.KibanaServiceName, helper.InstallNamespace()); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err)
}
if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), &handler.EnqueueRequestForObject{}); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err)
}
// Check if something modifies resources this controller creates.
err = c.Watch(&source.Kind{Type: &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
Namespace: helper.InstallNamespace(),
Name: dashboards.Name,
}}}, &handler.EnqueueRequestForObject{})
if err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch installer job: %v", err)
}
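// Build a plain clientset for the deferred watches below; Tier and NetworkPolicy watches are
// added asynchronously once the required APIs become available.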
k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to establish a connection to k8s: %w", err)
}
go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, c, k8sClient, log, r.tierWatchReady)
go utils.WaitToAddNetworkPolicyWatches(c, k8sClient, log, []types.NamespacedName{
{Name: dashboards.PolicyName, Namespace: helper.InstallNamespace()},
})
// Perform periodic reconciliation. This acts as a backstop to catch reconcile issues,
// and also makes sure we spot when things change that might not trigger a reconciliation.
err = utils.AddPeriodicReconcile(c, utils.PeriodicReconcileTime, eventHandler)
if err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to create periodic reconcile watch: %w", err)
}
return nil
}
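// Reconcile renders the dashboards installer job for the cluster (or, in multi-tenant mode, for
// the tenant identified by the request namespace) and keeps its supporting resources in sync.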
func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
helper := utils.NewNamespaceHelper(d.multiTenant, render.ElasticsearchNamespace, request.Namespace)
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name, "installNS", helper.InstallNamespace(), "truthNS", helper.TruthNamespace())
reqLogger.Info("Reconciling LogStorage - Dashboards")
// We skip requests without a namespace specified in multi-tenant setups.
if d.multiTenant && request.Namespace == "" {
return reconcile.Result{}, nil
}
// When running in multi-tenant mode, we need to install Dashboards in tenant Namespaces.
// We use the Tenant API to determine the set of namespaces that should have a Kubernetes Job that installs dashboards.
tenant, _, err := utils.GetTenant(ctx, d.multiTenant, d.client, request.Namespace)
if errors.IsNotFound(err) {
reqLogger.Info("No Tenant in this Namespace, skip")
return reconcile.Result{}, nil
} else if err != nil {
d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Tenant", err, reqLogger)
return reconcile.Result{}, err
}
// Get Installation resource.
variant, install, err := utils.GetInstallation(ctx, d.client)
if err != nil {
if errors.IsNotFound(err) {
d.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger)
return reconcile.Result{}, err
}
d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Installation", err, reqLogger)
return reconcile.Result{}, err
}
// Validate that the tier watch is ready before querying the tier to ensure we utilize the cache.
if !d.tierWatchReady.IsReady() {
d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger)
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
}
// Ensure the allow-tigera tier exists, before rendering any network policies within it.
if err := d.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil {
if errors.IsNotFound(err) {
d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created", err, reqLogger)
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
} else {
d.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger)
return reconcile.Result{}, err
}
}
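// The dashboards installer only runs on management and standalone clusters, so check whether
// this cluster is managed before doing any further work.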
managementClusterConnection, err := utils.GetManagementClusterConnection(ctx, d.client)
if err != nil {
d.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementClusterConnection", err, reqLogger)
return reconcile.Result{}, err
}
if managementClusterConnection != nil {
// Dashboard job installer is only relevant for management and standalone clusters. If this is a managed cluster, we can
// simply return early.
reqLogger.V(1).Info("Not installing dashboard job installer on managed cluster")
return reconcile.Result{}, nil
}
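// Fetch the image pull secrets referenced by the Installation; the rendered dashboards
// resources need them in order to pull images.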
pullSecrets, err := utils.GetNetworkingPullSecrets(install, d.client)
if err != nil {
d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurring while retrieving the pull secrets", err, reqLogger)
return reconcile.Result{}, err
}
// Get LogStorage resource.
logStorage := &operatorv1.LogStorage{}
key := utils.DefaultTSEEInstanceKey
err = d.client.Get(ctx, key, logStorage)
if err != nil {
if errors.IsNotFound(err) {
d.status.OnCRNotFound()
return reconcile.Result{}, nil
}
d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying LogStorage", err, reqLogger)
return reconcile.Result{}, err
}
// Determine where to access Kibana.
kibanaHost := "tigera-secure-kb-http.tigera-kibana.svc"
kibanaPort := "5601"
kibanaScheme := "https"
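// The defaults above point at the in-cluster Kibana service; they are overridden below when an
// external Kibana is configured on the Tenant.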
var externalKibanaSecret *corev1.Secret
if !d.elasticExternal {
// Wait for Elasticsearch to be installed and available.
elasticsearch, err := utils.GetElasticsearch(ctx, d.client)
if err != nil {
d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Elasticsearch", err, reqLogger)
return reconcile.Result{}, err
}
if elasticsearch == nil || elasticsearch.Status.Phase != esv1.ElasticsearchReadyPhase {
d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", nil, reqLogger)
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
}
} else {
// If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint.
if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" {
reqLogger.Error(nil, "Kibana URL must be specified for this tenant")
d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL must be specified for this tenant", nil, reqLogger)
return reconcile.Result{}, nil
}
// Determine the scheme, host, and port from the URL.
kbURL, err := url.Parse(tenant.Spec.Elastic.KibanaURL)
if err != nil {
reqLogger.Error(err, "Kibana URL is invalid")
d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL is invalid", err, reqLogger)
return reconcile.Result{}, nil
}
kibanaScheme = kbURL.Scheme
kibanaHost = kbURL.Hostname()
kibanaPort = kbURL.Port()
if tenant.ElasticMTLS() {
// If mTLS is enabled, get the secret containing the CA and client certificate.
externalKibanaSecret = &corev1.Secret{}
err = d.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, externalKibanaSecret)
if err != nil {
reqLogger.Error(err, "Failed to read external Kibana client certificate secret")
d.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Kibana client certificate secret to be available", err, reqLogger)
return reconcile.Result{}, err
}
}
}
// Query the username and password this Dashboards Installer instance should use to authenticate with Elasticsearch.
// TODO: Verify the statement below.
// For multi-tenant systems, these credentials are created by the Elasticsearch users controller.
// For single-tenant systems, they are created by es-kube-controllers.
key = types.NamespacedName{Name: dashboards.ElasticCredentialsSecret, Namespace: helper.InstallNamespace()}
credentials := corev1.Secret{}
if err = d.client.Get(ctx, key, &credentials); err != nil && !errors.IsNotFound(err) {
d.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Error getting Secret %s", key), err, reqLogger)
return reconcile.Result{}, err
} else if errors.IsNotFound(err) {
d.status.SetDegraded(operatorv1.ResourceNotFound, fmt.Sprintf("Waiting for Dashboards credential Secret %s", key), err, reqLogger)
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
}
// Collect the certificates we need to provision Dashboards. These will have been provisioned already by the ES secrets controller.
opts := []certificatemanager.Option{
certificatemanager.WithLogger(reqLogger),
certificatemanager.WithTenant(tenant),
}
cm, err := certificatemanager.Create(d.client, install, d.clusterDomain, helper.TruthNamespace(), opts...)
if err != nil {
d.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger)
return reconcile.Result{}, err
}
// Query the trusted bundle from the namespace.
trustedBundle, err := cm.LoadTrustedBundle(ctx, d.client, helper.InstallNamespace())
if err != nil {
d.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger)
return reconcile.Result{}, err
}
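// Build the render configuration for the dashboards installer from everything gathered above.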
cfg := &dashboards.Config{
Installation: install,
PullSecrets: pullSecrets,
Namespace: helper.InstallNamespace(),
TrustedBundle: trustedBundle,
ClusterDomain: d.clusterDomain,
UsePSP: d.usePSP,
IsManaged: managementClusterConnection != nil,
Tenant: tenant,
KibanaHost: kibanaHost,
KibanaScheme: kibanaScheme,
KibanaPort: kibanaPort,
ExternalKibanaClientSecret: externalKibanaSecret,
Credentials: []*corev1.Secret{&credentials},
}
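// Render the dashboards component and substitute any images pinned by an ImageSet.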
dashboardsComponent := dashboards.Dashboards(cfg)
if err := imageset.ApplyImageSet(ctx, d.client, variant, dashboardsComponent); err != nil {
d.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger)
return reconcile.Result{}, err
}
// In standard installs, the LogStorage owns the dashboards. For multi-tenant, it's owned by the Tenant instance.
var hdler utils.ComponentHandler
if d.multiTenant {
hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, tenant)
} else {
hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, logStorage)
}
if err := hdler.CreateOrUpdateOrDelete(ctx, dashboardsComponent, d.status); err != nil {
d.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating / deleting resource", err, reqLogger)
return reconcile.Result{}, err
}
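// Everything reconciled successfully. Tell the status manager it can start monitoring the
// rendered objects and clear any previously reported degraded state.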
d.status.ReadyToMonitor()
d.status.ClearDegraded()
return reconcile.Result{}, nil
}