Refactor for writing status and config on connection
sjberman committed Jan 16, 2025
1 parent 41103c8 commit 0c70f9b
Showing 35 changed files with 1,048 additions and 535 deletions.
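The core of this refactor (see internal/mode/static/handler.go below) is that statuses are no longer written inline after every event batch: the handler now enqueues a status.QueueObject onto a status queue, and a background goroutine started in newEventHandlerImpl (waitForStatusUpdates) dequeues items and writes statuses, reading NGINX config and upstream errors back off the tracked agent Deployment. The sketch below illustrates only that enqueue/dequeue pattern; it is a minimal, self-contained approximation, not the project's actual status.Queue or agent types, and every name in it is a stand-in.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// QueueObject mirrors the shape the handler enqueues: an error (if any) and
// the identity of the nginx Deployment the update concerns. The real code
// uses types.NamespacedName; a string keeps this sketch self-contained.
type QueueObject struct {
	Error      error
	Deployment string
}

// Queue is a toy blocking queue: Enqueue never blocks (up to the buffer size),
// Dequeue blocks until an item arrives or the context is canceled.
type Queue struct {
	items chan *QueueObject
}

func NewQueue() *Queue {
	return &Queue{items: make(chan *QueueObject, 64)}
}

func (q *Queue) Enqueue(obj *QueueObject) {
	q.items <- obj
}

// Dequeue returns nil once ctx is canceled, which is the consumer's signal to stop.
func (q *Queue) Dequeue(ctx context.Context) *QueueObject {
	select {
	case <-ctx.Done():
		return nil
	case obj := <-q.items:
		return obj
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	q := NewQueue()
	done := make(chan struct{})

	// Consumer: loosely analogous to eventHandlerImpl.waitForStatusUpdates.
	go func() {
		defer close(done)
		for {
			item := q.Dequeue(ctx)
			if item == nil {
				return
			}
			if item.Error != nil {
				fmt.Println("failed to update NGINX configuration:", item.Error)
				continue
			}
			fmt.Println("NGINX configuration updated for", item.Deployment)
		}
	}()

	// Producer: loosely analogous to HandleEventBatch enqueueing after a config apply.
	q.Enqueue(&QueueObject{Deployment: "default/tmp-nginx-deployment"})

	time.Sleep(100 * time.Millisecond) // let the consumer drain before shutting down
	cancel()
	<-done
}
```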
@@ -77,7 +77,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
# readOnlyRootFilesystem: true
readOnlyRootFilesystem: true
runAsUser: 101
runAsGroup: 1001
volumeMounts:
1 change: 1 addition & 0 deletions deploy/aws-nlb/deploy.yaml
@@ -338,6 +338,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/azure/deploy.yaml
@@ -337,6 +337,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/default/deploy.yaml
@@ -335,6 +335,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/experimental-nginx-plus/deploy.yaml
@@ -351,6 +351,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/experimental/deploy.yaml
@@ -341,6 +341,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/nginx-plus/deploy.yaml
@@ -345,6 +345,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/nodeport/deploy.yaml
@@ -335,6 +335,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/openshift/deploy.yaml
@@ -343,6 +343,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/snippets-filters-nginx-plus/deploy.yaml
@@ -348,6 +348,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
1 change: 1 addition & 0 deletions deploy/snippets-filters/deploy.yaml
@@ -338,6 +338,7 @@ spec:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 1001
runAsUser: 101
seccompProfile:
2 changes: 1 addition & 1 deletion go.mod
@@ -8,7 +8,7 @@ require (
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4
github.com/nginx/agent/v3 v3.0.0-20250116154650-db82ebd210da
github.com/nginxinc/telemetry-exporter v0.1.2
github.com/onsi/ginkgo/v2 v2.22.1
github.com/onsi/gomega v1.36.1
4 changes: 2 additions & 2 deletions go.sum
@@ -133,8 +133,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4 h1:Tn0SOlxq9uaJuqc6DUGZGYszrtHAHOaLnhbBWMzK1Bs=
github.com/nginx/agent/v3 v3.0.0-20241220140549-28adb688a8b4/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8=
github.com/nginx/agent/v3 v3.0.0-20250116154650-db82ebd210da h1:O9jpWnCGO0peM5km5GQ4yDXQZeer5hHFvc1ND/MoWUk=
github.com/nginx/agent/v3 v3.0.0-20250116154650-db82ebd210da/go.mod h1:HDi/Je5AKCe5by/hWs2jbzUqi3BN4K32hMD2/hWN5G8=
github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1 h1:5VVK38bnELMDWnwfF6dSv57ResXh9AUzeDa72ENj94o=
github.com/nginxinc/nginx-plus-go-client/v2 v2.0.1/go.mod h1:He+1izxYxVVO5/C9ZTukwOpvkAx5eS19nRQgKXDhX5I=
github.com/nginxinc/telemetry-exporter v0.1.2 h1:97vUGhQYgQ2KEsXKCBmr5gqfuujJCKPHwdg5HKoANUs=
124 changes: 77 additions & 47 deletions internal/mode/static/handler.go
@@ -2,6 +2,7 @@ package static

import (
"context"
"errors"
"fmt"
"sync"
"time"
@@ -35,6 +36,7 @@ type handlerMetricsCollector interface {

// eventHandlerConfig holds configuration parameters for eventHandlerImpl.
type eventHandlerConfig struct {
ctx context.Context
// nginxUpdater updates nginx configuration using the NGINX agent.
nginxUpdater agent.NginxUpdater
// metricsCollector collects metrics for this controller.
@@ -59,6 +61,12 @@ type eventHandlerConfig struct {
deployCtxCollector licensing.Collector
// graphBuiltHealthChecker sets the health of the Pod to Ready once we've built our initial graph.
graphBuiltHealthChecker *graphBuiltHealthChecker
// statusQueue contains updates when the handler should write statuses.
statusQueue *status.Queue
// nginxDeployments contains a map of all nginx Deployments, and data about them.
nginxDeployments *agent.DeploymentStore
// logger is the logger for the event handler.
logger logr.Logger
// gatewayPodConfig contains information about this Pod.
gatewayPodConfig ngfConfig.GatewayPodConfig
// controlConfigNSName is the NamespacedName of the NginxGateway config for this controller.
@@ -102,7 +110,7 @@ type eventHandlerImpl struct {
// objectFilters contains all created objectFilters, with the key being a filterKey
objectFilters map[filterKey]objectFilter

latestReloadResult status.NginxReloadResult

cfg eventHandlerConfig
lock sync.Mutex
@@ -137,6 +145,8 @@ func newEventHandlerImpl(cfg eventHandlerConfig) *eventHandlerImpl {
},
}

go handler.waitForStatusUpdates(cfg.ctx)

return handler
}

@@ -164,10 +174,22 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log
h.cfg.graphBuiltHealthChecker.setAsReady()
}

// TODO(sberman): hardcode this deployment name until we support provisioning data planes
// If no deployments exist, we should just return without doing anything.
deploymentName := types.NamespacedName{
Name: "tmp-nginx-deployment",
Namespace: h.cfg.gatewayPodConfig.Namespace,
}

// TODO(sberman): if nginx Deployment is scaled down, we should remove the pod from the ConnectionsTracker
// and Deployment.
// If fully deleted, then delete the deployment from the Store
var err error
var configApplied bool
deployment := h.cfg.nginxDeployments.GetOrStore(ctx, deploymentName)
if deployment == nil {
panic("expected deployment, got nil")
}

switch changeType {
case state.NoChange:
logger.Info("Handling events didn't result into NGINX configuration changes")
@@ -183,16 +205,13 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log

h.setLatestConfiguration(&cfg)

deployment.Lock.Lock()
if h.cfg.plus {
// TODO(sberman): hardcode this deployment name until we support provisioning data planes
deployment := types.NamespacedName{
Name: "tmp-nginx-deployment",
Namespace: h.cfg.gatewayPodConfig.Namespace,
}
configApplied, err = h.cfg.nginxUpdater.UpdateUpstreamServers(ctx, deployment, cfg)
configApplied = h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, cfg)
} else {
configApplied, err = h.updateNginxConf(ctx, cfg)
configApplied = h.updateNginxConf(deployment, cfg)
}
deployment.Lock.Unlock()
case state.ClusterStateChange:
h.version++
cfg := dataplane.BuildConfiguration(ctx, gr, h.cfg.serviceResolver, h.version)
@@ -204,31 +223,53 @@ func (h *eventHandlerImpl) HandleEventBatch(ctx context.Context, logger logr.Log

h.setLatestConfiguration(&cfg)

configApplied, err = h.updateNginxConf(ctx, cfg)
deployment.Lock.Lock()
configApplied = h.updateNginxConf(deployment, cfg)
deployment.Lock.Unlock()
}

var nginxReloadRes status.NginxReloadResult
switch {
case err != nil:
logger.Error(err, "Failed to update NGINX configuration")
nginxReloadRes.Error = err
case configApplied:
logger.Info("NGINX configuration was successfully updated")
default:
logger.Info("No NGINX instances to configure")
configErr := deployment.GetLatestConfigError()
upstreamErr := deployment.GetLatestUpstreamError()
err := errors.Join(configErr, upstreamErr)

if configApplied || err != nil {
obj := &status.QueueObject{
Error: err,
Deployment: deploymentName,
}
h.cfg.statusQueue.Enqueue(obj)
}
}

h.latestReloadResult = nginxReloadRes
func (h *eventHandlerImpl) waitForStatusUpdates(ctx context.Context) {
for {
item := h.cfg.statusQueue.Dequeue(ctx)
if item == nil {
return
}

if configApplied || err != nil {
h.updateStatuses(ctx, logger, gr)
var nginxReloadRes graph.NginxReloadResult
switch {
case item.Error != nil:
h.cfg.logger.Error(item.Error, "Failed to update NGINX configuration")
nginxReloadRes.Error = item.Error
default:
h.cfg.logger.Info("NGINX configuration was successfully updated")
}

// TODO(sberman): once we support multiple Gateways, we'll have to get
// the correct Graph for the Deployment contained in the update message
gr := h.cfg.processor.GetLatestGraph()
gr.LatestReloadResult = nginxReloadRes

h.updateStatuses(ctx, gr)
}
}

func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logger, gr *graph.Graph) {
func (h *eventHandlerImpl) updateStatuses(ctx context.Context, gr *graph.Graph) {
gwAddresses, err := getGatewayAddresses(ctx, h.cfg.k8sClient, nil, h.cfg.gatewayPodConfig)
if err != nil {
logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address")
h.cfg.logger.Error(err, "Setting GatewayStatusAddress to Pod IP Address")
}

transitionTime := metav1.Now()
@@ -241,7 +282,7 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logge
gr.L4Routes,
gr.Routes,
transitionTime,
h.latestReloadResult,
gr.LatestReloadResult,
h.cfg.gatewayCtlrName,
)

@@ -273,7 +314,7 @@ func (h *eventHandlerImpl) updateStatuses(ctx context.Context, logger logr.Logge
gr.IgnoredGateways,
transitionTime,
gwAddresses,
h.latestReloadResult,
gr.LatestReloadResult,
)
h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gwReqs...)
}
@@ -308,30 +349,19 @@ func (h *eventHandlerImpl) parseAndCaptureEvent(ctx context.Context, logger logr
}

// updateNginxConf updates nginx conf files and reloads nginx.
func (h *eventHandlerImpl) updateNginxConf(ctx context.Context, conf dataplane.Configuration) (bool, error) {
func (h *eventHandlerImpl) updateNginxConf(
deployment *agent.Deployment,
conf dataplane.Configuration,
) bool {
files := h.cfg.generator.Generate(conf)

// TODO(sberman): hardcode this deployment name until we support provisioning data planes
deployment := types.NamespacedName{
Name: "tmp-nginx-deployment",
Namespace: h.cfg.gatewayPodConfig.Namespace,
}

applied, err := h.cfg.nginxUpdater.UpdateConfig(ctx, deployment, files)
if err != nil {
return false, err
}
applied := h.cfg.nginxUpdater.UpdateConfig(deployment, files)

// If using NGINX Plus, update upstream servers using the API.
var plusApplied bool
if h.cfg.plus && applied {
plusApplied, err = h.cfg.nginxUpdater.UpdateUpstreamServers(ctx, deployment, conf)
if err != nil {
return false, err
}
if h.cfg.plus {
h.cfg.nginxUpdater.UpdateUpstreamServers(deployment, conf)
}

return applied || plusApplied, nil
return applied
}

// updateControlPlaneAndSetStatus updates the control plane configuration and then sets the status
@@ -508,7 +538,7 @@ func (h *eventHandlerImpl) nginxGatewayServiceUpsert(ctx context.Context, logger
gr.IgnoredGateways,
transitionTime,
gwAddresses,
h.latestReloadResult,
gr.LatestReloadResult,
)
h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...)
}
@@ -534,7 +564,7 @@ func (h *eventHandlerImpl) nginxGatewayServiceDelete(
gr.IgnoredGateways,
transitionTime,
gwAddresses,
h.latestReloadResult,
gr.LatestReloadResult,
)
h.cfg.statusUpdater.UpdateGroup(ctx, groupGateways, gatewayStatuses...)
}
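The handler.go hunks above also show the other half of the refactor: config and upstream writes now happen while holding the target Deployment's lock, and failures are recorded on the Deployment (GetLatestConfigError / GetLatestUpstreamError) and joined with errors.Join before being enqueued for status reporting, instead of being returned up the call stack. A minimal, self-contained sketch of that record-then-read-back pattern follows; the Deployment type and applyConfig helper are stand-ins, not the project's agent package.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// Deployment is a stand-in for the tracked nginx Deployment: a mutex to
// serialize writes plus the last errors seen for config and upstream updates.
type Deployment struct {
	Lock              sync.Mutex
	latestConfigError error
	latestUpstreamErr error
}

func (d *Deployment) GetLatestConfigError() error   { return d.latestConfigError }
func (d *Deployment) GetLatestUpstreamError() error { return d.latestUpstreamErr }

// applyConfig takes the deployment's lock while writing (in the real handler
// the caller holds the lock across the whole apply) and records failures on
// the deployment rather than returning them, so the status path can read
// them back later.
func applyConfig(d *Deployment, files []string) bool {
	d.Lock.Lock()
	defer d.Lock.Unlock()

	if len(files) == 0 {
		d.latestConfigError = errors.New("no config files generated")
		return false
	}
	d.latestConfigError = nil
	return true
}

func main() {
	d := &Deployment{}
	applied := applyConfig(d, []string{"nginx.conf"})

	// Mirror of the handler: join the recorded errors and only report when
	// something was applied or something failed.
	if err := errors.Join(d.GetLatestConfigError(), d.GetLatestUpstreamError()); applied || err != nil {
		fmt.Println("applied:", applied, "err:", err)
	}
}
```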