
Commit 54fabed

chore: various changes + fixes (#50)

- change FeatureFlags to embed FeatureFlag rather than a string type
- move the generic function getOperatorNamespace into utils
- fix DistributionType so that only one of Name and Image can be set
- move the port-value lookup into the generic function GetServicePort
- remove the NetworkPolicy ingress rule that matches everything
- allow ContainerSpec to be empty, falling back to the zero value
- add the extra label "app.kubernetes.io/instance" on the Deployment/Pod so the Service still works when multiple CRs are created
- change from types.NamespacedName{} to client.ObjectKeyFromObject()

Approved-by: leseb
Approved-by: VaishnaviHire
1 parent ed504fd

10 files changed (+155 −121 lines)
api/v1alpha1/llamastackdistribution_types.go

Lines changed: 3 additions & 3 deletions
Some generated files are not rendered by default.

config/crd/bases/llamastack.io_llamastackdistributions.yaml

Lines changed: 1 addition & 2 deletions
@@ -243,7 +243,7 @@ spec:
               type: object
               x-kubernetes-validations:
               - message: Only one of name or image can be specified
-                rule: has(self.name) != has(self.image)
+                rule: '!(has(self.name) && has(self.image))'
             podOverrides:
               description: PodOverrides allows advanced pod-level customization.
               properties:
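Note the semantic change in the CEL rule: the old XOR form `has(self.name) != has(self.image)` required exactly one of `name` or `image` to be set, so leaving both unset was rejected; the new form only forbids setting both at once, making each field individually optional.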
@@ -1978,7 +1978,6 @@ spec:
                 x-kubernetes-int-or-string: true
               type: object
             required:
-            - containerSpec
             - distribution
             type: object
           required:
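Dropping `containerSpec` from the `required` list pairs with the controller changes below: an omitted ContainerSpec now decodes to its zero value, and the port falls back to the default through `deploy.GetServicePort` (see the sketch after the reconcileService hunk).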

controllers/llamastackdistribution_controller.go

Lines changed: 91 additions & 71 deletions
@@ -23,7 +23,6 @@ import (
 	"io"
 	"net/http"
 	"net/url"
-	"strings"
 	"sync"
 	"time"
 
@@ -47,6 +46,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+const (
+	operatorConfigData = "llama-stack-operator-config"
+)
+
 // LlamaStackDistributionReconciler reconciles a LlamaStack object.
 type LlamaStackDistributionReconciler struct {
 	client.Client
@@ -74,9 +77,8 @@ func (r *LlamaStackDistributionReconciler) Reconcile(ctx context.Context, req ct
 	if err != nil {
 		return ctrl.Result{}, err
 	}
-
-	// Instance not found - skip reconciliation
 	if instance == nil {
+		r.Log.Info("LlamaStackDistribution resource not found, skipping reconciliation", "namespacedName", req.NamespacedName)
 		return ctrl.Result{}, nil
 	}
 

@@ -182,7 +184,7 @@ func (r *LlamaStackDistributionReconciler) reconcilePVC(ctx context.Context, ins
 	}
 
 	found := &corev1.PersistentVolumeClaim{}
-	err := r.Get(ctx, types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, found)
+	err := r.Get(ctx, client.ObjectKeyFromObject(pvc), found)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
 			logger.Info("Creating PVC", "pvc", pvc.Name)
@@ -224,11 +226,17 @@ func (r *LlamaStackDistributionReconciler) reconcileDeployment(ctx context.Conte
 		Spec: appsv1.DeploymentSpec{
 			Replicas: &instance.Spec.Replicas,
 			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue},
+				MatchLabels: map[string]string{
+					llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
+					"app.kubernetes.io/instance":  instance.Name,
+				},
 			},
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue},
+					Labels: map[string]string{
+						llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
+						"app.kubernetes.io/instance":  instance.Name,
+					},
 				},
 				Spec: podSpec,
 			},
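The same two-label set now appears on the Deployment selector, the pod template, and (below) the Service and NetworkPolicy selectors. A hypothetical helper, not part of this commit, makes the intent explicit: `app.kubernetes.io/instance` pins selection to one CR, so two LlamaStackDistributions in the same namespace no longer select each other's pods:

```go
// instanceLabels is an illustrative consolidation of the label set used
// above. DefaultLabelKey/DefaultLabelValue mark the operator's workloads
// in general; the instance label scopes them to a single CR.
func instanceLabels(instance *llamav1alpha1.LlamaStackDistribution) map[string]string {
	return map[string]string{
		llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
		"app.kubernetes.io/instance":  instance.Name,
	}
}
```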
@@ -241,18 +249,18 @@
 // reconcileService manages the Service if ports are defined.
 func (r *LlamaStackDistributionReconciler) reconcileService(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error {
 	// Use the container's port (defaulted to 8321 if unset)
-	port := instance.Spec.Server.ContainerSpec.Port
-	if port == 0 {
-		port = llamav1alpha1.DefaultServerPort
-	}
+	port := deploy.GetServicePort(instance)
 
 	service := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      instance.Name + "-service",
+			Name:      deploy.GetServiceName(instance),
 			Namespace: instance.Namespace,
 		},
 		Spec: corev1.ServiceSpec{
-			Selector: map[string]string{llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue},
+			Selector: map[string]string{
+				llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
+				"app.kubernetes.io/instance":  instance.Name,
+			},
 			Ports: []corev1.ServicePort{{
 				Name: llamav1alpha1.DefaultServicePortName,
 				Port: port,
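`deploy.GetServicePort` and `deploy.GetServiceName` are not shown in this diff, but the inline code they replace pins down their behavior. A sketch consistent with the removed lines (the int32 return type is an assumption based on how the value feeds corev1.ServicePort):

```go
// GetServicePort returns the container's port, falling back to the
// default (8321) when ContainerSpec is the zero value — which is now
// legal, since the CRD no longer requires containerSpec.
func GetServicePort(instance *llamav1alpha1.LlamaStackDistribution) int32 {
	if port := instance.Spec.Server.ContainerSpec.Port; port != 0 {
		return port
	}
	return llamav1alpha1.DefaultServerPort
}

// GetServiceName mirrors the previous inline instance.Name + "-service".
func GetServiceName(instance *llamav1alpha1.LlamaStackDistribution) string {
	return instance.Name + "-service"
}
```

Centralizing the fallback means the Service, the server URL, and the NetworkPolicy (all below) can no longer drift apart on which port they use.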
@@ -269,11 +277,8 @@ func (r *LlamaStackDistributionReconciler) reconcileService(ctx context.Context,
 
 // getServerURL returns the URL for the LlamaStack server.
 func (r *LlamaStackDistributionReconciler) getServerURL(instance *llamav1alpha1.LlamaStackDistribution, path string) *url.URL {
-	serviceName := fmt.Sprintf("%s-service", instance.Name)
-	port := instance.Spec.Server.ContainerSpec.Port
-	if port == 0 {
-		port = llamav1alpha1.DefaultServerPort
-	}
+	serviceName := deploy.GetServiceName(instance)
+	port := deploy.GetServicePort(instance)
 
 	return &url.URL{
 		Scheme: "http",
@@ -342,7 +347,7 @@ func (r *LlamaStackDistributionReconciler) getProviderInfo(ctx context.Context,
 	return response.Data, nil
 }
 
-// updateStatus refreshes the LlamaStack status.
+// updateStatus refreshes the LlamaStackDistribution status.
 func (r *LlamaStackDistributionReconciler) updateStatus(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error {
 	deployment := &appsv1.Deployment{}
 	err := r.Get(ctx, types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, deployment)
@@ -441,11 +446,7 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co
 		return deploy.HandleDisabledNetworkPolicy(ctx, r.Client, networkPolicy, r.Log)
 	}
 
-	// Use the container's port (defaulted to 8321 if unset)
-	port := instance.Spec.Server.ContainerSpec.Port
-	if port == 0 {
-		port = llamav1alpha1.DefaultServerPort
-	}
+	port := deploy.GetServicePort(instance)
 
 	// get operator namespace
 	operatorNamespace, err := deploy.GetOperatorNamespace()
@@ -457,6 +458,7 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co
 		PodSelector: metav1.LabelSelector{
 			MatchLabels: map[string]string{
 				llamav1alpha1.DefaultLabelKey: llamav1alpha1.DefaultLabelValue,
+				"app.kubernetes.io/instance":  instance.Name,
 			},
 		},
 		PolicyTypes: []networkingv1.PolicyType{
@@ -465,7 +467,7 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co
 		Ingress: []networkingv1.NetworkPolicyIngressRule{
 			{
 				From: []networkingv1.NetworkPolicyPeer{
-					{
+					{ // to match all pods in all namespaces
 						PodSelector: &metav1.LabelSelector{
 							MatchLabels: map[string]string{
 								"app.kubernetes.io/part-of": llamav1alpha1.DefaultContainerName,
@@ -485,8 +487,8 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co
 			},
 			{
 				From: []networkingv1.NetworkPolicyPeer{
-					{
-						PodSelector: &metav1.LabelSelector{}, // Empty podSelector to match all pods
+					{ // to match all pods in matched namespace
+						PodSelector: &metav1.LabelSelector{},
 						NamespaceSelector: &metav1.LabelSelector{
 							MatchLabels: map[string]string{
 								"kubernetes.io/metadata.name": operatorNamespace,
@@ -503,27 +505,53 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co
 				},
 			},
 		},
-		{
-			From: []networkingv1.NetworkPolicyPeer{
-				{
-					PodSelector: &metav1.LabelSelector{}, // Empty podSelector to match all pods
-				},
-			},
-			Ports: []networkingv1.NetworkPolicyPort{
-				{
-					Protocol: (*corev1.Protocol)(ptr.To("TCP")),
-					Port: &intstr.IntOrString{
-						IntVal: port,
-					},
-				},
-			},
-		},
 		},
 	}
 
 	return deploy.ApplyNetworkPolicy(ctx, r.Client, r.Scheme, instance, networkPolicy, r.Log)
 }
 
+// createDefaultConfigMap creates a ConfigMap with default feature flag values.
+func createDefaultConfigMap(configMapName types.NamespacedName) (*corev1.ConfigMap, error) {
+	featureFlags := featureflags.FeatureFlags{
+		EnableNetworkPolicy: featureflags.FeatureFlag{
+			Enabled: featureflags.NetworkPolicyDefaultValue,
+		},
+	}
+
+	featureFlagsYAML, err := yaml.Marshal(featureFlags)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal default feature flags: %w", err)
+	}
+
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      configMapName.Name,
+			Namespace: configMapName.Namespace,
+		},
+		Data: map[string]string{
+			featureflags.FeatureFlagsKey: string(featureFlagsYAML),
+		},
+	}, nil
+}
+
+// parseFeatureFlags extracts and parses feature flags from ConfigMap data.
+func parseFeatureFlags(configMapData map[string]string) (bool, error) {
+	enableNetworkPolicy := featureflags.NetworkPolicyDefaultValue
+
+	featureFlagsYAML, exists := configMapData[featureflags.FeatureFlagsKey]
+	if !exists {
+		return enableNetworkPolicy, nil
+	}
+
+	var flags featureflags.FeatureFlags
+	if err := yaml.Unmarshal([]byte(featureFlagsYAML), &flags); err != nil {
+		return false, fmt.Errorf("failed to parse feature flags: %w", err)
+	}
+
+	return flags.EnableNetworkPolicy.Enabled, nil
+}
+
 // NewLlamaStackDistributionReconciler creates a new reconciler with default image mappings.
 func NewLlamaStackDistributionReconciler(ctx context.Context, client client.Client, scheme *runtime.Scheme,
 	clusterInfo *cluster.ClusterInfo) (*LlamaStackDistributionReconciler, error) {
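Two things happen in this hunk. First, the catch-all ingress rule (empty `podSelector`, no namespace restriction) is dropped, so the NetworkPolicy no longer admits traffic from every pod in the cluster to the server port; the remaining rules limit ingress to labeled peers and to the operator's namespace. Second, the ConfigMap bootstrap logic is factored into `createDefaultConfigMap` and `parseFeatureFlags`, which depend on the reworked `featureflags` types. Those definitions are outside this diff; a sketch of the shape they'd need for this code to compile (the `yaml` tags are assumptions), and of why the old `strings.ToLower` comparison could go:

```go
// A sketch of the featureflags types implied by this commit — not taken
// from the repository. Previously EnableNetworkPolicy was a bare string
// compared via strings.ToLower(...) == "true"; embedding a typed
// FeatureFlag lets yaml.Unmarshal produce a bool directly.
type FeatureFlag struct {
	Enabled bool `yaml:"enabled"` // assumed tag
}

type FeatureFlags struct {
	EnableNetworkPolicy FeatureFlag `yaml:"enableNetworkPolicy"` // assumed tag
}
```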
@@ -534,44 +562,36 @@ func NewLlamaStackDistributionReconciler(ctx context.Context, client client.Clie
 		return nil, fmt.Errorf("failed to get operator namespace: %w", err)
 	}
 
-	// Read the ConfigMap
+	// Get the ConfigMap
+	// If the ConfigMap doesn't exist, create it with default feature flags
+	// If the ConfigMap exists, parse the feature flags from the Configmap
 	configMap := &corev1.ConfigMap{}
 	configMapName := types.NamespacedName{
-		Name:      "llama-stack-operator-config",
+		Name:      operatorConfigData,
 		Namespace: operatorNamespace,
 	}
-	err = client.Get(ctx, configMapName, configMap)
-	if err != nil {
-		if k8serrors.IsNotFound(err) {
-			// Create the ConfigMap if it doesn't exist
-			configMap = &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      configMapName.Name,
-					Namespace: configMapName.Namespace,
-				},
-				Data: map[string]string{
-					featureflags.FeatureFlagsKey: fmt.Sprintf("%s: %v",
-						featureflags.EnableNetworkPolicyKey, featureflags.NetworkPolicyDefaultValue),
-				},
-			}
-			if err = client.Create(ctx, configMap); err != nil {
-				return nil, fmt.Errorf("failed to create ConfigMap: %w", err)
-			}
-		} else {
-			return nil, fmt.Errorf("failed to read ConfigMap: %w", err)
+
+	if err = client.Get(ctx, configMapName, configMap); err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return nil, fmt.Errorf("failed to get ConfigMap: %w", err)
 		}
-	}
 
-	// Parse the feature flag
-	enableNetworkPolicy := featureflags.NetworkPolicyDefaultValue
-	if featureFlagsYAML, exists := configMap.Data[featureflags.FeatureFlagsKey]; exists {
-		var flags featureflags.FeatureFlags
-		if err := yaml.Unmarshal([]byte(featureFlagsYAML), &flags); err != nil {
-			log.Error(err, "failed to parse feature flags")
-		} else if flags.EnableNetworkPolicy != "" {
-			enableNetworkPolicy = strings.ToLower(flags.EnableNetworkPolicy) == "true"
+		// ConfigMap doesn't exist, create it with defaults
+		configMap, err = createDefaultConfigMap(configMapName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to generate default configMap: %w", err)
+		}
+
+		if err = client.Create(ctx, configMap); err != nil {
+			return nil, fmt.Errorf("failed to create ConfigMap: %w", err)
 		}
 	}
+
+	// Parse feature flags from ConfigMap
+	enableNetworkPolicy, err := parseFeatureFlags(configMap.Data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse feature flags: %w", err)
+	}
 	return &LlamaStackDistributionReconciler{
 		Client:  client,
 		Scheme:  scheme,
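Taken together, the constructor's startup path is now: Get the ConfigMap; on NotFound, build it with `createDefaultConfigMap` and create it; then derive the flag with `parseFeatureFlags`, failing fast instead of merely logging parse errors as the old code did. A hedged usage sketch of the parse step (the YAML key names assume the tags sketched earlier):

```go
func exampleParseFeatureFlags() {
	// Well-formed flags: returns the parsed bool.
	enabled, err := parseFeatureFlags(map[string]string{
		featureflags.FeatureFlagsKey: "enableNetworkPolicy:\n  enabled: true\n",
	})
	fmt.Println(enabled, err) // true <nil>

	// Key absent: the compiled-in default is returned, with no error.
	enabled, err = parseFeatureFlags(map[string]string{})
	fmt.Println(enabled, err) // featureflags.NetworkPolicyDefaultValue <nil>
}
```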

controllers/resource_helper.go

Lines changed: 2 additions & 3 deletions
@@ -100,11 +100,10 @@ func configurePodStorage(instance *llamav1alpha1.LlamaStackDistribution, contain
 func (r *LlamaStackDistributionReconciler) validateDistribution(instance *llamav1alpha1.LlamaStackDistribution) error {
 	// If using distribution name, validate it exists in clusterInfo
 	if instance.Spec.Server.Distribution.Name != "" {
-		clusterInfo := r.ClusterInfo
-		if clusterInfo == nil {
+		if r.ClusterInfo == nil {
 			return errors.New("failed to initialize cluster info")
 		}
-		if _, exists := clusterInfo.DistributionImages[instance.Spec.Server.Distribution.Name]; !exists {
+		if _, exists := r.ClusterInfo.DistributionImages[instance.Spec.Server.Distribution.Name]; !exists {
 			return fmt.Errorf("failed to validate distribution: %s. Distribution name not supported", instance.Spec.Server.Distribution.Name)
 		}
 	}
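For context, the `DistributionImages` map consulted here pairs supported distribution names with their images. An illustrative shape — the value type and the entry are assumptions, not taken from this commit:

```go
// Illustrative only: validateDistribution succeeds when the CR's
// distribution name appears as a key in this map.
var clusterInfo = &cluster.ClusterInfo{
	DistributionImages: map[string]string{
		"ollama": "docker.io/llamastack/distribution-ollama:latest", // hypothetical entry
	},
}
```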
