diff --git a/PROJECT b/PROJECT
index cbdb56a..dba03e9 100644
--- a/PROJECT
+++ b/PROJECT
@@ -98,4 +98,13 @@ resources:
   kind: ChiaNetwork
   path: github.com/chia-network/chia-operator/api/v1
   version: v1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: chia.net
+  group: k8s
+  kind: ChiaDataLayer
+  path: github.com/chia-network/chia-operator/api/v1
+  version: v1
 version: "3"
diff --git a/README.md b/README.md
index 009f2d9..fb6e62a 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@ Kubernetes operator for managing Chia components in kubernetes. Currently suppor
 - introducers
 - seeders
 - crawlers
+- data_layer
 
 Easily run Chia components in Kubernetes by applying simple manifests. A whole farm can be ran with each component isolated in its own pod, with a chia-exporter sidecar to scrape Prometheus metrics.
 
diff --git a/api/v1/chiacommon_types.go b/api/v1/chiacommon_types.go
index 4d0e224..82e2536 100644
--- a/api/v1/chiacommon_types.go
+++ b/api/v1/chiacommon_types.go
@@ -237,37 +237,6 @@ type AdditionalMetadata struct {
 	Annotations map[string]string `json:"annotations,omitempty"`
 }
 
-/*
-Full storage config example:
-
-storage:
-  chiaRoot:
-    // Only one of persistentVolumeClaim or hostPathVolume should be specified, persistentVolumeClaim will be preferred if both are specified
-    persistentVolumeClaim:
-	  claimName: "chiaroot-data"
-	hostPathVolume:
-      path: "/home/user/storage/chiaroot"
-
-  plots:
-    persistentVolumeClaim:
-	  - claimName: "plot1"
-	  - claimName: "plot2"
-	hostPathVolume:
-	  - path: "/home/user/storage/plots1"
-	  - path: "/home/user/storage/plots2"
-*/
-
-// StorageConfig contains storage configuration settings
-type StorageConfig struct {
-	// Storage configuration for CHIA_ROOT
-	// +optional
-	ChiaRoot *ChiaRootConfig `json:"chiaRoot,omitempty"`
-
-	// Storage configuration for harvester plots
-	// +optional
-	Plots *PlotsConfig `json:"plots,omitempty"`
-}
-
 // Service contains kubernetes Service related configuration options
 type Service struct {
 	AdditionalMetadata `json:",inline"`
@@ -307,6 +276,21 @@ type Service struct {
 	RollIntoPeerService *bool `json:"rollIntoPeerService,omitempty"`
 }
 
+// StorageConfig contains storage configuration settings
+type StorageConfig struct {
+	// Storage configuration for CHIA_ROOT
+	// +optional
+	ChiaRoot *ChiaRootConfig `json:"chiaRoot,omitempty"`
+
+	// Storage configuration for harvester plots
+	// +optional
+	Plots *PlotsConfig `json:"plots,omitempty"`
+
+	// Storage configuration for data_layer server files
+	// +optional
+	DataLayerServerFiles *DataLayerServerFilesConfig `json:"dataLayerServerFiles,omitempty"`
+}
+
 // ChiaRootConfig optional config for CHIA_ROOT persistent storage, likely only needed for Chia full_nodes, but may help in startup time for other components.
 // Both options may be specified but only one can be used, therefore PersistentVolumeClaims will be respected over HostPath volumes if both are specified.
 type ChiaRootConfig struct {
@@ -314,7 +298,7 @@ type ChiaRootConfig struct {
 	// +optional
 	PersistentVolumeClaim *PersistentVolumeClaimConfig `json:"persistentVolumeClaim,omitempty"`
 
-	// HostPathVolume use an existing persistent volume claim to store CHIA_ROOT data
+	// HostPathVolume use an existing directory on the host to store CHIA_ROOT data
 	// +optional
 	HostPathVolume *HostPathVolumeConfig `json:"hostPathVolume,omitempty"`
 }
@@ -331,6 +315,18 @@ type PlotsConfig struct {
 	HostPathVolume []*HostPathVolumeConfig `json:"hostPathVolume,omitempty"`
 }
 
+// DataLayerServerFilesConfig optional config for data_layer server file persistent storage.
+// Both options may be specified but only one can be used, therefore PersistentVolumeClaims will be respected over HostPath volumes if both are specified.
+type DataLayerServerFilesConfig struct {
+	// PersistentVolumeClaim use an existing persistent volume claim to store server files
+	// +optional
+	PersistentVolumeClaim *PersistentVolumeClaimConfig `json:"persistentVolumeClaim,omitempty"`
+
+	// HostPathVolume use an existing directory on the host to store server files
+	// +optional
+	HostPathVolume *HostPathVolumeConfig `json:"hostPathVolume,omitempty"`
+}
+
 // PersistentVolumeClaimConfig config for PVC volumes in kubernetes
 type PersistentVolumeClaimConfig struct {
 	// ClaimName is the name of an existing PersistentVolumeClaim in the target namespace
diff --git a/api/v1/chiadatalayer_types.go b/api/v1/chiadatalayer_types.go
new file mode 100644
index 0000000..5cbb6dc
--- /dev/null
+++ b/api/v1/chiadatalayer_types.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2024 Chia Network Inc.
+*/
+
+package v1
+
+import (
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ChiaDataLayerSpec defines the desired state of ChiaDataLayer
+type ChiaDataLayerSpec struct {
+	CommonSpec `json:",inline"`
+
+	// ChiaConfig defines the configuration options available to Chia component containers
+	ChiaConfig ChiaDataLayerSpecChia `json:"chia"`
+
+	// DataLayerHTTPConfig defines the desired state of an optional data_layer_http sidecar
+	DataLayerHTTPConfig ChiaDataLayerHTTPSpecChia `json:"dataLayerHTTP,omitempty"`
+
+	// Strategy describes how to replace existing pods with new ones.
+	// +optional
+	Strategy *appsv1.DeploymentStrategy `json:"strategy,omitempty"`
+}
+
+// ChiaDataLayerSpecChia defines the desired state of Chia component configuration
+type ChiaDataLayerSpecChia struct {
+	CommonSpecChia `json:",inline"`
+
+	// CASecretName is the name of the secret that contains the CA crt and key.
+	// +optional
+	CASecretName *string `json:"caSecretName,omitempty"`
+
+	// SecretKey defines the k8s Secret name and key for a Chia mnemonic
+	SecretKey ChiaSecretKey `json:"secretKey"`
+
+	// FullNodePeers is a list of hostnames/IPs and port numbers to full_node peers.
+	// Either fullNodePeer or fullNodePeers should be specified. fullNodePeers takes precedence.
+	// +optional
+	FullNodePeers *[]Peer `json:"fullNodePeers,omitempty"`
+
+	// TrustedCIDRs is a list of CIDRs that this chia component should trust peers from
+	// See: https://docs.chia.net/faq/?_highlight=trust#what-are-trusted-peers-and-how-do-i-add-them
+	// +optional
+	TrustedCIDRs *[]string `json:"trustedCIDRs,omitempty"`
+}
+
+// ChiaDataLayerHTTPSpecChia defines the desired state of an optional data_layer_http sidecar
+// data_layer_http is a chia component, and therefore inherits most of the generic configuration options for any chia component
+type ChiaDataLayerHTTPSpecChia struct {
+	CommonSpecChia `json:",inline"`
+
+	// Enabled defines whether a data_layer_http sidecar container should run as a sidecar to the chia container
+	// +kubebuilder:default=true
+	// +optional
+	Enabled bool `json:"enabled,omitempty"`
+
+	// CASecretName is the name of the secret that contains the CA crt and key.
+	// +optional
+	CASecretName *string `json:"caSecretName,omitempty"`
+
+	// Service defines settings for the Service optionally installed with any data_layer_http resource.
+	// This Service will default to being enabled with a ClusterIP Service type if data_layer_http is enabled.
+	// +optional
+	Service Service `json:"service,omitempty"`
+}
+
+// ChiaDataLayerStatus defines the observed state of ChiaDataLayer
+type ChiaDataLayerStatus struct {
+	// Ready says whether the chia component is ready, this should be true when the data_layer resource is in the target namespace
+	// +kubebuilder:default=false
+	Ready bool `json:"ready,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// ChiaDataLayer is the Schema for the chiadatalayers API
+type ChiaDataLayer struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ChiaDataLayerSpec   `json:"spec,omitempty"`
+	Status ChiaDataLayerStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ChiaDataLayerList contains a list of ChiaDataLayer
+type ChiaDataLayerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ChiaDataLayer `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ChiaDataLayer{}, &ChiaDataLayerList{})
+}
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index f09d6b6..1a3ce34 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -262,6 +262,164 @@ func (in *ChiaCrawlerStatus) DeepCopy() *ChiaCrawlerStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayer) DeepCopyInto(out *ChiaDataLayer) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayer.
+func (in *ChiaDataLayer) DeepCopy() *ChiaDataLayer {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChiaDataLayer) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayerHTTPSpecChia) DeepCopyInto(out *ChiaDataLayerHTTPSpecChia) {
+	*out = *in
+	in.CommonSpecChia.DeepCopyInto(&out.CommonSpecChia)
+	if in.CASecretName != nil {
+		in, out := &in.CASecretName, &out.CASecretName
+		*out = new(string)
+		**out = **in
+	}
+	in.Service.DeepCopyInto(&out.Service)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayerHTTPSpecChia.
+func (in *ChiaDataLayerHTTPSpecChia) DeepCopy() *ChiaDataLayerHTTPSpecChia {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayerHTTPSpecChia)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayerList) DeepCopyInto(out *ChiaDataLayerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ChiaDataLayer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayerList.
+func (in *ChiaDataLayerList) DeepCopy() *ChiaDataLayerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChiaDataLayerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayerSpec) DeepCopyInto(out *ChiaDataLayerSpec) {
+	*out = *in
+	in.CommonSpec.DeepCopyInto(&out.CommonSpec)
+	in.ChiaConfig.DeepCopyInto(&out.ChiaConfig)
+	in.DataLayerHTTPConfig.DeepCopyInto(&out.DataLayerHTTPConfig)
+	if in.Strategy != nil {
+		in, out := &in.Strategy, &out.Strategy
+		*out = (*in).DeepCopy()
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayerSpec.
+func (in *ChiaDataLayerSpec) DeepCopy() *ChiaDataLayerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayerSpecChia) DeepCopyInto(out *ChiaDataLayerSpecChia) {
+	*out = *in
+	in.CommonSpecChia.DeepCopyInto(&out.CommonSpecChia)
+	if in.CASecretName != nil {
+		in, out := &in.CASecretName, &out.CASecretName
+		*out = new(string)
+		**out = **in
+	}
+	out.SecretKey = in.SecretKey
+	if in.FullNodePeers != nil {
+		in, out := &in.FullNodePeers, &out.FullNodePeers
+		*out = new([]Peer)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]Peer, len(*in))
+			copy(*out, *in)
+		}
+	}
+	if in.TrustedCIDRs != nil {
+		in, out := &in.TrustedCIDRs, &out.TrustedCIDRs
+		*out = new([]string)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]string, len(*in))
+			copy(*out, *in)
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayerSpecChia.
+func (in *ChiaDataLayerSpecChia) DeepCopy() *ChiaDataLayerSpecChia {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayerSpecChia)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChiaDataLayerStatus) DeepCopyInto(out *ChiaDataLayerStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiaDataLayerStatus.
+func (in *ChiaDataLayerStatus) DeepCopy() *ChiaDataLayerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ChiaDataLayerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ChiaFarmer) DeepCopyInto(out *ChiaFarmer) {
 	*out = *in
diff --git a/cmd/main.go b/cmd/main.go
index a3631e2..1cdd57d 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -19,6 +19,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	k8schianetv1 "github.com/chia-network/chia-operator/api/v1"
 	"github.com/chia-network/chia-operator/internal/controller/chiaca"
 	"github.com/chia-network/chia-operator/internal/controller/chiacrawler"
+	"github.com/chia-network/chia-operator/internal/controller/chiadatalayer"
 	"github.com/chia-network/chia-operator/internal/controller/chiafarmer"
@@ -164,6 +165,14 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "ChiaNetwork")
 		os.Exit(1)
 	}
+	if err = (&chiadatalayer.ChiaDataLayerReconciler{
+		Client:   mgr.GetClient(),
+		Scheme:   mgr.GetScheme(),
+		Recorder: mgr.GetEventRecorderFor("chiadatalayer-controller"),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "ChiaDataLayer")
+		os.Exit(1)
+	}
 	//+kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index d39c9d4..9b6c503 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -12,6 +12,7 @@ resources:
 - bases/k8s.chia.net_chiaintroducers.yaml
 - bases/k8s.chia.net_chiacrawlers.yaml
 - bases/k8s.chia.net_chianetworks.yaml
+- bases/k8s.chia.net_chiadatalayers.yaml
 #+kubebuilder:scaffold:crdkustomizeresource
 
 patchesStrategicMerge:
@@ -36,6 +37,7 @@ patchesStrategicMerge:
 #- path: patches/cainjection_in_chiaintroducers.yaml
 #- path: patches/cainjection_in_chiacrawlers.yaml
 #- path: patches/cainjection_in_chianetworks.yaml
+#- path: patches/cainjection_in_chiadatalayers.yaml
 #+kubebuilder:scaffold:crdkustomizecainjectionpatch
 
 # the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index b406b48..5d0feb3 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -15,7 +15,7 @@ namePrefix: chia-operator-
 #    someName: someValue
 
 resources:
-#- ../crd # Commented to avoid `make release` building the CRDs into the manager manifests
+- ../crd
 - ../rbac
 - ../manager
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
diff --git a/config/rbac/chiadatalayer_editor_role.yaml b/config/rbac/chiadatalayer_editor_role.yaml
new file mode 100644
index 0000000..cef66f6
--- /dev/null
+++ b/config/rbac/chiadatalayer_editor_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to edit chiadatalayers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: chia-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: chiadatalayer-editor-role
+rules:
+- apiGroups:
+  - k8s.chia.net
+  resources:
+  - chiadatalayers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - k8s.chia.net
+  resources:
+  - chiadatalayers/status
+  verbs:
+  - get
diff --git a/config/rbac/chiadatalayer_viewer_role.yaml b/config/rbac/chiadatalayer_viewer_role.yaml
new file mode 100644
index 0000000..2fc85a5
--- /dev/null
+++ b/config/rbac/chiadatalayer_viewer_role.yaml
@@ -0,0 +1,23 @@
+# permissions for end users to view chiadatalayers.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: chia-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: chiadatalayer-viewer-role
+rules:
+- apiGroups:
+  - k8s.chia.net
+  resources:
+  - chiadatalayers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - k8s.chia.net
+  resources:
+  - chiadatalayers/status
+  verbs:
+  - get
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
index b2384da..f9180e2 100644
--- a/config/rbac/kustomization.yaml
+++ b/config/rbac/kustomization.yaml
@@ -13,9 +13,11 @@ resources:
 # default, aiding admins in cluster management. Those roles are
 # not used by the Project itself. You can comment the following lines
 # if you do not want those helpers be installed with your Project.
+- chiadatalayer_editor_role.yaml
+- chiadatalayer_viewer_role.yaml
 - chianetwork_editor_role.yaml
 - chianetwork_viewer_role.yaml
 - chiacrawler_editor_role.yaml
 - chiacrawler_viewer_role.yaml
 - chiaintroducer_editor_role.yaml
 - chiaintroducer_viewer_role.yaml
diff --git a/config/samples/k8s_v1_chiadatalayer.yaml b/config/samples/k8s_v1_chiadatalayer.yaml
new file mode 100644
index 0000000..d7a599e
--- /dev/null
+++ b/config/samples/k8s_v1_chiadatalayer.yaml
@@ -0,0 +1,9 @@
+apiVersion: k8s.chia.net/v1
+kind: ChiaDataLayer
+metadata:
+  labels:
+    app.kubernetes.io/name: chia-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: chiadatalayer-sample
+spec:
+  # TODO(user): Add fields here
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index 57e7f00..d3e2e6a 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -3,4 +3,5 @@ resources:
 - chiaintroducer.yaml
 - chiacrawler.yaml
 - chianetwork.yaml
+- k8s_v1_chiadatalayer.yaml
 # +kubebuilder:scaffold:manifestskustomizesamples
diff --git a/docs/chiadatalayer.md b/docs/chiadatalayer.md
new file mode 100644
index 0000000..bc6e98f
--- /dev/null
+++ b/docs/chiadatalayer.md
@@ -0,0 +1,17 @@
+# ChiaDataLayer
+
+ChiaDataLayers run the data_layer Chia component, which comes bundled with a Chia wallet. In the future, the wallet may be able to run separately, but that is not currently possible.
+
+The data_layer_http server runs as an optional sidecar. In a future release, it may be possible to run the HTTP server separately from the data_layer server, but it is not currently implemented.
+
+Here's a minimal ChiaDataLayer example custom resource (CR):
+
+```yaml
+apiVersion: k8s.chia.net/v1
+kind: ChiaDataLayer
+metadata:
+  name: my-datalayer
+spec:
+  chia:
+    caSecretName: chiaca-secret # A kubernetes Secret containing certificate authority files
+```
diff --git a/internal/controller/chiacrawler/controller.go b/internal/controller/chiacrawler/controller.go
index 5398d01..ee3d453 100644
--- a/internal/controller/chiacrawler/controller.go
+++ b/internal/controller/chiacrawler/controller.go
@@ -144,7 +144,7 @@ func (r *ChiaCrawlerReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(crawler.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(crawler.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(crawler)
 		if err != nil {
 			r.Recorder.Event(&crawler, corev1.EventTypeWarning, "Failed", "Failed to assemble crawler PVC -- Check operator logs.")
diff --git a/internal/controller/chiacrawler/helpers.go b/internal/controller/chiacrawler/helpers.go
index 9a4dc49..817f6f3 100644
--- a/internal/controller/chiacrawler/helpers.go
+++ b/internal/controller/chiacrawler/helpers.go
@@ -30,7 +30,7 @@ func getChiaVolumes(crawler k8schianetv1.ChiaCrawler) []corev1.Volume {
 	}
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(crawler.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(crawler.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiadatalayer/assemblers.go b/internal/controller/chiadatalayer/assemblers.go
new file mode 100644
index 0000000..e41148a
--- /dev/null
+++ b/internal/controller/chiadatalayer/assemblers.go
@@ -0,0 +1,372 @@
+/*
+Copyright 2024 Chia Network Inc.
+*/
+
+package chiadatalayer
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+
+	k8schianetv1 "github.com/chia-network/chia-operator/api/v1"
+	"github.com/chia-network/chia-operator/internal/controller/common/consts"
+	"github.com/chia-network/chia-operator/internal/controller/common/kube"
+)
+
+const chiadatalayerNamePattern = "%s-data-layer"
+
+// assembleDaemonService assembles the daemon Service resource for a ChiaDataLayer CR
+func assembleDaemonService(datalayer k8schianetv1.ChiaDataLayer) corev1.Service {
+	inputs := kube.AssembleCommonServiceInputs{
+		Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-daemon",
+		Namespace: datalayer.Namespace,
+		Ports:     kube.GetChiaDaemonServicePorts(),
+	}
+
+	inputs.ServiceType = datalayer.Spec.ChiaConfig.DaemonService.ServiceType
+	inputs.ExternalTrafficPolicy = datalayer.Spec.ChiaConfig.DaemonService.ExternalTrafficPolicy
+	inputs.SessionAffinity = datalayer.Spec.ChiaConfig.DaemonService.SessionAffinity
+	inputs.SessionAffinityConfig = datalayer.Spec.ChiaConfig.DaemonService.SessionAffinityConfig
+	inputs.IPFamilyPolicy = datalayer.Spec.ChiaConfig.DaemonService.IPFamilyPolicy
+	inputs.IPFamilies = datalayer.Spec.ChiaConfig.DaemonService.IPFamilies
+
+	// Labels
+	var additionalServiceLabels = make(map[string]string)
+	if datalayer.Spec.ChiaConfig.DaemonService.Labels != nil {
+		additionalServiceLabels = datalayer.Spec.ChiaConfig.DaemonService.Labels
+	}
+	inputs.Labels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels, additionalServiceLabels)
+	inputs.SelectorLabels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels)
+
+	// Annotations
+	var additionalServiceAnnotations = make(map[string]string)
+	if datalayer.Spec.ChiaConfig.DaemonService.Annotations != nil {
+		additionalServiceAnnotations = datalayer.Spec.ChiaConfig.DaemonService.Annotations
+	}
+	inputs.Annotations = kube.CombineMaps(datalayer.Spec.AdditionalMetadata.Annotations, additionalServiceAnnotations)
+
+	return kube.AssembleCommonService(inputs)
+}
+
+// assembleRPCService assembles the RPC Service resource for a ChiaDataLayer CR
+func assembleRPCService(datalayer k8schianetv1.ChiaDataLayer) corev1.Service {
+	inputs := kube.AssembleCommonServiceInputs{
+		Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-rpc",
+		Namespace: datalayer.Namespace,
+		Ports: []corev1.ServicePort{
+			{
+				Port:       consts.DataLayerRPCPort,
+				TargetPort: intstr.FromString("rpc"),
+				Protocol:   "TCP",
+				Name:       "rpc",
+			},
+			{
+				Port:       consts.WalletRPCPort,
+				TargetPort: intstr.FromString("wallet-rpc"),
+				Protocol:   "TCP",
+				Name:       "wallet-rpc",
+			},
+		},
+	}
+
+	inputs.ServiceType = datalayer.Spec.ChiaConfig.RPCService.ServiceType
+	inputs.ExternalTrafficPolicy = datalayer.Spec.ChiaConfig.RPCService.ExternalTrafficPolicy
+	inputs.SessionAffinity = datalayer.Spec.ChiaConfig.RPCService.SessionAffinity
+	inputs.SessionAffinityConfig = datalayer.Spec.ChiaConfig.RPCService.SessionAffinityConfig
+	inputs.IPFamilyPolicy = datalayer.Spec.ChiaConfig.RPCService.IPFamilyPolicy
+	inputs.IPFamilies = datalayer.Spec.ChiaConfig.RPCService.IPFamilies
+
+	// Labels
+	var additionalServiceLabels = make(map[string]string)
+	if datalayer.Spec.ChiaConfig.RPCService.Labels != nil {
+		additionalServiceLabels = datalayer.Spec.ChiaConfig.RPCService.Labels
+	}
+	inputs.Labels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels, additionalServiceLabels)
+	inputs.SelectorLabels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels)
+
+	// Annotations
+	var additionalServiceAnnotations = make(map[string]string)
+	if datalayer.Spec.ChiaConfig.RPCService.Annotations != nil {
+		additionalServiceAnnotations = datalayer.Spec.ChiaConfig.RPCService.Annotations
+	}
+	inputs.Annotations = kube.CombineMaps(datalayer.Spec.AdditionalMetadata.Annotations, additionalServiceAnnotations)
+
+	return kube.AssembleCommonService(inputs)
+}
+
+// assembleDataLayerHTTPService assembles the data_layer_http Service resource for a ChiaDataLayer CR
+func assembleDataLayerHTTPService(datalayer k8schianetv1.ChiaDataLayer) corev1.Service {
+	inputs := kube.AssembleCommonServiceInputs{
+		Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-http",
+		Namespace: datalayer.Namespace,
+		Ports: []corev1.ServicePort{
+			{
+				Port:       consts.DataLayerHTTPPort,
+				TargetPort: intstr.FromString("http"),
+				Protocol:   "TCP",
+				Name:       "http",
+			},
+		},
+	}
+
+	inputs.ServiceType = datalayer.Spec.DataLayerHTTPConfig.Service.ServiceType
+	inputs.ExternalTrafficPolicy = datalayer.Spec.DataLayerHTTPConfig.Service.ExternalTrafficPolicy
+	inputs.SessionAffinity = datalayer.Spec.DataLayerHTTPConfig.Service.SessionAffinity
+	inputs.SessionAffinityConfig = datalayer.Spec.DataLayerHTTPConfig.Service.SessionAffinityConfig
+	inputs.IPFamilyPolicy = datalayer.Spec.DataLayerHTTPConfig.Service.IPFamilyPolicy
+	inputs.IPFamilies = datalayer.Spec.DataLayerHTTPConfig.Service.IPFamilies
+
+	// Labels
+	var additionalServiceLabels = make(map[string]string)
+	if datalayer.Spec.DataLayerHTTPConfig.Service.Labels != nil {
+		additionalServiceLabels = datalayer.Spec.DataLayerHTTPConfig.Service.Labels
+	}
+	inputs.Labels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels, additionalServiceLabels)
+	inputs.SelectorLabels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels)
+
+	// Annotations
+	var additionalServiceAnnotations = make(map[string]string)
+	if datalayer.Spec.DataLayerHTTPConfig.Service.Annotations != nil {
+		additionalServiceAnnotations = datalayer.Spec.DataLayerHTTPConfig.Service.Annotations
+	}
+	inputs.Annotations = kube.CombineMaps(datalayer.Spec.AdditionalMetadata.Annotations, additionalServiceAnnotations)
+
+	return kube.AssembleCommonService(inputs)
+}
+
+// assembleChiaExporterService assembles the chia-exporter Service resource for a ChiaDataLayer CR
+func assembleChiaExporterService(datalayer k8schianetv1.ChiaDataLayer) corev1.Service {
+	inputs := kube.AssembleCommonServiceInputs{
+		Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-metrics",
+		Namespace: datalayer.Namespace,
+		Ports:     kube.GetChiaExporterServicePorts(),
+	}
+
+	inputs.ServiceType = datalayer.Spec.ChiaExporterConfig.Service.ServiceType
+	inputs.ExternalTrafficPolicy = datalayer.Spec.ChiaExporterConfig.Service.ExternalTrafficPolicy
+	inputs.SessionAffinity = datalayer.Spec.ChiaExporterConfig.Service.SessionAffinity
+	inputs.SessionAffinityConfig = datalayer.Spec.ChiaExporterConfig.Service.SessionAffinityConfig
+	inputs.IPFamilyPolicy = datalayer.Spec.ChiaExporterConfig.Service.IPFamilyPolicy
+	inputs.IPFamilies = datalayer.Spec.ChiaExporterConfig.Service.IPFamilies
+
+	// Labels
+	var additionalServiceLabels = make(map[string]string)
+	if datalayer.Spec.ChiaExporterConfig.Service.Labels != nil {
+		additionalServiceLabels = datalayer.Spec.ChiaExporterConfig.Service.Labels
+	}
+	inputs.Labels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels, additionalServiceLabels)
+	inputs.SelectorLabels = kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels)
+
+	// Annotations
+	var additionalServiceAnnotations = make(map[string]string)
+	if datalayer.Spec.ChiaExporterConfig.Service.Annotations != nil {
+		additionalServiceAnnotations = datalayer.Spec.ChiaExporterConfig.Service.Annotations
+	}
+	inputs.Annotations = kube.CombineMaps(datalayer.Spec.AdditionalMetadata.Annotations, additionalServiceAnnotations)
+
+	return kube.AssembleCommonService(inputs)
+}
+
+// assembleChiaRootVolumeClaim assembles the CHIA_ROOT PVC resource for a ChiaDataLayer CR
+func assembleChiaRootVolumeClaim(datalayer k8schianetv1.ChiaDataLayer) (*corev1.PersistentVolumeClaim, error) {
+	if datalayer.Spec.Storage == nil || datalayer.Spec.Storage.ChiaRoot == nil || datalayer.Spec.Storage.ChiaRoot.PersistentVolumeClaim == nil {
+		return nil, nil
+	}
+
+	resourceReq, err := resource.ParseQuantity(datalayer.Spec.Storage.ChiaRoot.PersistentVolumeClaim.ResourceRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	accessModes := []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}
+	if len(datalayer.Spec.Storage.ChiaRoot.PersistentVolumeClaim.AccessModes) != 0 {
+		accessModes = datalayer.Spec.Storage.ChiaRoot.PersistentVolumeClaim.AccessModes
+	}
+
+	pvc := corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name),
+			Namespace: datalayer.Namespace,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes:      accessModes,
+			StorageClassName: &datalayer.Spec.Storage.ChiaRoot.PersistentVolumeClaim.StorageClass,
+			Resources: corev1.VolumeResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resourceReq,
+				},
+			},
+		},
+	}
+
+	return &pvc, nil
+}
+
+// assembleDataLayerFilesVolumeClaim assembles the data_layer server files PVC resource for a ChiaDataLayer CR
+func assembleDataLayerFilesVolumeClaim(datalayer k8schianetv1.ChiaDataLayer) (*corev1.PersistentVolumeClaim, error) {
+	if datalayer.Spec.Storage == nil || datalayer.Spec.Storage.DataLayerServerFiles == nil || datalayer.Spec.Storage.DataLayerServerFiles.PersistentVolumeClaim == nil {
+		return nil, nil
+	}
+
+	resourceReq, err := resource.ParseQuantity(datalayer.Spec.Storage.DataLayerServerFiles.PersistentVolumeClaim.ResourceRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	accessModes := []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}
+	if len(datalayer.Spec.Storage.DataLayerServerFiles.PersistentVolumeClaim.AccessModes) != 0 {
+		accessModes = datalayer.Spec.Storage.DataLayerServerFiles.PersistentVolumeClaim.AccessModes
+	}
+
+	pvc := corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-server",
+			Namespace: datalayer.Namespace,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes:      accessModes,
+			StorageClassName: &datalayer.Spec.Storage.DataLayerServerFiles.PersistentVolumeClaim.StorageClass,
+			Resources: corev1.VolumeResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resourceReq,
+				},
+			},
+		},
+	}
+
+	return &pvc, nil
+}
+
+// assembleDeployment assembles the datalayer Deployment resource for a ChiaDataLayer CR
+func assembleDeployment(ctx context.Context, datalayer k8schianetv1.ChiaDataLayer, networkData *map[string]string) (appsv1.Deployment, error) {
+	var deploy = appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name),
+			Namespace:   datalayer.Namespace,
+			Labels:      kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels),
+			Annotations: datalayer.Spec.AdditionalMetadata.Annotations,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta),
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:      kube.GetCommonLabels(datalayer.Kind, datalayer.ObjectMeta, datalayer.Spec.AdditionalMetadata.Labels),
+					Annotations: datalayer.Spec.AdditionalMetadata.Annotations,
+				},
+				Spec: corev1.PodSpec{
+					Affinity:                  datalayer.Spec.Affinity,
+					TopologySpreadConstraints: datalayer.Spec.TopologySpreadConstraints,
+					NodeSelector:              datalayer.Spec.NodeSelector,
+					Volumes:                   getChiaVolumes(datalayer),
+				},
+			},
+		},
+	}
+
+	if datalayer.Spec.ServiceAccountName != nil && *datalayer.Spec.ServiceAccountName != "" {
+		deploy.Spec.Template.Spec.ServiceAccountName = *datalayer.Spec.ServiceAccountName
+	}
+
+	chiaContainer, err := assembleChiaContainer(ctx, datalayer, networkData)
+	if err != nil {
+		return appsv1.Deployment{}, err
+	}
+	deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, chiaContainer)
+
+	// Get Init Containers
+	deploy.Spec.Template.Spec.InitContainers = kube.GetExtraContainers(datalayer.Spec.InitContainers, chiaContainer)
+	// Add Init Container Volumes
+	for _, init := range datalayer.Spec.InitContainers {
+		deploy.Spec.Template.Spec.Volumes = append(deploy.Spec.Template.Spec.Volumes, init.Volumes...)
+	}
+	// Get Sidecar Containers
+	deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, kube.GetExtraContainers(datalayer.Spec.Sidecars, chiaContainer)...)
+	// Add Sidecar Container Volumes
+	for _, sidecar := range datalayer.Spec.Sidecars {
+		deploy.Spec.Template.Spec.Volumes = append(deploy.Spec.Template.Spec.Volumes, sidecar.Volumes...)
+	}
+
+	if datalayer.Spec.ImagePullSecrets != nil && len(*datalayer.Spec.ImagePullSecrets) != 0 {
+		deploy.Spec.Template.Spec.ImagePullSecrets = *datalayer.Spec.ImagePullSecrets
+	}
+
+	if datalayer.Spec.ChiaExporterConfig.Enabled {
+		deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, assembleChiaExporterContainer(datalayer))
+	}
+
+	if datalayer.Spec.Strategy != nil {
+		deploy.Spec.Strategy = *datalayer.Spec.Strategy
+	}
+
+	if datalayer.Spec.PodSecurityContext != nil {
+		deploy.Spec.Template.Spec.SecurityContext = datalayer.Spec.PodSecurityContext
+	}
+
+	// TODO add pod tolerations
+
+	return deploy, nil
+}
+
+func assembleChiaContainer(ctx context.Context, datalayer k8schianetv1.ChiaDataLayer, networkData *map[string]string) (corev1.Container, error) {
+	input := kube.AssembleChiaContainerInputs{
+		Image:           datalayer.Spec.ChiaConfig.Image,
+		ImagePullPolicy: datalayer.Spec.ImagePullPolicy,
+		Ports:           getChiaPorts(),
+		VolumeMounts:    getChiaVolumeMounts(datalayer),
+	}
+
+	env, err := getChiaEnv(ctx, datalayer, networkData)
+	if err != nil {
+		return corev1.Container{}, err
+	}
+	input.Env = env
+
+	if datalayer.Spec.ChiaConfig.SecurityContext != nil {
+		input.SecurityContext = datalayer.Spec.ChiaConfig.SecurityContext
+	}
+
+	if datalayer.Spec.ChiaConfig.LivenessProbe != nil {
+		input.LivenessProbe = datalayer.Spec.ChiaConfig.LivenessProbe
+	}
+
+	if datalayer.Spec.ChiaConfig.ReadinessProbe != nil {
+		input.ReadinessProbe = datalayer.Spec.ChiaConfig.ReadinessProbe
+	}
+
+	if datalayer.Spec.ChiaConfig.StartupProbe != nil {
+		input.StartupProbe = datalayer.Spec.ChiaConfig.StartupProbe
+	}
+
+	if datalayer.Spec.ChiaConfig.Resources != nil {
+		input.ResourceRequirements = datalayer.Spec.ChiaConfig.Resources
+	}
+
+	return kube.AssembleChiaContainer(input), nil
+}
+
+func assembleChiaExporterContainer(datalayer k8schianetv1.ChiaDataLayer) corev1.Container {
+	input := kube.AssembleChiaExporterContainerInputs{
+		Image:            datalayer.Spec.ChiaExporterConfig.Image,
+		ConfigSecretName: datalayer.Spec.ChiaExporterConfig.ConfigSecretName,
+		ImagePullPolicy:  datalayer.Spec.ImagePullPolicy,
+	}
+
+	if datalayer.Spec.ChiaConfig.SecurityContext != nil {
+		input.SecurityContext = datalayer.Spec.ChiaConfig.SecurityContext
+	}
+
+	if datalayer.Spec.ChiaConfig.Resources != nil {
+		input.ResourceRequirements = *datalayer.Spec.ChiaConfig.Resources
+	}
+
+	return kube.AssembleChiaExporterContainer(input)
+}
diff --git a/internal/controller/chiadatalayer/controller.go b/internal/controller/chiadatalayer/controller.go
new file mode 100644
index 0000000..a4d8adf
--- /dev/null
+++ b/internal/controller/chiadatalayer/controller.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2024 Chia Network Inc.
+*/
+
+package chiadatalayer
+
+import (
+	"context"
+	stdErrors "errors"
+	"fmt"
+	"strings"
+	"time"
+
+	k8schianetv1 "github.com/chia-network/chia-operator/api/v1"
+	"github.com/chia-network/chia-operator/internal/controller/common/kube"
+	"github.com/chia-network/chia-operator/internal/metrics"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// ChiaDataLayerReconciler reconciles a ChiaDataLayer object
+type ChiaDataLayerReconciler struct {
+	client.Client
+	Scheme   *runtime.Scheme
+	Recorder record.EventRecorder
+}
+
+var chiadatalayers = make(map[string]bool)
+
+//+kubebuilder:rbac:groups=k8s.chia.net,resources=chiadatalayers,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=k8s.chia.net,resources=chiadatalayers/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=k8s.chia.net,resources=chiadatalayers/finalizers,verbs=update
+//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch
+//+kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
+
+// Reconcile is invoked on any event to a controlled Kubernetes resource
+func (r *ChiaDataLayerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := log.FromContext(ctx)
+	log.Info("Running reconciler...")
+
+	// Get the custom resource
+	var datalayer k8schianetv1.ChiaDataLayer
+	err := r.Get(ctx, req.NamespacedName, &datalayer)
+	if err != nil && errors.IsNotFound(err) {
+		// Remove this object from the map for tracking and subtract this CR's total metric by 1
+		_, exists := chiadatalayers[req.NamespacedName.String()]
+		if exists {
+			delete(chiadatalayers, req.NamespacedName.String())
+			metrics.ChiaDataLayers.Sub(1.0)
+		}
+		return ctrl.Result{}, nil
+	}
+	if err != nil {
+		log.Error(err, "unable to fetch ChiaDataLayer resource")
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// Add this object to the tracking map and increment the gauge by 1, if it wasn't already added
+	_, exists := chiadatalayers[req.NamespacedName.String()]
+	if !exists {
+		chiadatalayers[req.NamespacedName.String()] = true
+		metrics.ChiaDataLayers.Add(1.0)
+	}
+
+	// Check for ChiaNetwork, retrieve matching ConfigMap if specified
+	networkData, err := kube.GetChiaNetworkData(ctx, r.Client, datalayer.Spec.ChiaConfig.CommonSpecChia, datalayer.Namespace)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Assemble Daemon Service
+	daemonSrv := assembleDaemonService(datalayer)
+	if err := controllerutil.SetControllerReference(&datalayer, &daemonSrv, r.Scheme); err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer daemon Service -- Check operator logs.")
+		return ctrl.Result{}, fmt.Errorf("encountered error assembling daemon Service: %v", err)
+	}
+	// Reconcile Daemon Service
+	res, err := kube.ReconcileService(ctx, r.Client, datalayer.Spec.ChiaConfig.DaemonService, daemonSrv, true)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to reconcile datalayer daemon Service -- Check operator logs.")
+		return res, err
+	}
+
+	// Assemble RPC Service
+	rpcSrv := assembleRPCService(datalayer)
+	if err := controllerutil.SetControllerReference(&datalayer, &rpcSrv, r.Scheme); err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer RPC Service -- Check operator logs.")
+		return ctrl.Result{}, fmt.Errorf("encountered error assembling RPC Service: %v", err)
+	}
+	// Reconcile RPC Service
+	res, err = kube.ReconcileService(ctx, r.Client, datalayer.Spec.ChiaConfig.RPCService, rpcSrv, true)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to reconcile datalayer RPC Service -- Check operator logs.")
+		return res, err
+	}
+
+	// Assemble HTTP Service
+	httpSrv := assembleDataLayerHTTPService(datalayer)
+	if err := controllerutil.SetControllerReference(&datalayer, &httpSrv, r.Scheme); err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer HTTP Service -- Check operator logs.")
+		return ctrl.Result{}, fmt.Errorf("encountered error assembling HTTP Service: %v", err)
+	}
+	// Reconcile HTTP Service
+	res, err = kube.ReconcileService(ctx, r.Client, datalayer.Spec.DataLayerHTTPConfig.Service, httpSrv, true)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to reconcile datalayer HTTP Service -- Check operator logs.")
+		return res, err
+	}
+
+	// Assemble Chia-Exporter Service
+	exporterSrv := assembleChiaExporterService(datalayer)
+	if err := controllerutil.SetControllerReference(&datalayer, &exporterSrv, r.Scheme); err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer chia-exporter Service -- Check operator logs.")
+		return ctrl.Result{}, fmt.Errorf("encountered error assembling chia-exporter Service: %v", err)
+	}
+	// Reconcile Chia-Exporter Service
+	res, err = kube.ReconcileService(ctx, r.Client, datalayer.Spec.ChiaExporterConfig.Service, exporterSrv, true)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to reconcile datalayer chia-exporter Service -- Check operator logs.")
+		return res, err
+	}
+
+	// Creates a CHIA_ROOT persistent volume claim if the GenerateVolumeClaims setting was set to true
+	if kube.ShouldMakeChiaRootVolumeClaim(datalayer.Spec.Storage) {
+		pvc, err := assembleChiaRootVolumeClaim(datalayer)
+		if err != nil {
+			r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer PVC -- Check operator logs.")
+			return reconcile.Result{}, err
+		}
+
+		if pvc != nil {
+			res, err = kube.ReconcilePersistentVolumeClaim(ctx, r.Client, datalayer.Spec.Storage, *pvc)
+			if err != nil {
+				r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to create datalayer CHIA_ROOT PVC -- Check operator logs.")
+				return res, err
+			}
+		} else {
+			return reconcile.Result{}, stdErrors.New("CHIA_ROOT PVC could not be created")
+		}
+	}
+
+	// Creates a data_layer server files persistent volume claim if the GenerateVolumeClaims setting was set to true
+	if kube.ShouldMakeDataLayerServerFilesVolumeClaim(datalayer.Spec.Storage) {
+		pvc, err := assembleDataLayerFilesVolumeClaim(datalayer)
+		if err != nil {
+			r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer server files PVC -- Check operator logs.")
+			return reconcile.Result{}, err
+		}
+
+		if pvc != nil {
+			res, err = kube.ReconcilePersistentVolumeClaim(ctx, r.Client, datalayer.Spec.Storage, *pvc)
+			if err != nil {
+				r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to create datalayer server files PVC -- Check operator logs.")
+				return res, err
+			}
+		} else {
+			return reconcile.Result{}, stdErrors.New("server files PVC could not be created")
+		}
+	}
+
+	// Assemble Deployment
+	deploy, err := assembleDeployment(ctx, datalayer, networkData)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer Deployment -- Check operator logs.")
+		return reconcile.Result{}, err
+	}
+	if err := controllerutil.SetControllerReference(&datalayer, &deploy, r.Scheme); err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to assemble datalayer Deployment -- Check operator logs.")
+		return reconcile.Result{}, err
+	}
+	// Reconcile Deployment
+	res, err = kube.ReconcileDeployment(ctx, r.Client, deploy)
+	if err != nil {
+		r.Recorder.Event(&datalayer, corev1.EventTypeWarning, "Failed", "Failed to create datalayer Deployment -- Check operator logs.")
+		return res, err
+	}
+
+	// Update CR status
+	r.Recorder.Event(&datalayer, corev1.EventTypeNormal, "Created", "Successfully created ChiaDataLayer resources.")
+	datalayer.Status.Ready = true
+	err = r.Status().Update(ctx, &datalayer)
+	if err != nil {
+		if strings.Contains(err.Error(), kube.ObjectModifiedTryAgainError) {
+			return ctrl.Result{RequeueAfter: 1 * time.Second}, nil
+		}
+		log.Error(err, "unable to update ChiaDataLayer status")
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ChiaDataLayerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&k8schianetv1.ChiaDataLayer{}).
+		Owns(&appsv1.Deployment{}).
+		Owns(&corev1.Service{}).
+		Complete(r)
+}
diff --git a/internal/controller/chiadatalayer/helpers.go b/internal/controller/chiadatalayer/helpers.go
new file mode 100644
index 0000000..af3585a
--- /dev/null
+++ b/internal/controller/chiadatalayer/helpers.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2024 Chia Network Inc.
+*/
+
+package chiadatalayer
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"github.com/chia-network/chia-operator/internal/controller/common/kube"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	k8schianetv1 "github.com/chia-network/chia-operator/api/v1"
+	"github.com/chia-network/chia-operator/internal/controller/common/consts"
+)
+
+// getChiaVolumes retrieves the requisite volumes from the Chia config struct
+func getChiaVolumes(datalayer k8schianetv1.ChiaDataLayer) []corev1.Volume {
+	var v []corev1.Volume
+
+	// secret ca volume
+	if datalayer.Spec.ChiaConfig.CASecretName != nil {
+		v = append(v, corev1.Volume{
+			Name: "secret-ca",
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					SecretName: *datalayer.Spec.ChiaConfig.CASecretName,
+				},
+			},
+		})
+	}
+
+	// mnemonic key volume
+	v = append(v, corev1.Volume{
+		Name: "key",
+		VolumeSource: corev1.VolumeSource{
+			Secret: &corev1.SecretVolumeSource{
+				SecretName: datalayer.Spec.ChiaConfig.SecretKey.Name,
+			},
+		},
+	})
+
+	// CHIA_ROOT volume
+	if kube.ShouldMakeChiaRootVolumeClaim(datalayer.Spec.Storage) {
+		v = append(v, corev1.Volume{
+			Name: "chiaroot",
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+					ClaimName: fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name),
+				},
+			},
+		})
+	} else {
+		v = append(v, kube.GetExistingChiaRootVolume(datalayer.Spec.Storage))
+	}
+
+	// data_layer server files volume -- generated PVC when configured, emptyDir fallback otherwise
+	if kube.ShouldMakeDataLayerServerFilesVolumeClaim(datalayer.Spec.Storage) {
+		v = append(v, corev1.Volume{
+			Name: "server",
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+					ClaimName: fmt.Sprintf(chiadatalayerNamePattern, datalayer.Name) + "-server", // NOTE(review): confirm this matches the name assembleDataLayerFilesVolumeClaim generates
+				},
+			},
+		})
+	} else {
+		// No generated claim: mount an ephemeral emptyDir so the server_files mount path still exists
+		v = append(v, corev1.Volume{Name: "server", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}})
+	}
+
+	return v
+}
+
+// getChiaVolumeMounts retrieves the requisite volume mounts from the Chia config struct
+func getChiaVolumeMounts(datalayer k8schianetv1.ChiaDataLayer) []corev1.VolumeMount {
+	var v []corev1.VolumeMount
+
+	// secret ca volume
+	if datalayer.Spec.ChiaConfig.CASecretName != nil {
+		v = append(v, corev1.VolumeMount{
+			Name:      "secret-ca",
+			MountPath: "/chia-ca",
+		})
+	}
+
+	// key volume
+	v = append(v, corev1.VolumeMount{
+		Name:      "key",
+		MountPath: "/key",
+	})
+
+	// CHIA_ROOT volume
+	v = append(v, corev1.VolumeMount{
+		Name:      "chiaroot",
+		MountPath: "/chia-data",
+	})
+
+	// data_layer server files volume
+	v = append(v, corev1.VolumeMount{
+		Name:      "server",
+		MountPath: "/datalayer/server_files",
+	})
+
+	return v
+}
+
+// getChiaEnv retrieves the environment variables from the Chia config struct
+func getChiaEnv(ctx context.Context, datalayer k8schianetv1.ChiaDataLayer, networkData *map[string]string) ([]corev1.EnvVar, error) {
+	logr := log.FromContext(ctx)
+	var env []corev1.EnvVar
+
+	// service env var
+	env = append(env, corev1.EnvVar{
+		Name:  "service",
+		Value: "data",
+	})
+
+	// keys env var
+	env = append(env, corev1.EnvVar{
+		Name:  "keys",
+		Value: fmt.Sprintf("/key/%s", datalayer.Spec.ChiaConfig.SecretKey.Key),
+	})
+
+	env = append(env, corev1.EnvVar{
+		Name:  "chia.data_layer.server_files_location",
+		Value: "/datalayer/server_files",
+	})
+
+	// node peer env var
+	if datalayer.Spec.ChiaConfig.FullNodePeers != nil {
+		fnp, err := kube.MarshalFullNodePeers(*datalayer.Spec.ChiaConfig.FullNodePeers)
+		if err != nil {
+			logr.Error(err, "given full_node peers could not be marshaled to JSON, they may not appear in your chia configuration")
+		} else {
+			env = append(env, corev1.EnvVar{
+				Name:  "chia.wallet.full_node_peers",
+				Value: string(fnp),
+			})
+		}
+	}
+
+	// trusted_cidrs env var
+	if datalayer.Spec.ChiaConfig.TrustedCIDRs != nil {
+		// TODO should any special CIDR input checking happen here
+		cidrs, err := json.Marshal(*datalayer.Spec.ChiaConfig.TrustedCIDRs)
+		if err != nil {
+			logr.Error(err, "given CIDRs could not be marshalled to json. Peer connections that you would expect to be trusted might not be trusted.")
+		} else {
+			env = append(env, corev1.EnvVar{
+				Name:  "trusted_cidrs",
+				Value: string(cidrs),
+			})
+		}
+	}
+
+	// Add common env
+	commonEnv, err := kube.GetCommonChiaEnv(datalayer.Spec.ChiaConfig.CommonSpecChia, networkData)
+	if err != nil {
+		return env, err
+	}
+	env = append(env, commonEnv...)
+
+	return env, nil
+}
+
+// getChiaPorts returns the ports to a chia container
+func getChiaPorts() []corev1.ContainerPort {
+	return []corev1.ContainerPort{
+		{
+			Name:          "daemon",
+			ContainerPort: consts.DaemonPort,
+			Protocol:      "TCP",
+		},
+		{
+			Name:          "rpc",
+			ContainerPort: consts.DataLayerRPCPort,
+			Protocol:      "TCP",
+		},
+		{
+			Name:          "wallet-rpc",
+			ContainerPort: consts.WalletRPCPort,
+			Protocol:      "TCP",
+		},
+	}
+}
diff --git a/internal/controller/chiafarmer/controller.go b/internal/controller/chiafarmer/controller.go
index 6675e21..c0bec14 100644
--- a/internal/controller/chiafarmer/controller.go
+++ b/internal/controller/chiafarmer/controller.go
@@ -138,7 +138,7 @@ func (r *ChiaFarmerReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(farmer.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(farmer.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(farmer)
 		if err != nil {
 			r.Recorder.Event(&farmer, corev1.EventTypeWarning, "Failed", "Failed to assemble farmer PVC -- Check operator logs.")
diff --git a/internal/controller/chiafarmer/helpers.go b/internal/controller/chiafarmer/helpers.go
index d1bfb0f..6c34555 100644
--- a/internal/controller/chiafarmer/helpers.go
+++ b/internal/controller/chiafarmer/helpers.go
@@ -41,7 +41,7 @@ func getChiaVolumes(farmer k8schianetv1.ChiaFarmer) []corev1.Volume {
 	})
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(farmer.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(farmer.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiaharvester/controller.go b/internal/controller/chiaharvester/controller.go
index 8571a65..6a7a006 100644
--- a/internal/controller/chiaharvester/controller.go
+++ b/internal/controller/chiaharvester/controller.go
@@ -138,7 +138,7 @@ func (r *ChiaHarvesterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(harvester.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(harvester.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(harvester)
 		if err != nil {
 			r.Recorder.Event(&harvester, corev1.EventTypeWarning, "Failed", "Failed to assemble harvester PVC -- Check operator logs.")
diff --git a/internal/controller/chiaharvester/helpers.go b/internal/controller/chiaharvester/helpers.go
index d30e883..53d8c78 100644
--- a/internal/controller/chiaharvester/helpers.go
+++ b/internal/controller/chiaharvester/helpers.go
@@ -30,7 +30,7 @@ func getChiaVolumes(harvester k8schianetv1.ChiaHarvester) []corev1.Volume {
 	})
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(harvester.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(harvester.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiaintroducer/controller.go b/internal/controller/chiaintroducer/controller.go
index 04c15fe..e8734ed 100644
--- a/internal/controller/chiaintroducer/controller.go
+++ b/internal/controller/chiaintroducer/controller.go
@@ -132,7 +132,7 @@ func (r *ChiaIntroducerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(introducer.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(introducer.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(introducer)
 		if err != nil {
 			r.Recorder.Event(&introducer, corev1.EventTypeWarning, "Failed", "Failed to assemble introducer PVC -- Check operator logs.")
diff --git a/internal/controller/chiaintroducer/helpers.go b/internal/controller/chiaintroducer/helpers.go
index d9a4fe1..0e822b9 100644
--- a/internal/controller/chiaintroducer/helpers.go
+++ b/internal/controller/chiaintroducer/helpers.go
@@ -30,7 +30,7 @@ func getChiaVolumes(introducer k8schianetv1.ChiaIntroducer) []corev1.Volume {
 	}
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(introducer.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(introducer.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiaseeder/controller.go b/internal/controller/chiaseeder/controller.go
index 1ca6023..a5341a1 100644
--- a/internal/controller/chiaseeder/controller.go
+++ b/internal/controller/chiaseeder/controller.go
@@ -174,7 +174,7 @@ func (r *ChiaSeederReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(seeder.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(seeder.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(seeder)
 		if err != nil {
 			r.Recorder.Event(&seeder, corev1.EventTypeWarning, "Failed", "Failed to assemble seeder PVC -- Check operator logs.")
diff --git a/internal/controller/chiaseeder/helpers.go b/internal/controller/chiaseeder/helpers.go
index f7ef8d0..1e63af0 100644
--- a/internal/controller/chiaseeder/helpers.go
+++ b/internal/controller/chiaseeder/helpers.go
@@ -32,7 +32,7 @@ func getChiaVolumes(seeder k8schianetv1.ChiaSeeder) []corev1.Volume {
 	}
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(seeder.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(seeder.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiatimelord/controller.go b/internal/controller/chiatimelord/controller.go
index 3347114..d023a06 100644
--- a/internal/controller/chiatimelord/controller.go
+++ b/internal/controller/chiatimelord/controller.go
@@ -153,7 +153,7 @@ func (r *ChiaTimelordReconciler) Reconcile(ctx context.Context, req ctrl.Request
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(timelord.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(timelord.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(timelord)
 		if err != nil {
 			r.Recorder.Event(&timelord, corev1.EventTypeWarning, "Failed", "Failed to assemble timelord PVC -- Check operator logs.")
diff --git a/internal/controller/chiatimelord/helpers.go b/internal/controller/chiatimelord/helpers.go
index aca6a3a..534a992 100644
--- a/internal/controller/chiatimelord/helpers.go
+++ b/internal/controller/chiatimelord/helpers.go
@@ -31,7 +31,7 @@ func getChiaVolumes(tl k8schianetv1.ChiaTimelord) []corev1.Volume {
 	})
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(tl.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(tl.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/chiawallet/controller.go b/internal/controller/chiawallet/controller.go
index 9672935..729b63a 100644
--- a/internal/controller/chiawallet/controller.go
+++ b/internal/controller/chiawallet/controller.go
@@ -138,7 +138,7 @@ func (r *ChiaWalletReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Creates a persistent volume claim if the GenerateVolumeClaims setting was set to true
-	if kube.ShouldMakeVolumeClaim(wallet.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(wallet.Spec.Storage) {
 		pvc, err := assembleVolumeClaim(wallet)
 		if err != nil {
 			r.Recorder.Event(&wallet, corev1.EventTypeWarning, "Failed", "Failed to assemble wallet PVC -- Check operator logs.")
diff --git a/internal/controller/chiawallet/helpers.go b/internal/controller/chiawallet/helpers.go
index 616b7e7..b1ddf6e 100644
--- a/internal/controller/chiawallet/helpers.go
+++ b/internal/controller/chiawallet/helpers.go
@@ -63,7 +63,7 @@ func getChiaVolumes(wallet k8schianetv1.ChiaWallet) []corev1.Volume {
 	})
 
 	// CHIA_ROOT volume
-	if kube.ShouldMakeVolumeClaim(wallet.Spec.Storage) {
+	if kube.ShouldMakeChiaRootVolumeClaim(wallet.Spec.Storage) {
 		v = append(v, corev1.Volume{
 			Name: "chiaroot",
 			VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/common/consts/consts.go b/internal/controller/common/consts/consts.go
index 10a7966..657deb7 100644
--- a/internal/controller/common/consts/consts.go
+++ b/internal/controller/common/consts/consts.go
@@ -61,6 +61,12 @@ const (
 	// DaemonPort defines the port for the Chia daemon
 	DaemonPort = 55400
 
+	// DataLayerHTTPPort defines the port for the data_layer_http instances
+	DataLayerHTTPPort = 8575
+
+	// DataLayerRPCPort defines the port for the data_layer RPC
+	DataLayerRPCPort = 8562
+
 	// FarmerPort defines the port for farmer instances
 	FarmerPort = 8447
 
diff --git a/internal/controller/common/kube/helpers.go b/internal/controller/common/kube/helpers.go
index 3e834e0..cbfadbc 100644
--- a/internal/controller/common/kube/helpers.go
+++ b/internal/controller/common/kube/helpers.go
@@ -44,14 +44,22 @@ func CombineMaps(maps ...map[string]string) map[string]string {
 	return keyvalues
 }
 
-// ShouldMakeVolumeClaim returns true if the related PersistentVolumeClaim was configured to be made
-func ShouldMakeVolumeClaim(storage *k8schianetv1.StorageConfig) bool {
+// ShouldMakeChiaRootVolumeClaim returns true if the CHIA_ROOT PersistentVolumeClaim was configured to be made
+func ShouldMakeChiaRootVolumeClaim(storage *k8schianetv1.StorageConfig) bool {
 	if storage != nil && storage.ChiaRoot != nil && storage.ChiaRoot.PersistentVolumeClaim != nil && storage.ChiaRoot.PersistentVolumeClaim.GenerateVolumeClaims {
 		return storage.ChiaRoot.PersistentVolumeClaim.GenerateVolumeClaims
 	}
 	return false
 }
 
+// ShouldMakeDataLayerServerFilesVolumeClaim returns true if the server files PersistentVolumeClaim was configured to be made
+func ShouldMakeDataLayerServerFilesVolumeClaim(storage *k8schianetv1.StorageConfig) bool {
+	if storage != nil && storage.DataLayerServerFiles != nil && storage.DataLayerServerFiles.PersistentVolumeClaim != nil && storage.DataLayerServerFiles.PersistentVolumeClaim.GenerateVolumeClaims {
+		return storage.DataLayerServerFiles.PersistentVolumeClaim.GenerateVolumeClaims
+	}
+	return false
+}
+
 // ShouldMakeService returns true if the related Service was configured to be made, otherwise returns the specified default value
 func ShouldMakeService(srv k8schianetv1.Service, def bool) bool {
 	if srv.Enabled != nil {
@@ -130,7 +138,7 @@ func GetFullNodePort(chia k8schianetv1.CommonSpecChia, networkData *map[string]s
 // If both a PV and hostPath volume are specified for CHIA_ROOT, the PV will take precedence.
 // If both configs are empty, this will fall back to emptyDir so sidecars can mount CHIA_ROOT.
 // NOTE: This function does not handle the mode where the controller generates a CHIA_ROOT PVC, itself.
-// Therefore, if ShouldMakeVolumeClaim is true, specifying the PVC's name should be handled in the controller.
+// Therefore, if ShouldMakeChiaRootVolumeClaim is true, specifying the PVC's name should be handled in the controller.
 func GetExistingChiaRootVolume(storage *k8schianetv1.StorageConfig) corev1.Volume {
 	volumeName := "chiaroot"
 	if storage != nil && storage.ChiaRoot != nil {
diff --git a/internal/controller/common/kube/helpers_test.go b/internal/controller/common/kube/helpers_test.go
index 7bd594b..8593bb9 100644
--- a/internal/controller/common/kube/helpers_test.go
+++ b/internal/controller/common/kube/helpers_test.go
@@ -48,9 +48,9 @@ func TestCombineMaps(t *testing.T) {
 	require.Equal(t, expected, actual)
 }
 
-func TestShouldMakeVolumeClaim(t *testing.T) {
+func TestShouldMakeChiaRootVolumeClaim(t *testing.T) {
 	// True case
-	actual := ShouldMakeVolumeClaim(&k8schianetv1.StorageConfig{
+	actual := ShouldMakeChiaRootVolumeClaim(&k8schianetv1.StorageConfig{
 		ChiaRoot: &k8schianetv1.ChiaRootConfig{
 			PersistentVolumeClaim: &k8schianetv1.PersistentVolumeClaimConfig{
 				GenerateVolumeClaims: true,
@@ -60,17 +60,17 @@ func TestShouldMakeVolumeClaim(t *testing.T) {
 	require.Equal(t, true, actual, "expected should make volume claim")
 
 	// False case - nil storage config
-	actual = ShouldMakeVolumeClaim(nil)
+	actual = ShouldMakeChiaRootVolumeClaim(nil)
 	require.Equal(t, false, actual, "expected should not make volume claim for nil storage config")
 
 	// False case - non-nil storage config, nil ChiaRoot config
-	actual = ShouldMakeVolumeClaim(&k8schianetv1.StorageConfig{
+	actual = ShouldMakeChiaRootVolumeClaim(&k8schianetv1.StorageConfig{
 		ChiaRoot: nil,
 	})
 	require.Equal(t, false, actual, "expected should not make volume claim for nil ChiaRoot config")
 
 	// False case - non-nil storage config, nil PersistentVolumeClaim config
-	actual = ShouldMakeVolumeClaim(&k8schianetv1.StorageConfig{
+	actual = ShouldMakeChiaRootVolumeClaim(&k8schianetv1.StorageConfig{
 		ChiaRoot: &k8schianetv1.ChiaRootConfig{
 			PersistentVolumeClaim: nil,
 		},
@@ -78,7 +78,7 @@ func TestShouldMakeVolumeClaim(t *testing.T) {
 	require.Equal(t, false, actual, "expected should not make volume claim for nil PersistentVolumeClaim config")
 
 	// False case - non-nil storage config, GenerateVolumeClaims set to false
-	actual = ShouldMakeVolumeClaim(&k8schianetv1.StorageConfig{
+	actual = ShouldMakeChiaRootVolumeClaim(&k8schianetv1.StorageConfig{
 		ChiaRoot: &k8schianetv1.ChiaRootConfig{
 			PersistentVolumeClaim: &k8schianetv1.PersistentVolumeClaimConfig{
 				GenerateVolumeClaims: false,
diff --git a/internal/controller/common/kube/reconcilers.go b/internal/controller/common/kube/reconcilers.go
index c3e68b2..4d92fd3 100644
--- a/internal/controller/common/kube/reconcilers.go
+++ b/internal/controller/common/kube/reconcilers.go
@@ -256,7 +256,7 @@ func ReconcileStatefulset(ctx context.Context, c client.Client, desired appsv1.S
 // ReconcilePersistentVolumeClaim uses the controller-runtime client to determine if the PVC resource needs to be created or updated
 func ReconcilePersistentVolumeClaim(ctx context.Context, c client.Client, storage *k8schianetv1.StorageConfig, desired corev1.PersistentVolumeClaim) (reconcile.Result, error) {
 	klog := log.FromContext(ctx).WithValues("PersistentVolumeClaim.Namespace", desired.Namespace, "PersistentVolumeClaim.Name", desired.Name)
-	ensurePVCExists := ShouldMakeVolumeClaim(storage)
+	ensurePVCExists := ShouldMakeChiaRootVolumeClaim(storage) || ShouldMakeDataLayerServerFilesVolumeClaim(storage)
 
 	// Get existing PVC
 	var current corev1.PersistentVolumeClaim
diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go
index b995f3f..7ced49c 100644
--- a/internal/metrics/metrics.go
+++ b/internal/metrics/metrics.go
@@ -22,6 +22,14 @@ var (
 		},
 	)
 
+	// ChiaDataLayers is a gauge metric that keeps a running total of deployed ChiaDataLayers
+	ChiaDataLayers = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "chia_operator_chiadatalayer_total",
+			Help: "Number of ChiaDataLayers objects controlled by this operator",
+		},
+	)
+
 	// ChiaFarmers is a gauge metric that keeps a running total of deployed ChiaFarmers
 	ChiaFarmers = prometheus.NewGauge(
 		prometheus.GaugeOpts{
@@ -91,6 +99,7 @@ func init() {
 	metrics.Registry.MustRegister(
 		ChiaCAs,
 		ChiaCrawlers,
+		ChiaDataLayers,
 		ChiaFarmers,
 		ChiaHarvesters,
 		ChiaIntroducers,