diff --git a/.gitignore b/.gitignore index ca07212..dca6b17 100644 --- a/.gitignore +++ b/.gitignore @@ -965,3 +965,5 @@ FodyWeavers.xsd # Additional files built by Visual Studio # End of https://www.toptal.com/developers/gitignore/api/vim,node,data,emacs,python,pycharm,executable,sublimetext,visualstudio,visualstudiocode +traefik +frigate/config diff --git a/active b/active new file mode 100644 index 0000000..19f9f9e --- /dev/null +++ b/active @@ -0,0 +1,6 @@ +traefik +portainer +pihole +librespeed +syncthing +home-gallery diff --git a/argo/install.yaml b/argo/install.yaml new file mode 100644 index 0000000..35d3c10 --- /dev/null +++ b/argo/install.yaml @@ -0,0 +1,1352 @@ +# This is an auto-generated file. DO NOT EDIT +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io +spec: + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + 
type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowartifactgctasks.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowArtifactGCTask + listKind: WorkflowArtifactGCTaskList + plural: workflowartifactgctasks + shortNames: + - wfat + singular: workflowartifactgctask + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io +spec: + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of 
the workflow + jsonPath: .status.phase + name: Status + type: string + - description: When the workflow was started + format: date-time + jsonPath: .status.startedAt + name: Age + type: date + - description: Human readable message indicating details about why the workflow + is in this condition. + jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtaskresults.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + 
type: string + optional: + type: boolean + required: + - key + type: object + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + 
krbConfigConfigMap: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbKeytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + clientKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + clientSecretSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + 
mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: 
object + type: array + exitCode: + type: string + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string + progress: + type: string + required: + - metadata + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtasksets.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskSet + listKind: WorkflowTaskSetList + plural: workflowtasksets + shortNames: + - wfts + singular: workflowtaskset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + shortNames: + - wftmpl + singular: workflowtemplate + scope: Namespaced + versions: + - name: v1alpha1 + 
schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo + namespace: argo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-server + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-role + namespace: argo +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - 
workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/finalizers + verbs: + - create + - update + - delete + - get +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - 
delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-server-cluster-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - watch + - create + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - eventsources + - sensors + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows + - clusterworkflowtemplates + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-binding + namespace: argo +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-cluster-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap + namespace: argo +--- +apiVersion: v1 +kind: Service +metadata: + name: argo-server + 
namespace: argo +spec: + ports: + - name: web + port: 2746 + targetPort: 2746 + selector: + app: argo-server +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: workflow-controller +value: 1000000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-server + namespace: argo +spec: + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - args: + - server + env: [] + image: quay.io/argoproj/argocli:v3.5.1 + name: argo-server + ports: + - containerPort: 2746 + name: web + readinessProbe: + httpGet: + path: / + port: 2746 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 20 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + volumeMounts: + - mountPath: /tmp + name: tmp + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + serviceAccountName: argo-server + volumes: + - emptyDir: {} + name: tmp +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: workflow-controller + namespace: argo +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: [] + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: quay.io/argoproj/workflow-controller:v3.5.1 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: 
true + serviceAccountName: argo diff --git a/authelia/configmap-inspector.yml b/authelia/configmap-inspector.yml new file mode 100644 index 0000000..ea038da --- /dev/null +++ b/authelia/configmap-inspector.yml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + name: configmap-inspector + namespace: authelia +spec: + containers: + - image: registry.wayl.one/devtainer:slim + name: pvc-inspector + command: ["sleep", "300"] + + volumeMounts: + - mountPath: /configmap + name: configmap + - mountPath: /config + name: config + volumes: + - name: configmap + projected: + sources: + - configMap: + name: configuration + - configMap: + name: users + + - name: config + persistentVolumeClaim: + claimName: config + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: config + name: config + namespace: authelia +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} diff --git a/authelia/deployment.yaml b/authelia/deployment.yaml new file mode 100644 index 0000000..ca63ed3 --- /dev/null +++ b/authelia/deployment.yaml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n authelia + kompose.service.expose: auth.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: authelia + name: authelia + namespace: authelia +spec: + ports: + - name: "9091" + port: 9091 + targetPort: 9091 + - name: 9091-tcp + port: 9091 + targetPort: 9091 + selector: + io.kompose.service: authelia +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: authelia + namespace: authelia +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n authelia + kompose.service.expose: auth.wayl.one + kompose.version: 1.31.2 (a92241f79) + 
creationTimestamp: null + labels: + io.kompose.service: authelia + name: authelia + namespace: authelia +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: authelia + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n authelia + kompose.service.expose: auth.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/authelia-default: "true" + io.kompose.service: authelia + spec: + containers: + - env: + - name: TZ + value: America/Chicago + image: authelia/authelia + name: authelia + ports: + - containerPort: 9091 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /config + name: config + restartPolicy: Always + volumes: + - name: config + persistentVolumeClaim: + claimName: config +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n authelia + kompose.service.expose: auth.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: authelia + name: authelia + namespace: authelia +spec: + rules: + - host: auth.wayl.one + http: + paths: + - backend: + service: + name: authelia + port: + number: 9091 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: config + name: config + namespace: authelia +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/authelia/docker-compose.yml b/authelia/docker-compose.yml new file mode 100644 index 0000000..70db740 --- /dev/null +++ b/authelia/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3" +services: + authelia: + image: authelia/authelia + container_name: authelia + volumes: + - config:/config + ports: + - 9091 + + expose: + - 9091 + environment: + - TZ=America/Chicago + 
healthcheck: + disable: true + labels: + kompose.service.expose: auth.wayl.one + # kompose.volume.type: configMap +volumes: + config: diff --git a/authelia/justfile b/authelia/justfile new file mode 100644 index 0000000..c63981a --- /dev/null +++ b/authelia/justfile @@ -0,0 +1,31 @@ +default: cred convert deploy viz +update: convert patch + +create-ns: + kubectl create ns authelia +cred: + kubectl get secret -n default regcred --output=yaml -o yaml | sed 's/namespace: default/namespace: authelia/' | kubectl apply -n authelia -f - && echo deployed secret || echo secret exists +convert: + kompose convert -o deployment.yaml -n authelia +deploy: + kubectl apply -f deployment.yaml +delete: + kubectl delete all --all -n authelia --timeout=0s +viz: + k8sviz -n authelia --kubeconfig $KUBECONFIG -t png -o authelia-k8s.png +restart: + kubectl rollout restart -n authelia deployment/authelia + +patch: + kubectl patch -f deployment.yaml +describe: + kubectl get deployment -n authelia + kubectl get rs -n authelia + kubectl get pod -n authelia + kubectl get svc -n authelia + kubectl get ing -n authelia + +describe-pod: + kubectl describe pod -n authelia +logs: + kubectl logs --all-containers -l io.kompose.service=authelia-wayl-one -n authelia -f diff --git a/authelia/middleware.yaml b/authelia/middleware.yaml new file mode 100644 index 0000000..c8e52d5 --- /dev/null +++ b/authelia/middleware.yaml @@ -0,0 +1,16 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: forwardauth-authelia + namespace: authelia + labels: + app.kubernetes.io/instance: authelia + app.kubernetes.io/name: authelia +spec: + forwardAuth: + address: http://authelia.authelia.svc/api/verify?rd=https%3A%2F%2Fwww.wayl.one%2F + authResponseHeaders: + - Remote-User + - Remote-Name + - Remote-Email + - Remote-Groups diff --git a/authelia/users_configmap.yaml b/authelia/users_configmap.yaml new file mode 100644 index 0000000..9d4e579 --- /dev/null +++ b/authelia/users_configmap.yaml @@ 
-0,0 +1,1471 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: users + namespace: authelia +data: + users_database.yml: |- + # yamllint disable rule:line-length + ############################################################### + # Users Database # + ############################################################### + + # This file can be used if you do not have an LDAP set up. + + users: + waylon: + disabled: false + displayname: "Waylon Walker" + password: "$argon2id$v=19$m=65536,t=3,p=4$9nYBYzxWFE9V3dMebQDNQg$IUo96X9RP1faswrTIjOBnPnRrG0NXFrslPHjka51g6w" + email: waylon@waylonwalker.com + groups: + - admins + - dev + # yamllint enable rule:line-length + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: configuration + namespace: authelia +data: + configuration.yml: |- + # yamllint disable rule:comments-indentation + ############################################################################### + # Authelia Configuration # + ############################################################################### + + ## Note: the container by default expects to find this file at /config/configuration.yml. + + ## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to + ## the system certificates store. + ## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem. + # certificates_directory: /config/certificates/ + + ## The theme to display: light, dark, grey, auto. + theme: dark + + ## The secret used to generate JWT tokens when validating user identity by email confirmation. JWT Secret can also be + ## set using a secret: https://www.authelia.com/c/secrets + jwt_secret: a_very_important_secret + + ## Default redirection URL + ## + ## If user tries to authenticate without any referer, Authelia does not know where to redirect the user to at the end + ## of the authentication process. 
This parameter allows you to specify the default redirection URL Authelia will use + ## in such a case. + ## + ## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication. + default_redirection_url: https://auth.wayl.one/ + + ## Set the default 2FA method for new users and for when a user has a preferred method configured that has been + ## disabled. This setting must be a method that is enabled. + ## Options are totp, webauthn, mobile_push. + default_2fa_method: "totp" + + ## + ## Server Configuration + ## + server: + ## The address to listen on. + host: 0.0.0.0 + + ## The port to listen on. + port: 9091 + + ## Set the single level path Authelia listens on. + ## Must be alphanumeric chars and should not contain any slashes. + path: "" + + ## Set the path on disk to Authelia assets. + ## Useful to allow overriding of specific static assets. + # asset_path: /config/assets/ + + ## Enables the pprof endpoint. + enable_pprof: false + + ## Enables the expvars endpoint. + enable_expvars: false + + ## Disables writing the health check vars to /app/.healthcheck.env which makes healthcheck.sh return exit code 0. + ## This is disabled by default if either /app/.healthcheck.env or /app/healthcheck.sh do not exist. + disable_healthcheck: false + + ## Authelia by default doesn't accept TLS communication on the server port. This section overrides this behaviour. + tls: + ## The path to the DER base64/PEM format private key. + key: "" + + ## The path to the DER base64/PEM format public certificate. + certificate: "" + + ## The list of certificates for client authentication. + client_certificates: [] + + ## Server headers configuration/customization. + headers: + ## The CSP Template. Read the docs. + csp_template: "" + + ## Server Buffers configuration. + # buffers: + + ## Buffers usually should be configured to be the same value. 
+ ## Explanation at https://www.authelia.com/c/server#buffer-sizes + ## Read buffer size adjusts the server's max incoming request size in bytes. + ## Write buffer size does the same for outgoing responses. + + ## Read buffer. + # read: 4096 + + ## Write buffer. + # write: 4096 + + ## Server Timeouts configuration. + # timeouts: + + ## Read timeout. + # read: 6s + + ## Write timeout. + # write: 6s + + ## Idle timeout. + # idle: 30s + + ## + ## Log Configuration + ## + log: + ## Level of verbosity for logs: info, debug, trace. + level: debug + + ## Format the logs are written as: json, text. + # format: json + + ## File path where the logs will be written. If not set logs are written to stdout. + # file_path: /config/authelia.log + + ## Whether to also log to stdout when a log_file_path is defined. + # keep_stdout: false + + ## + ## Telemetry Configuration + ## + telemetry: + ## + ## Metrics Configuration + ## + metrics: + ## Enable Metrics. + enabled: false + + ## The address to listen on for metrics. This should be on a different port to the main server.port value. + address: tcp://0.0.0.0:9959 + + ## Metrics Server Buffers configuration. + # buffers: + + ## Read buffer. + # read: 4096 + + ## Write buffer. + # write: 4096 + + ## Metrics Server Timeouts configuration. + # timeouts: + + ## Read timeout. + # read: 6s + + ## Write timeout. + # write: 6s + + ## Idle timeout. + # idle: 30s + + ## + ## TOTP Configuration + ## + ## Parameters used for TOTP generation. + totp: + ## Disable TOTP. + disable: false + + ## The issuer name displayed in the Authenticator application of your choice. + issuer: authelia.com + + ## The TOTP algorithm to use. + ## It is CRITICAL you read the documentation before changing this option: + ## https://www.authelia.com/c/totp#algorithm + algorithm: sha1 + + ## The number of digits a user has to input. Must either be 6 or 8. + ## Changing this option only affects newly generated TOTP configurations. 
+ ## It is CRITICAL you read the documentation before changing this option: + ## https://www.authelia.com/c/totp#digits + digits: 6 + + ## The period in seconds a one-time password is valid for. + ## Changing this option only affects newly generated TOTP configurations. + period: 30 + + ## The skew controls number of one-time passwords either side of the current one that are valid. + ## Warning: before changing skew read the docs link below. + skew: 1 + ## See: https://www.authelia.com/c/totp#input-validation to read + ## the documentation. + + ## The size of the generated shared secrets. Default is 32 and is sufficient in most use cases, minimum is 20. + secret_size: 32 + + ## + ## WebAuthn Configuration + ## + ## Parameters used for WebAuthn. + webauthn: + ## Disable Webauthn. + disable: false + + ## Adjust the interaction timeout for Webauthn dialogues. + timeout: 60s + + ## The display name the browser should show the user for when using Webauthn to login/register. + display_name: Authelia + + ## Conveyance preference controls if we collect the attestation statement including the AAGUID from the device. + ## Options are none, indirect, direct. + attestation_conveyance_preference: indirect + + ## User verification controls if the user must make a gesture or action to confirm they are present. + ## Options are required, preferred, discouraged. + user_verification: preferred + + ## + ## Duo Push API Configuration + ## + ## Parameters used to contact the Duo API. Those are generated when you protect an application of type + ## "Partner Auth API" in the management panel. + # duo_api: + # disable: false + # hostname: api-123456789.example.com + # integration_key: ABCDEF + ## Secret can also be set using a secret: https://www.authelia.com/c/secrets + # secret_key: 1234567890abcdefghifjkl + # enable_self_enrollment: false + + ## + ## NTP Configuration + ## + ## This is used to validate the servers time is accurate enough to validate TOTP. 
+ ntp: + ## NTP server address. + address: "time.cloudflare.com:123" + + ## NTP version. + version: 4 + + ## Maximum allowed time offset between the host and the NTP server. + max_desync: 3s + + ## Disables the NTP check on startup entirely. This means Authelia will not contact a remote service at all if you + ## set this to true, and can operate in a truly offline mode. + disable_startup_check: false + + ## The default of false will prevent startup only if we can contact the NTP server and the time is out of sync with + ## the NTP server more than the configured max_desync. If you set this to true, an error will be logged but startup + ## will continue regardless of results. + disable_failure: false + + ## + ## Authentication Backend Provider Configuration + ## + ## Used for verifying user passwords and retrieving information such as email address and groups users belong to. + ## + ## The available providers are: `file`, `ldap`. You must use only one of these providers. + authentication_backend: + ## Password Reset Options. + password_reset: + ## Disable both the HTML element and the API for reset password functionality. + disable: false + + ## External reset password url that redirects the user to an external reset portal. This disables the internal reset + ## functionality. + custom_url: "" + + file: + path: /config/users_database.yml + watch: false + search: + email: false + case_insensitive: false + password: + algorithm: argon2 + argon2: + variant: argon2id + iterations: 3 + memory: 65536 + parallelism: 4 + key_length: 32 + salt_length: 16 + scrypt: + iterations: 16 + block_size: 8 + parallelism: 1 + key_length: 32 + salt_length: 16 + pbkdf2: + variant: sha512 + iterations: 310000 + salt_length: 16 + sha2crypt: + variant: sha512 + iterations: 50000 + salt_length: 16 + bcrypt: + variant: standard + cost: 12 + + ## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation.
+ ## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will + ## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP. + ## To force update on every request you can set this to '0' or 'always', this will increase processor demand. + ## See the below documentation for more information. + ## Duration Notation docs: https://www.authelia.com/c/common#duration-notation-format + ## Refresh Interval docs: https://www.authelia.com/c/1fa#refresh-interval + refresh_interval: 5m + + ## + ## LDAP (Authentication Provider) + ## + ## This is the recommended Authentication Provider in production + ## because it allows Authelia to offload the stateful operations + ## onto the LDAP service. + # ldap: + ## The LDAP implementation, this affects elements like the attribute utilised for resetting a password. + ## Acceptable options are as follows: + ## - 'activedirectory' - For Microsoft Active Directory. + ## - 'custom' - For custom specifications of attributes and filters. + ## This currently defaults to 'custom' to maintain existing behaviour. + ## + ## Depending on the option here certain other values in this section have a default value, notably all of the + ## attribute mappings have a default value that this config overrides, you can read more about these default values + ## at https://www.authelia.com/c/ldap#defaults + # implementation: custom + + ## The url to the ldap server. Format: ://
[:]. + ## Scheme can be ldap or ldaps in the format (port optional). + # url: ldap://127.0.0.1 + + ## The dial timeout for LDAP. + # timeout: 5s + + ## Use StartTLS with the LDAP connection. + # start_tls: false + + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the host portion of the url option. + # server_name: ldap.example.com + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: TLS1.2 + + ## Maximum TLS version for the connection. + # maximum_version: TLS1.3 + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. 
+ # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # 
kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # 
jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe + # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84 + # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk + # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ + # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid.. + # DO NOT USE== + # -----END RSA PRIVATE KEY----- + + ## The distinguished name of the container searched for objects in the directory information tree. + ## See also: additional_users_dn, additional_groups_dn. + # base_dn: dc=example,dc=com + + ## The attribute holding the username of the user. This attribute is used to populate the username in the session + ## information. For your information, Microsoft Active Directory usually uses 'sAMAccountName' and OpenLDAP usually + ## uses 'uid'. Beware that this attribute holds the unique identifiers for the users binding the user and the + ## configuration stored in database. Therefore only single value attributes are allowed and the value must never be + ## changed once attributed to a user otherwise it would break the configuration for that user. Technically, + ## non-unique attributes like 'mail' can also be used but we don't recommend using them, we instead advise to use + ## a filter to perform alternative lookups and the attributes mentioned above (sAMAccountName and uid) to + ## follow https://www.ietf.org/rfc/rfc2307.txt. + # username_attribute: uid + + ## The additional_users_dn is prefixed to base_dn and delimited by a comma when searching for users. + ## i.e. with this set to OU=Users and base_dn set to DC=a,DC=com; OU=Users,DC=a,DC=com is searched for users. + # additional_users_dn: ou=users + + ## The users filter used in search queries to find the user profile based on input filled in login form. 
+ ## Various placeholders are available in the user filter which you can read about in the documentation which can + ## be found at: https://www.authelia.com/c/ldap#users-filter-replacements + ## + ## Recommended settings are as follows: + ## - Microsoft Active Directory: (&({username_attribute}={input})(objectCategory=person)(objectClass=user)) + ## - OpenLDAP: + ## - (&({username_attribute}={input})(objectClass=person)) + ## - (&({username_attribute}={input})(objectClass=inetOrgPerson)) + ## + ## To allow sign in both with username and email, one can use a filter like + ## (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person)) + # users_filter: (&({username_attribute}={input})(objectClass=person)) + + ## The additional_groups_dn is prefixed to base_dn and delimited by a comma when searching for groups. + ## i.e. with this set to OU=Groups and base_dn set to DC=a,DC=com; OU=Groups,DC=a,DC=com is searched for groups. + # additional_groups_dn: ou=groups + + ## The groups filter used in search queries to find the groups based on relevant authenticated user. + ## Various placeholders are available in the groups filter which you can read about in the documentation which can + ## be found at: https://www.authelia.com/c/ldap#groups-filter-replacements + ## + ## If your groups use the `groupOfUniqueNames` structure use this instead: + ## (&(uniqueMember={dn})(objectClass=groupOfUniqueNames)) + # groups_filter: (&(member={dn})(objectClass=groupOfNames)) + + ## The attribute holding the name of the group. + # group_name_attribute: cn + + ## The attribute holding the mail address of the user. If multiple email addresses are defined for a user, only the + ## first one returned by the LDAP server is used. + # mail_attribute: mail + + ## The attribute holding the display name of the user. This will be used to greet an authenticated user. + # display_name_attribute: displayName + + ## Follow referrals returned by the server. 
+ ## This is especially useful for environments where read-only servers exist. Only implemented for write operations. + # permit_referrals: false + + ## The username and password of the admin user. + # user: cn=admin,dc=example,dc=com + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: password + + ## + ## File (Authentication Provider) + ## + ## With this backend, the users database is stored in a file which is updated when users reset their passwords. + ## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia + ## from being scaled to more than one instance. The options under 'password' have sane defaults, and as it has security + ## implications it is highly recommended you leave the default values. Before considering changing these settings + ## please read the docs page below: + ## https://www.authelia.com/r/passwords#tuning + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + + ## + ## Password Policy Configuration. + ## + password_policy: + ## The standard policy allows you to tune individual settings manually. + standard: + enabled: false + + ## Require a minimum length for passwords. + min_length: 8 + + ## Require a maximum length for passwords. + max_length: 0 + + ## Require uppercase characters. + require_uppercase: true + + ## Require lowercase characters. + require_lowercase: true + + ## Require numeric characters. + require_number: true + + ## Require special characters. + require_special: true + + ## zxcvbn is a well known and used password strength algorithm. It does not have tunable settings. + zxcvbn: + enabled: false + + ## Configures the minimum score allowed. + min_score: 3 + + ## + ## Access Control Configuration + ## + ## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
+ ## + ## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed + ## to anyone. Otherwise restrictions follow the rules defined. + ## + ## Note: One can use the wildcard * to match any subdomain. + ## It must stand at the beginning of the pattern. (example: *.example.com) + ## + ## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct. + ## + ## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'. + ## + ## - 'domain' defines which domain or set of domains the rule applies to. + ## + ## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matches any user if not + ## provided. If provided, the parameter represents either a user or a group. It should be of the form + ## 'user:<username>' or 'group:<groupname>'. + ## + ## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'. + ## + ## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter + ## is optional and matches any resource if not provided. + ## + ## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies. + access_control: + ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any + ## resource if there is no policy to be applied to the user.
+ default_policy: deny + rules: + ## Rules applied to everyone + - domain: "public.wayl.one" + policy: bypass + - domain: "secure.wayl.one" + policy: one_factor + - domain: "internal.wayl.one" + policy: two_factor + + # networks: + # - name: internal + # networks: + # - 10.10.0.0/16 + # - 192.168.2.0/24 + # - name: VPN + # networks: 10.9.0.0/16 + + # rules: + ## Rules applied to everyone + # - domain: 'public.example.com' + # policy: bypass + + ## Domain Regex examples. Generally we recommend just using a standard domain. + # - domain_regex: '^(?P\w+)\.example\.com$' + # policy: one_factor + # - domain_regex: '^(?P\w+)\.example\.com$' + # policy: one_factor + # - domain_regex: + # - '^appgroup-.*\.example\.com$' + # - '^appgroup2-.*\.example\.com$' + # policy: one_factor + # - domain_regex: '^.*\.example\.com$' + # policy: two_factor + + # - domain: 'secure.example.com' + # policy: one_factor + ## Network based rule, if not provided any network matches. + # networks: + # - internal + # - VPN + # - 192.168.1.0/24 + # - 10.0.0.1 + + # - domain: + # - 'secure.example.com' + # - 'private.example.com' + # policy: two_factor + + # - domain: 'singlefactor.example.com' + # policy: one_factor + + ## Rules applied to 'admins' group + # - domain: 'mx2.mail.example.com' + # subject: 'group:admins' + # policy: deny + + # - domain: '*.example.com' + # subject: + # - 'group:admins' + # - 'group:moderators' + # policy: two_factor + + ## Rules applied to 'dev' group + # - domain: 'dev.example.com' + # resources: + # - '^/groups/dev/.*$' + # subject: 'group:dev' + # policy: two_factor + + ## Rules applied to user 'john' + # - domain: 'dev.example.com' + # resources: + # - '^/users/john/.*$' + # subject: 'user:john' + # policy: two_factor + + ## Rules applied to user 'harry' + # - domain: 'dev.example.com' + # resources: + # - '^/users/harry/.*$' + # subject: 'user:harry' + # policy: two_factor + + ## Rules applied to user 'bob' + # - domain: '*.mail.example.com' + # subject: 
'user:bob' + # policy: two_factor + # - domain: 'dev.example.com' + # resources: + # - '^/users/bob/.*$' + # subject: 'user:bob' + # policy: two_factor + + ## + ## Session Provider Configuration + ## + ## The session cookies identify the user once logged in. + ## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined. + session: + ## The name of the session cookie. + name: authelia_session + + ## The domain to protect. + ## Note: the authenticator must also be in that domain. + ## If empty, the cookie is restricted to the subdomain of the issuer. + domain: wayl.one + + ## Sets the Cookie SameSite value. Possible options are none, lax, or strict. + ## Please read https://www.authelia.com/c/session#same_site + same_site: lax + + ## The secret to encrypt the session data. This is only used with Redis / Redis Sentinel. + ## Secret can also be set using a secret: https://www.authelia.com/c/secrets + secret: insecure_session_secret + + ## The value for expiration, inactivity, and remember_me_duration are in seconds or the duration notation format. + ## See: https://www.authelia.com/c/common#duration-notation-format + ## All three of these values affect the cookie/session validity period. Longer periods are considered less secure + ## because a stolen cookie will last longer giving attackers more time to spy or attack. + + ## The time before the cookie expires and the session is destroyed if remember me IS NOT selected. + expiration: 1h + + ## The inactivity time before the session is reset. If expiration is set to 1h, and this is set to 5m, if the user + ## does not select the remember me option their session will get destroyed after 1h, or after 5m since the last time + ## Authelia detected user activity. + inactivity: 5m + + ## The time before the cookie expires and the session is destroyed if remember me IS selected. + ## Value of -1 disables remember me. 
+ remember_me_duration: 1M + + ## + ## Redis Provider + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + # redis: + # host: 127.0.0.1 + # port: 6379 + ## Use a unix socket instead + # host: /var/run/redis/redis.sock + + ## Username used for redis authentication. This is optional and a new feature in redis 6.0. + # username: authelia + + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: authelia + + ## This is the Redis DB Index https://redis.io/commands/select (sometimes referred to as database number, DB, etc). + # database_index: 0 + + ## The maximum number of concurrent active connections to Redis. + # maximum_active_connections: 8 + + ## The target number of idle connections to have open ready for work. Useful when opening connections is slow. + # minimum_idle_connections: 0 + + ## The Redis TLS configuration. If defined will require a TLS connection to the Redis instance(s). + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the host option. + # server_name: myredis.example.com + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: TLS1.2 + + ## Maximum TLS version for the connection. 
+ # maximum_version: TLS1.3 + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # 
AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # 
fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe + # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84 + # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk + # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ + # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid.. + # DO NOT USE== + # -----END RSA PRIVATE KEY----- + + ## The Redis HA configuration options. + ## This provides specific options to Redis Sentinel, sentinel_name must be defined (Master Name). + # high_availability: + ## Sentinel Name / Master Name. + # sentinel_name: mysentinel + + ## Specific username for Redis Sentinel. The node username and password is configured above. + # sentinel_username: sentinel_specific_user + + ## Specific password for Redis Sentinel. The node username and password is configured above. + # sentinel_password: sentinel_specific_pass + + ## The additional nodes to pre-seed the redis provider with (for sentinel). + ## If the host in the above section is defined, it will be combined with this list to connect to sentinel. + ## For high availability to be used you must have either defined; the host above or at least one node below. + # nodes: + # - host: sentinel-node1 + # port: 6379 + # - host: sentinel-node2 + # port: 6379 + + ## Choose the host with the lowest latency. + # route_by_latency: false + + ## Choose the host randomly. + # route_randomly: false + + ## + ## Regulation Configuration + ## + ## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are made + ## in a short period of time. + regulation: + ## The number of failed login attempts before user is banned. Set it to 0 to disable regulation. + max_retries: 3 + + ## The time range during which the user can attempt login before being banned. 
The user is banned if the + ## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation. + ## See: https://www.authelia.com/c/common#duration-notation-format + find_time: 2m + + ## The length of time before a banned user can login again. Ban Time accepts duration notation. + ## See: https://www.authelia.com/c/common#duration-notation-format + ban_time: 5m + + ## + ## Storage Provider Configuration + ## + ## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers. + storage: + ## The encryption key that is used to encrypt sensitive information in the database. Must be a string with a minimum + ## length of 20. Please see the docs if you configure this with an undesirable key and need to change it, you MUST use + ## the CLI to change this in the database if you want to change it from a previously configured value. + encryption_key: you_must_generate_a_random_string_of_more_than_twenty_chars_and_configure_this + + ## + ## Local (Storage Provider) + ## + ## This stores the data in a SQLite3 Database. + ## This is only recommended for lightweight non-stateful installations. + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + local: + # Path to the SQLite3 Database. + path: /config/db.sqlite3 + + ## + ## MySQL / MariaDB (Storage Provider) + ## + # mysql: + # host: 127.0.0.1 + # port: 3306 + # database: authelia + # username: authelia + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: mypassword + # timeout: 5s + + ## MySQL TLS settings. Configuring this requires TLS. + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the host option. + # server_name: mysql.example.com + + ## Skip verifying the server certificate entirely. 
In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: TLS1.2 + + ## Maximum TLS version for the connection. + # maximum_version: TLS1.3 + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # 
-----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. 
+ # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe + # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84 + # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk + # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ + # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid.. 
+ # DO NOT USE== + # -----END RSA PRIVATE KEY----- + + ## + ## PostgreSQL (Storage Provider) + ## + # postgres: + # host: 127.0.0.1 + # port: 5432 + # database: authelia + # schema: public + # username: authelia + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: mypassword + # timeout: 5s + + ## PostgreSQL TLS settings. Configuring this requires TLS. + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the host option. + # server_name: postgres.example.com + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: TLS1.2 + + ## Maximum TLS version for the connection. + # maximum_version: TLS1.3 + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. 
+ # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # 
kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # 
jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe
+ # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84
+ # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk
+ # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ
+ # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid..
+ # DO NOT USE==
+ # -----END RSA PRIVATE KEY-----
+
+ ##
+ ## Notification Provider
+ ##
+ ## Notifications are sent to users when they require a password reset, a Webauthn registration or a TOTP registration.
+ ## The available providers are: filesystem, smtp. You must use only one of these providers.
+ notifier:
+ ## You can disable the notifier startup check by setting this to true.
+ disable_startup_check: false
+
+ ##
+ ## File System (Notification Provider)
+ ##
+ ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness
+ ##
+ filesystem:
+ filename: /config/notification.txt
+
+ ##
+ ## SMTP (Notification Provider)
+ ##
+ ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate.
+ ## [Security] By default Authelia will:
+ ## - force all SMTP connections over TLS including unauthenticated connections
+ ## - use the disable_require_tls boolean value to disable this requirement
+ ## (only works for unauthenticated connections)
+ ## - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates
+ ## (configure in tls section)
+ # smtp:
+ ## The SMTP host to connect to.
+ # host: 127.0.0.1
+
+ ## The port to connect to the SMTP host on.
+ # port: 1025
+
+ ## The connection timeout.
+ # timeout: 5s
+
+ ## The username used for SMTP authentication.
+ # username: test
+
+ ## The password used for SMTP authentication.
+ ## Can also be set using a secret: https://www.authelia.com/c/secrets
+ # password: password
+
+ ## The sender is used for the MAIL FROM command and the FROM header.
+ ## If this is not defined and the username is an email, we use the username as this value. This can either be just
+ ## an email address or the RFC5322 'Name <email address>' format.
+ # sender: "Authelia <admin@example.com>"
+
+ ## HELO/EHLO Identifier. Some SMTP Servers may reject the default of localhost.
+ # identifier: localhost
+
+ ## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
+ # subject: "[Authelia] {title}"
+
+ ## This address is used during the startup check to verify the email configuration is correct.
+ ## It's not important what it is except if your email server only allows local delivery.
+ # startup_check_address: test@authelia.com
+
+ ## By default we require some form of TLS. This disables this check though is not advised.
+ # disable_require_tls: false
+
+ ## Disables sending HTML formatted emails.
+ # disable_html_emails: false
+
+ # tls:
+ ## The server subject name to check the servers certificate against during the validation process.
+ ## This option is not required if the certificate has a SAN which matches the host option.
+ # server_name: smtp.example.com
+
+ ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the
+ ## certificate or the certificate of the authority signing the certificate to the certificates directory which is
+ ## defined by the `certificates_directory` option at the top of the configuration.
+ ## It's important to note the public key should be added to the directory, not the private key.
+ ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not
+ ## important to the administrator.
+ # skip_verify: false
+
+ ## Minimum TLS version for the connection.
+ # minimum_version: TLS1.2
+
+ ## Maximum TLS version for the connection.
+ # maximum_version: TLS1.3
+
+ ## The certificate chain used with the private_key if the server requests TLS Client Authentication
+ ## i.e. Mutual TLS.
+ # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # 
kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # 
jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe + # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84 + # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk + # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ + # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid.. + # DO NOT USE== + # -----END RSA PRIVATE KEY----- + ## + ## Identity Providers + ## + # identity_providers: + + ## + ## OpenID Connect (Identity Provider) + ## + ## It's recommended you read the documentation before configuration of this section: + ## https://www.authelia.com/c/oidc + # oidc: + ## The hmac_secret is used to sign OAuth2 tokens (authorization code, access tokens and refresh tokens). + ## HMAC Secret can also be set using a secret: https://www.authelia.com/c/secrets + # hmac_secret: this_is_a_secret_abc123abc123abc + + ## The issuer_certificate_chain is an optional PEM encoded certificate chain. It's used in conjunction with the + ## issuer_private_key to sign JWT's. All certificates in the chain must be within the validity period, and every + ## certificate included must be signed by the certificate immediately after it if provided. 
+ # issuer_certificate_chain: | + # -----BEGIN CERTIFICATE----- + # MIIC5jCCAc6gAwIBAgIRAK4Sj7FiN6PXo/urPfO4E7owDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAPKv3pSyP4ozGEiVLJ14dIWFCEGEgq7WUMI0SZZqQA2ID0L59U/Q + # /Usyy7uC9gfMUzODTpANtkOjFQcQAsxlR1FOjVBrX5QgjSvXwbQn3DtwMA7XWSl6 + # LuYx2rBYSlMSN5UZQm/RxMtXfLK2b51WgEEYDFi+nECSqKzR4R54eOPkBEWRfvuY + # 91AMjlhpivg8e4JWkq4LVQUKbmiFYwIdK8XQiN4blY9WwXwJFYs5sQ/UYMwBFi0H + # kWOh7GEjfxgoUOPauIueZSMSlQp7zqAH39N0ZSYb6cS0Npj57QoWZSY3ak87ebcR + # Nf4rCvZLby7LoN7qYCKxmCaDD3x2+NYpWH8CAwEAAaM1MDMwDgYDVR0PAQH/BAQD + # AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcN + # AQELBQADggEBAHSITqIQSNzonFl3DzxHPEzr2hp6peo45buAAtu8FZHoA+U7Icfh + # /ZXjPg7Xz+hgFwM/DTNGXkMWacQA/PaNWvZspgRJf2AXvNbMSs2UQODr7Tbv+Fb4 + # lyblmMUNYFMCFVAMU0eIxXAFq2qcwv8UMcQFT0Z/35s6PVOakYnAGGQjTfp5Ljuq + # wsdc/xWmM0cHWube6sdRRUD7SY20KU/kWzl8iFO0VbSSrDf1AlEhnLEkp1SPaxXg + # OdBnl98MeoramNiJ7NT6Jnyb3zZ578fjaWfThiBpagItI8GZmG4s4Ovh2JbheN8i + # ZsjNr9jqHTjhyLVbDRlmJzcqoj4JhbKs6/I^invalid DO NOT USE= + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # MIIDBDCCAeygAwIBAgIRALJsPg21kA0zY4F1wUCIuoMwDQYJKoZIhvcNAQELBQAw + # EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNNzAwMTAxMDAwMDAwWhcNNzEwMTAxMDAw + # MDAwWjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP + # ADCCAQoCggEBAMXHBvVxUzYk0u34/DINMSF+uiOekKOAjOrC6Mi9Ww8ytPVO7t2S + # zfTvM+XnEJqkFQFgimERfG/eGhjF9XIEY6LtnXe8ATvOK4nTwdufzBaoeQu3Gd50 + # 5VXr6OHRo//ErrGvFXwP3g8xLePABsi/fkH3oDN+ztewOBMDzpd+KgTrk8ysv2ou + # kNRMKFZZqASvCgv0LD5KWvUCnL6wgf1oTXG7aztduA4oSkUP321GpOmBC5+5ElU7 + # ysoRzvD12o9QJ/IfEaulIX06w9yVMo60C/h6A3U6GdkT1SiyTIqR7v7KU/IWd/Qi + # Lfftcj91VhCmJ73Meff2e2S2PrpjdXbG5FMCAwEAAaNTMFEwDgYDVR0PAQH/BAQD + # AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + # Z7AtA3mzFc0InSBA5fiMfeLXA3owDQYJKoZIhvcNAQELBQADggEBAEE5hm1mtlk/ + # 
kviCoHH4evbpw7rxPxDftIQlqYTtvMM4eWY/6icFoSZ4fUHEWYyps8SsPu/8f2tf + # 71LGgZn0FdHi1QU2H8m0HHK7TFw+5Q6RLrLdSyk0PItJ71s9en7r8pX820nAFEHZ + # HkOSfJZ7B5hFgUDkMtVM6bardXAhoqcMk4YCU96e9d4PB4eI+xGc+mNuYvov3RbB + # D0s8ICyojeyPVLerz4wHjZu68Z5frAzhZ68YbzNs8j2fIBKKHkHyLG1iQyF+LJVj + # 2PjCP+auJsj6fQQpMGoyGtpLcSDh+ptcTngUD8JsWipzTCjmaNqdPHAOYmcgtf4b + # qocikt3WAdU^invalid DO NOT USE= + # -----END CERTIFICATE----- + + ## The issuer_private_key is used to sign the JWT forged by OpenID Connect. + ## Issuer Private Key can also be set using a secret: https://www.authelia.com/c/secrets + # issuer_private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # MIIEpAIBAAKCAQEA8q/elLI/ijMYSJUsnXh0hYUIQYSCrtZQwjRJlmpADYgPQvn1 + # T9D9SzLLu4L2B8xTM4NOkA22Q6MVBxACzGVHUU6NUGtflCCNK9fBtCfcO3AwDtdZ + # KXou5jHasFhKUxI3lRlCb9HEy1d8srZvnVaAQRgMWL6cQJKorNHhHnh44+QERZF+ + # +5j3UAyOWGmK+Dx7glaSrgtVBQpuaIVjAh0rxdCI3huVj1bBfAkVizmxD9RgzAEW + # LQeRY6HsYSN/GChQ49q4i55lIxKVCnvOoAff03RlJhvpxLQ2mPntChZlJjdqTzt5 + # txE1/isK9ktvLsug3upgIrGYJoMPfHb41ilYfwIDAQABAoIBAQDTOdFf2JjHH1um + # aPgRAvNf9v7Nj5jytaRKs5nM6iNf46ls4QPreXnMhqSeSwj6lpNgBYxOgzC9Q+cc + # Y4ob/paJJPaIJTxmP8K/gyWcOQlNToL1l+eJ20eQoZm23NGr5fIsunSBwLEpTrdB + # ENqqtcwhW937K8Pxy/Q1nuLyU2bc6Tn/ivLozc8n27dpQWWKh8537VY7ancIaACr + # LJJLYxKqhQpjtBWAyCDvZQirnAOm9KnvIHaGXIswCZ4Xbsu0Y9NL+woARPyRVQvG + # jfxy4EmO9s1s6y7OObSukwKDSNihAKHx/VIbvVWx8g2Lv5fGOa+J2Y7o9Qurs8t5 + # BQwMTt0BAoGBAPUw5Z32EszNepAeV3E2mPFUc5CLiqAxagZJuNDO2pKtyN29ETTR + # Ma4O1cWtGb6RqcNNN/Iukfkdk27Q5nC9VJSUUPYelOLc1WYOoUf6oKRzE72dkMQV + # R4bf6TkjD+OVR17fAfkswkGahZ5XA7j48KIQ+YC4jbnYKSxZTYyKPjH/AoGBAP1i + # tqXt36OVlP+y84wWqZSjMelBIVa9phDVGJmmhz3i1cMni8eLpJzWecA3pfnG6Tm9 + # ze5M4whASleEt+M00gEvNaU9ND+z0wBfi+/DwJYIbv8PQdGrBiZFrPhTPjGQUldR + # lXccV2meeLZv7TagVxSi3DO6dSJfSEHyemd5j9mBAoGAX8Hv+0gOQZQCSOTAq8Nx + # 6dZcp9gHlNaXnMsP9eTDckOSzh636JPGvj6m+GPJSSbkURUIQ3oyokMNwFqvlNos + # fTaLhAOfjBZI9WnDTTQxpugWjphJ4HqbC67JC/qIiw5S6FdaEvGLEEoD4zoChywZ + # 
9oGAn+fz2d/0/JAH/FpFPgsCgYEAp/ipZgPzziiZ9ov1wbdAQcWRj7RaWnssPFpX + # jXwEiXT3CgEMO4MJ4+KWIWOChrti3qFBg6i6lDyyS6Qyls7sLFbUdC7HlTcrOEMe + # rBoTcCI1GqZNlqWOVQ65ZIEiaI7o1vPBZo2GMQEZuq8mDKFsOMThvvTrM5cAep84 + # n6HJR4ECgYABWcbsSnr0MKvVth/inxjbKapbZnp2HUCuw87Ie5zK2Of/tbC20wwk + # yKw3vrGoE3O1t1g2m2tn8UGGASeZ842jZWjIODdSi5+icysQGuULKt86h/woz2SQ + # 27GoE2i5mh6Yez6VAYbUuns3FcwIsMyWLq043Tu2DNkx9ijOOAuQzw^invalid.. + # DO NOT USE== + # -----END RSA PRIVATE KEY----- + + ## The lifespans configure the expiration for these token types. + # access_token_lifespan: 1h + # authorize_code_lifespan: 1m + # id_token_lifespan: 1h + # refresh_token_lifespan: 90m + + ## Enables additional debug messages. + # enable_client_debug_messages: false + + ## SECURITY NOTICE: It's not recommended changing this option and values below 8 are strongly discouraged. + # minimum_parameter_entropy: 8 + + ## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it set to 'never' + ## for security reasons. + # enforce_pkce: public_clients_only + + ## Cross-Origin Resource Sharing (CORS) settings. + # cors: + ## List of endpoints in addition to the metadata endpoints to permit cross-origin requests on. + # endpoints: + # - authorization + # - token + # - revocation + # - introspection + # - userinfo + + ## List of allowed origins. + ## Any origin with https is permitted unless this option is configured or the + ## allowed_origins_from_client_redirect_uris option is enabled. + # allowed_origins: + # - https://example.com + + ## Automatically adds the origin portion of all redirect URI's on all clients to the list of allowed_origins, + ## provided they have the scheme http or https and do not have the hostname of localhost. + # allowed_origins_from_client_redirect_uris: false + + ## Clients is a list of known clients and their configuration. + # clients: + # - + ## The ID is the OpenID Connect ClientID which is used to link an application to a configuration. 
+ # id: myapp + + ## The description to show to users when they end up on the consent screen. Defaults to the ID above. + # description: My Application + + ## The client secret is a shared secret between Authelia and the consumer of this client. + # secret: this_is_a_secret + + ## Sector Identifiers are occasionally used to generate pairwise subject identifiers. In most cases this is not + ## necessary. Read the documentation for more information. + ## The subject identifier must be the host component of a URL, which is a domain name with an optional port. + # sector_identifier: example.com + + ## Sets the client to public. This should typically not be set, please see the documentation for usage. + # public: false + + ## The policy to require for this client; one_factor or two_factor. + # authorization_policy: two_factor + + ## The consent mode controls how consent is obtained. + # consent_mode: auto + + ## This value controls the duration a consent on this client remains remembered when the consent mode is + ## configured as 'auto' or 'pre-configured'. + # pre_configured_consent_duration: 1w + + ## Audience this client is allowed to request. + # audience: [] + + ## Scopes this client is allowed to request. + # scopes: + # - openid + # - groups + # - email + # - profile + + ## Redirect URI's specifies a list of valid case-sensitive callbacks for this client. + # redirect_uris: + # - https://oidc.example.com:8080/oauth2/callback + + ## Grant Types configures which grants this client can obtain. + ## It's not recommended to define this unless you know what you're doing. + # grant_types: + # - refresh_token + # - authorization_code + + ## Response Types configures which responses this client can be sent. + ## It's not recommended to define this unless you know what you're doing. + # response_types: + # - code + + ## Response Modes configures which response modes this client supports. 
+ # response_modes:
+ # - form_post
+ # - query
+ # - fragment
+
+ ## The algorithm used to sign userinfo endpoint responses for this client, either none or RS256.
+ # userinfo_signing_algorithm: none
diff --git a/cal/docker-compose.yml b/cal/docker-compose.yml
new file mode 100644
index 0000000..4e2fb8b
--- /dev/null
+++ b/cal/docker-compose.yml
@@ -0,0 +1,59 @@
+# Use postgres/example user/password credentials
+version: "3.8"
+
+volumes:
+ database-data:
+
+services:
+ caldb:
+ container_name: caldb
+ image: postgres
+ restart: always
+ volumes:
+ - database-data:/var/lib/postgresql/data/
+ env_file: .env
+ networks:
+ - proxy
+ environment:
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+
+ calcom:
+ container_name: calcom
+ image: calcom.docker.scarf.sh/calcom/cal.com
+ restart: always
+ ports:
+ - "3000:3000"
+ build:
+ context: .
+ dockerfile: Dockerfile
+ args:
+ NEXT_PUBLIC_WEBAPP_URL: ${NEXT_PUBLIC_WEBAPP_URL}
+ NEXT_PUBLIC_LICENSE_CONSENT: ${NEXT_PUBLIC_LICENSE_CONSENT}
+ CALCOM_TELEMETRY_DISABLED: ${CALCOM_TELEMETRY_DISABLED}
+ NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
+ CALENDSO_ENCRYPTION_KEY: ${CALENDSO_ENCRYPTION_KEY}
+ DATABASE_URL: ${DATABASE_URL}
+ network: proxy
+ networks:
+ - proxy
+ env_file: .env
+ environment:
+ - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@caldb:5432/${POSTGRES_DB}
+ depends_on:
+ - caldb
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.cal.entrypoints=http"
+ - "traefik.http.routers.cal.rule=Host(`cal.${URL}`)"
+ - "traefik.http.middlewares.cal-https-redirect.redirectscheme.scheme=https"
+ - "traefik.http.routers.cal.middlewares=cal-https-redirect"
+ - "traefik.http.routers.cal-secure.entrypoints=https"
+ - "traefik.http.routers.cal-secure.rule=Host(`cal.${URL}`)"
+ - "traefik.http.routers.cal-secure.tls=true"
+ - "traefik.http.routers.cal-secure.service=cal"
+ - "traefik.http.services.cal.loadbalancer.server.port=3000"
+ - "traefik.docker.network=proxy"
+
+networks:
+ proxy:
+ external: true diff
--git a/cams.sh b/cams.sh new file mode 100644 index 0000000..1e56320 --- /dev/null +++ b/cams.sh @@ -0,0 +1,6 @@ +ffplay rtsp://localhost:8554/back-yard-cam & +ffplay rtsp://localhost:8554/printer & +ffplay rtsp://localhost:8554/basement & +ffplay rtsp://localhost:8554/art-room & +ffplay rtsp://localhost:8554/aylas-room & +ffplay rtsp://localhost:8554/kitchen-cam & diff --git a/code-server/docker-compose.yml b/code-server/docker-compose.yml index 4eeb27d..5a3cffa 100644 --- a/code-server/docker-compose.yml +++ b/code-server/docker-compose.yml @@ -17,6 +17,7 @@ services: - code-server-data:/config labels: kompose.service.expose: code-server.wayl.one + # traefik.ingress.kubernetes.io/router.middlewares: default-forwardauth-authelia@kubernetescrd ports: - 8443 volumes: diff --git a/code-server/middleware.yml b/code-server/middleware.yml new file mode 100644 index 0000000..9ad2dc5 --- /dev/null +++ b/code-server/middleware.yml @@ -0,0 +1,16 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: forwardauth-authelia + namespace: code-server + labels: + app.kubernetes.io/instance: authelia + app.kubernetes.io/name: authelia +spec: + forwardAuth: + address: http://auth.wayl.one/api/verify?rd=https%3A%2F%2Fcode-server.wayl.one%2F + authResponseHeaders: + - Remote-User + - Remote-Name + - Remote-Email + - Remote-Groups diff --git a/docker-compose.yml.bak b/docker-compose.yml.bak new file mode 100644 index 0000000..8fda809 --- /dev/null +++ b/docker-compose.yml.bak @@ -0,0 +1,111 @@ +version: "3.5" +services: + + traefik: + image: docker.io/traefik:latest + container_name: traefik + restart: unless-stopped + security_opt: + - no-new-privileges:true + networks: + - proxy + ports: + - 80:80 + - 443:443 + environment: + - CF_API_EMAIL=${CF_API_EMAIL} + - CF_DNS_API_TOKEN=${CF_DNS_API_TOKEN} + # - CF_API_KEY=YOUR_API_KEY + # be sure to use the correct one depending on if you are using a token or key + volumes: + - /etc/localtime:/etc/localtime:ro + # 
- /var/run/docker.sock:/var/run/docker.sock:ro + - ${PWD}/traefik/data/traefik.yml:/traefik.yml:ro + - ${PWD}/traefik/data/acme.json:/acme.json + - ${PWD}/traefik/data/config.yml:/config.yml:ro + labels: + - "traefik.enable=true" + - "traefik.http.routers.traefik.entrypoints=http" + - "traefik.http.routers.traefik.rule=Host(`traefik-dashboard.${URL}`)" + - "traefik.http.middlewares.traefik-auth.basicauth.users=${TRAEFIK_USERNAME}:${TRAEFIK_PASSWORD}" + - "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https" + - "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https" + - "traefik.http.routers.traefik.middlewares=traefik-https-redirect" + - "traefik.http.routers.traefik-secure.entrypoints=https" + - "traefik.http.routers.traefik-secure.rule=Host(`traefik-dashboard.${URL}`)" + - "traefik.http.routers.traefik-secure.middlewares=traefik-auth" + - "traefik.http.routers.traefik-secure.tls=true" + - "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare" + - "traefik.http.routers.traefik-secure.tls.domains[0].main=${URL}" + - "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.${URL}" + - "traefik.http.routers.traefik-secure.service=api@internal" + + + portainer: + image: docker.io/portainer/portainer-ce + container_name: portainer + restart: unless-stopped + security_opt: + - no-new-privileges:true + networks: + - proxy + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + - ${PWD}/portainer/data:/data + ports: + - 9000:9000 + labels: + - "traefik.enable=true" + - "traefik.http.routers.portainer.entrypoints=http" + - "traefik.http.routers.portainer.rule=Host(`portainer.${URL}`)" + - "traefik.http.middlewares.portainer-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.portainer.middlewares=portainer-https-redirect" + - "traefik.http.routers.portainer-secure.entrypoints=https" + - 
"traefik.http.routers.portainer-secure.rule=Host(`portainer.${URL}`)" + - "traefik.http.routers.portainer-secure.tls=true" + - "traefik.http.routers.portainer-secure.service=portainer" + - "traefik.http.services.portainer.loadbalancer.server.port=9000" + - "traefik.docker.network=proxy" + jellyfin: + image: docker.io/jellyfin/jellyfin + container_name: jellyfin + restart: unless-stopped + security_opt: + - no-new-privileges:true + + stdin_open: true # docker run -i + tty: true # docker run -t + # network_mode: "host" + # networks: + # - proxy + # user: uid:gid + ports: + - 0.0.0.0:8096:8096 + volumes: + - /tank/jellyfin/config:/config + - /tank/jellyfin/cache:/cache + - /tank/jellyfin/media:/media + - /tank/jellyfin/media2:/media2:ro + # Optional - alternative address used for autodiscovery + environment: + - JELLYFIN_PublishedServerUrl=https://jellyfin.${URL} + # Optional - may be necessary for docker healthcheck to pass if running in host network mode + # extra_hosts: + # - "host.docker.internal:host-gateway" + labels: + - "traefik.enable=true" + - "traefik.http.routers.jellyfin.entrypoints=http" + - "traefik.http.routers.jellyfin.rule=Host(`jellyfin.${URL}`)" + - "traefik.http.middlewares.jellyfin-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.jellyfin.middlewares=jellyfin-https-redirect" + - "traefik.http.routers.jellyfin-secure.entrypoints=https" + - "traefik.http.routers.jellyfin-secure.rule=Host(`jellyfin.${URL}`)" + - "traefik.http.routers.jellyfin-secure.tls=true" + - "traefik.http.routers.jellyfin-secure.service=jellyfin" + - "traefik.http.services.jellyfin.loadbalancer.server.port=8096" + - "traefik.docker.network=proxy" + +networks: + proxy: + external: true diff --git a/frigate/docker-compose.yml b/frigate/docker-compose.yml new file mode 100644 index 0000000..c25746e --- /dev/null +++ b/frigate/docker-compose.yml @@ -0,0 +1,30 @@ +version: "3.9" +services: + frigate: + container_name: frigate + privileged: true # this may not be 
necessary for all setups + image: ghcr.io/blakeblackshear/frigate:stable + shm_size: "64mb" # update for your cameras based on calculation above + devices: + - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions + - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux + - /dev/video11:/dev/video11 # For Raspberry Pi 4B + - /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware + volumes: + - ./config:/config + - storage:/media/frigate + - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear + target: /tmp/cache + tmpfs: + size: 1000000000 + ports: + - "5050:5000" + - "8654:8554" # RTSP feeds + - "8555:8555/tcp" # WebRTC over tcp + - "8555:8555/udp" # WebRTC over udp + environment: + - FRIGATE_RTSP_PASSWORD="password" + - TZ=America/Chicago +volumes: + config: + storage: diff --git a/frigate/frigate.yaml b/frigate/frigate.yaml new file mode 100644 index 0000000..4eb82f6 --- /dev/null +++ b/frigate/frigate.yaml @@ -0,0 +1,166 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert --namespace frigate -o frigate.yaml + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: frigate + name: frigate + namespace: frigate +spec: + ports: + - name: "5000" + port: 5000 + targetPort: 5000 + - name: "8554" + port: 8554 + targetPort: 8554 + - name: "8555" + port: 8555 + targetPort: 8555 + - name: 8555-udp + port: 8555 + protocol: UDP + targetPort: 8555 + selector: + io.kompose.service: frigate +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: frigate + namespace: frigate +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert --namespace frigate -o frigate.yaml + kompose.version: 1.31.2 (a92241f79) + 
creationTimestamp: null + labels: + io.kompose.service: frigate + name: frigate + namespace: frigate +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: frigate + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert --namespace frigate -o frigate.yaml + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/frigate-default: "true" + io.kompose.service: frigate + spec: + containers: + - env: + - name: FRIGATE_RTSP_PASSWORD + value: '"password"' + - name: TZ + value: America/Chicago + image: ghcr.io/blakeblackshear/frigate:stable + name: frigate + ports: + - containerPort: 5000 + hostPort: 5000 + protocol: TCP + - containerPort: 8554 + hostPort: 8554 + protocol: TCP + - containerPort: 8555 + hostPort: 8555 + protocol: TCP + - containerPort: 8555 + hostPort: 8555 + protocol: UDP + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /config + name: config + - mountPath: /media/frigate + name: storage + - mountPath: /tmp/cache + name: frigate-claim2 + restartPolicy: Always + volumes: + - name: config + persistentVolumeClaim: + claimName: config + - name: storage + persistentVolumeClaim: + claimName: storage + - name: frigate-claim2 + persistentVolumeClaim: + claimName: frigate-claim2 +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: config + name: config + namespace: frigate +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: storage + name: storage + namespace: frigate +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: frigate-claim2 + 
name: frigate-claim2 + namespace: frigate +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/generate_example_env.sh b/generate_example_env.sh new file mode 100644 index 0000000..bc704db --- /dev/null +++ b/generate_example_env.sh @@ -0,0 +1 @@ +cat .env | sed 's/\=.*/\=/' > .env.example diff --git a/home-assistant/docker-compose.yml b/home-assistant/docker-compose.yml new file mode 100644 index 0000000..e157a41 --- /dev/null +++ b/home-assistant/docker-compose.yml @@ -0,0 +1,17 @@ +version: '3' +services: + homeassistant: + container_name: homeassistant + image: "ghcr.io/home-assistant/home-assistant:stable" + volumes: + - config:/config + # - /run/dbus:/run/dbus:ro + restart: unless-stopped + privileged: true + network_mode: host + environment: + TZ: "America/Chicago" + ports: + - "8123:8123" +volumes: + config: diff --git a/home/Dockerfile b/home/Dockerfile index b446e19..836d04e 100644 --- a/home/Dockerfile +++ b/home/Dockerfile @@ -1,3 +1,3 @@ -FROM nginx:alpine +FROM docker.io/nginx:alpine COPY ./site /usr/share/nginx/html COPY ./config/default.conf /etc/nginx/conf.d/default.conf diff --git a/home/deployment.yaml b/home/deployment.yaml index d891dab..e61445d 100644 --- a/home/deployment.yaml +++ b/home/deployment.yaml @@ -65,7 +65,7 @@ spec: io.kompose.service: wayl-one spec: containers: - - image: registry.wayl.one/wayl-one:fe8ab95 + - image: registry.wayl.one/wayl-one:8c0277c name: wayl-one ports: - containerPort: 80 diff --git a/home/docker-compose.yml b/home/docker-compose.yml index e5d0416..d1ad9d4 100644 --- a/home/docker-compose.yml +++ b/home/docker-compose.yml @@ -1,7 +1,7 @@ version: "3" services: wayl-one: - image: registry.wayl.one/wayl-one:fe8ab95 + image: registry.wayl.one/wayl-one:8c0277c ports: - 80 labels: diff --git a/home/justfile b/home/justfile index 5eee942..038e6ee 100644 --- a/home/justfile +++ b/home/justfile @@ -3,16 +3,16 @@ default: convert deploy viz regcred: kubectl get 
secret -n default regcred --output=yaml -o yaml | sed 's/namespace: default/namespace: wayl-one/' | kubectl apply -n wayl-one -f - && echo deployed secret || echo secret exists build: - docker build -t registry.wayl.one/wayl-one -f Dockerfile . + podman build -t waylonwalker/wayl-one -f Dockerfile . tag: - docker tag registry.wayl.one/wayl-one registry.wayl.one/wayl-one:$(git rev-parse --short HEAD) + podman tag waylonwalker/wayl-one waylonwalker/wayl-one:v1 test: - docker run -p 5556:80 registry.wayl.one/wayl-one + podman run -p 5556:80 waylonwalker/wayl-one push: - docker push registry.wayl.one/wayl-one:$(git rev-parse --short HEAD) - docker push registry.wayl.one/wayl-one:latest + podman push waylonwalker/wayl-one:v1 + podman push waylonwalker/wayl-one:latest set-image: - kubectl set image deployment/shot-wayl-one --namespace shot shot-wayl-one=registry.wayl.one/wayl-one:$(git rev-parse --short HEAD) + kubectl set image deployment/shot-wayl-one --namespace shot shot-wayl-one=waylonwalker/wayl-one:v1 convert: kompose convert -o deployment.yaml -n wayl-one diff --git a/home/site/8bitcc.ico b/home/site/8bitcc.ico old mode 100755 new mode 100644 diff --git a/home/site/index.html b/home/site/index.html index 886a716..a10370f 100644 --- a/home/site/index.html +++ b/home/site/index.html @@ -189,6 +189,18 @@ play-outside + +
  • + + + + + reader + +
  • diff --git a/home/wayl-one-k8s.png b/home/wayl-one-k8s.png new file mode 100644 index 0000000..ce64257 Binary files /dev/null and b/home/wayl-one-k8s.png differ diff --git a/immich/docker-compose.yml b/immich/docker-compose.yml new file mode 100644 index 0000000..1669d4b --- /dev/null +++ b/immich/docker-compose.yml @@ -0,0 +1,98 @@ +version: "3.8" + +name: immich + +services: + immich-server: + container_name: immich_server + image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release} + command: [ "start.sh", "immich" ] + volumes: + - ${UPLOAD_LOCATION}:/usr/src/app/upload + - /etc/localtime:/etc/localtime:ro + env_file: + - .env + depends_on: + - redis + - database + - typesense + restart: always + + immich-microservices: + container_name: immich_microservices + image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release} + # extends: + # file: hwaccel.yml + # service: hwaccel + command: [ "start.sh", "microservices" ] + volumes: + - ${UPLOAD_LOCATION}:/usr/src/app/upload + - /etc/localtime:/etc/localtime:ro + env_file: + - .env + depends_on: + - redis + - database + - typesense + restart: always + + immich-machine-learning: + container_name: immich_machine_learning + image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release} + volumes: + - model-cache:/cache + env_file: + - .env + restart: always + + immich-web: + container_name: immich_web + image: ghcr.io/immich-app/immich-web:${IMMICH_VERSION:-release} + env_file: + - .env + restart: always + + typesense: + container_name: immich_typesense + image: typesense/typesense:0.24.1@sha256:9bcff2b829f12074426ca044b56160ca9d777a0c488303469143dd9f8259d4dd + environment: + - TYPESENSE_API_KEY=${TYPESENSE_API_KEY} + - TYPESENSE_DATA_DIR=/data + # remove this to get debug messages + - GLOG_minloglevel=1 + volumes: + - tsdata:/data + restart: always + + redis: + container_name: immich_redis + image: redis:6.2-alpine@sha256:3995fe6ea6a619313e31046bd3c8643f9e70f8f2b294ff82659d409b47d06abb + 
restart: always + + database: + container_name: immich_postgres + image: postgres:14-alpine@sha256:874f566dd512d79cf74f59754833e869ae76ece96716d153b0fa3e64aec88d92 + env_file: + - .env + environment: + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_USER: ${DB_USERNAME} + POSTGRES_DB: ${DB_DATABASE_NAME} + volumes: + - pgdata:/var/lib/postgresql/data + restart: always + + immich-proxy: + container_name: immich_proxy + image: ghcr.io/immich-app/immich-proxy:${IMMICH_VERSION:-release} + ports: + - 2283:8080 + depends_on: + - immich-server + - immich-web + restart: always + +volumes: + pgdata: + model-cache: + tsdata: diff --git a/jellyfin/deployment.yaml b/jellyfin/deployment.yaml new file mode 100644 index 0000000..7c9b838 --- /dev/null +++ b/jellyfin/deployment.yaml @@ -0,0 +1,145 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + service: jellyfin + name: jellyfin + namespace: jellyfin +spec: + ports: + - name: "8096" + port: 8096 + targetPort: 8096 + selector: + service: jellyfin +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: jellyfin + namespace: jellyfin +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + service: jellyfin + name: jellyfin + namespace: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + service: jellyfin + strategy: + type: Recreate + template: + metadata: + labels: + service: jellyfin + spec: + containers: + - env: + - name: JELLYFIN_PublishedServerUrl + value: https://jellyfin. 
+ image: docker.io/jellyfin/jellyfin + name: jellyfin + ports: + - containerPort: 8096 + hostIP: 0.0.0.0 + hostPort: 8096 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /config + name: jellyfin-config + - mountPath: /cache + name: jellyfin-cache + - mountPath: /media + name: jellyfin-media + readOnly: true + restartPolicy: Always + volumes: + - name: jellyfin-config + persistentVolumeClaim: + claimName: jellyfin-config + - name: jellyfin-cache + persistentVolumeClaim: + claimName: jellyfin-cache + - name: jellyfin-media + persistentVolumeClaim: + claimName: jellyfin-media +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + service: jellyfin-config + name: jellyfin-config + namespace: jellyfin +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + service: jellyfin-cache + name: jellyfin-cache + namespace: jellyfin +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + service: jellyfin-media + name: jellyfin-media + namespace: jellyfin + +spec: + storageClassName: manual + volumeName: jellyfin-media + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3000Gi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: jellyfin-media + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 3000Gi + accessModes: + - ReadWriteOnce + hostPath: + path: /mnt/vault diff --git a/justfile b/justfile index 10e1482..f389be2 100644 --- a/justfile +++ b/justfile @@ -14,4 +14,5 @@ viz: k8sviz -n installer --kubeconfig $KUBECONFIG -t png -o kubeviz/installer.png k8sviz -n vault --kubeconfig $KUBECONFIG -t png -o kubeviz/vault.png k8sviz -n jobrunner --kubeconfig $KUBECONFIG -t png -o kubeviz/jobrunner.png + convert kubeviz/*.png -append 
kubeviz/all.png # convert image1.png image2.png image3.png -append result/result-sprite.png diff --git a/k8s.png b/k8s.png new file mode 100644 index 0000000..e2b7698 Binary files /dev/null and b/k8s.png differ diff --git a/matrix/deployment.yaml b/matrix/deployment.yaml new file mode 100644 index 0000000..cccf955 --- /dev/null +++ b/matrix/deployment.yaml @@ -0,0 +1,232 @@ +# namespace: matrix +apiVersion: v1 +kind: Namespace +metadata: + name: matrix +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: db + name: db + namespace: matrix +spec: + ports: + - name: "5432" + port: 5432 + targetPort: 5432 + selector: + io.kompose.service: db +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.service.expose: dev-matrix.wayl.one,matrix.k.waylonwalker.com + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + ports: + - name: "8008" + port: 8008 + targetPort: 8008 + selector: + io.kompose.service: synapse +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: matrix + namespace: matrix +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: db + name: db + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: db + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + 
io.kompose.network/matrix-default: "true" + io.kompose.service: db + spec: + containers: + - env: + - name: POSTGRES_INITDB_ARGS + value: --encoding=UTF-8 --lc-collate=C --lc-ctype=C + - name: POSTGRES_PASSWORD + value: synapse + - name: POSTGRES_USER + value: synapse + image: docker.io/postgres:12-alpine + name: synapse-db + ports: + - containerPort: 5432 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: schemas + restartPolicy: Always + volumes: + - name: schemas + persistentVolumeClaim: + claimName: schemas +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: schemas + name: schemas + namespace: matrix +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.service.expose: dev-matrix.wayl.one,matrix.k.waylonwalker.com + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: synapse + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.service.expose: dev-matrix.wayl.one,matrix.k.waylonwalker.com + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/matrix-default: "true" + io.kompose.service: synapse + spec: + containers: + - image: docker.io/matrixdotorg/synapse:latest + name: synapse + ports: + - containerPort: 8008 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /data + name: synapse-data + restartPolicy: Always + volumes: + - name: synapse-data + persistentVolumeClaim: + claimName: synapse-data +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress 
+metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n matrix + kompose.service.expose: dev-matrix.wayl.one,matrix.k.waylonwalker.com + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + rules: + - host: dev-matrix.wayl.one + http: + paths: + - backend: + service: + name: synapse + port: + number: 8008 + path: / + pathType: Prefix + - host: matrix.k.waylonwalker.com + http: + paths: + - backend: + service: + name: synapse + port: + number: 8008 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: synapse-data + name: synapse-data + namespace: matrix +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} diff --git a/matrix/docker-compose.yml b/matrix/docker-compose.yml index cbe6d2e..d39b8ab 100644 --- a/matrix/docker-compose.yml +++ b/matrix/docker-compose.yml @@ -8,44 +8,49 @@ services: synapse: image: docker.io/matrixdotorg/synapse:latest container_name: synapse - restart: unless-stopped networks: - proxy security_opt: - no-new-privileges:true volumes: - - ${PWD}/matrix/synapse-data:/data - depends_on: - - db + # - ${PWD}/matrix/synapse-data:/data + - synapse-data:/data + # depends_on: + # - db ports: - 8448:8448/tcp labels: - - "traefik.enable=true" - - "traefik.http.routers.synapse.entrypoints=http" - - "traefik.http.routers.synapse.rule=Host(`matrix.${URL}`)" - - "traefik.http.middlewares.synapse-https-redirect.redirectscheme.scheme=https" - - "traefik.http.routers.synapse.middlewares=synapse-https-redirect" - - "traefik.http.routers.synapse-secure.entrypoints=https" - - "traefik.http.routers.synapse-secure.rule=Host(`matrix.${URL}`)" - - "traefik.http.routers.synapse-secure.tls=true" - - "traefik.http.routers.synapse-secure.service=synapse" - - 
"traefik.http.services.synapse.loadbalancer.server.port=8008" - - "traefik.docker.network=proxy" - db: - image: docker.io/postgres:12-alpine - container_name: synapse-db - # Change that password, of course! - environment: - - POSTGRES_USER=${SYNAPSE_POSTGRES_USER:-synapse} - - POSTGRES_PASSWORD=${SYNAPSE_POSTGRES_PASSWORD:-synapse} - # ensure the database gets created correctly - # https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database - - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C - volumes: - # You may store the database tables in a local folder.. - - ${PWD}/matrix/schemas:/var/lib/postgresql/data - # .. or store them on some high performance storage for better results - # - /path/to/ssd/storage:/var/lib/postgresql/data + kompose.service.expose: m2.wayl.one + # - "traefik.enable=true" + # - "traefik.http.routers.synapse.entrypoints=http" + # - "traefik.http.routers.synapse.rule=Host(`matrix.${URL}`)" + # - "traefik.http.middlewares.synapse-https-redirect.redirectscheme.scheme=https" + # - "traefik.http.routers.synapse.middlewares=synapse-https-redirect" + # - "traefik.http.routers.synapse-secure.entrypoints=https" + # - "traefik.http.routers.synapse-secure.rule=Host(`matrix.${URL}`)" + # - "traefik.http.routers.synapse-secure.tls=true" + # - "traefik.http.routers.synapse-secure.service=synapse" + # - "traefik.http.services.synapse.loadbalancer.server.port=8008" + # - "traefik.docker.network=proxy" + # db: + # image: docker.io/postgres:12-alpine + # container_name: synapse-db + # # Change that password, of course! + # environment: + # - POSTGRES_USER=${SYNAPSE_POSTGRES_USER:-synapse} + # - POSTGRES_PASSWORD=${SYNAPSE_POSTGRES_PASSWORD:-synapse} + # # ensure the database gets created correctly + # # https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database + # - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C + # volumes: + # # You may store the database tables in a local folder.. 
+ # - ${PWD}/matrix/schemas:/var/lib/postgresql/data + # # .. or store them on some high performance storage for better results + # # - /path/to/ssd/storage:/var/lib/postgresql/data + +volumes: + synapse-data: {} + networks: proxy: diff --git a/matrix/generate.yaml b/matrix/generate.yaml new file mode 100644 index 0000000..526f126 --- /dev/null +++ b/matrix/generate.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: matrix +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: synapse-generate + namespace: matrix +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + containers: + - image: docker.io/matrixdotorg/synapse:latest + name: synapse + command: ["generate"] + volumeMounts: + - mountPath: /data + name: synapse-data + volumes: + - name: synapse-data + persistentVolumeClaim: + claimName: synapse-data + # template: + # spec: + # containers: + # - image: docker.io/matrixdotorg/synapse:latest + # name: synapse + # command: ["generate"] + # volumeMounts: + # - mountPath: /data + # name: synapse-data + # volumes: + # - name: synapse-data + # persistentVolumeClaim: + # claimName: synapse-data +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: synapse-data + name: synapse-data + namespace: matrix +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} diff --git a/matrix/justfile b/matrix/justfile new file mode 100644 index 0000000..b486eb6 --- /dev/null +++ b/matrix/justfile @@ -0,0 +1,9 @@ +default: convert deploy viz +convert: + kompose convert -o deployment.yaml -n matrix +deploy: + kubectl apply -f deployment.yaml +viz: + k8sviz -n matrix --kubeconfig $KUBECONFIG -t png -o matrix-k8s.png +restart: + kubectl rollout restart -n matrix deployment/matrix diff --git a/matrix/kui b/matrix/kui new file mode 100644 index 0000000..b008e92 Binary files /dev/null and b/matrix/kui differ diff --git a/matrix/matrix-k8s.png 
b/matrix/matrix-k8s.png new file mode 100644 index 0000000..f41d63a Binary files /dev/null and b/matrix/matrix-k8s.png differ diff --git a/matrix/matrix.yaml b/matrix/matrix.yaml new file mode 100644 index 0000000..7f3f41a --- /dev/null +++ b/matrix/matrix.yaml @@ -0,0 +1,127 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert --namespace matrix -o matrix.yaml + kompose.service.expose: m2.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + ports: + - name: "8448" + port: 8448 + targetPort: 8448 + selector: + io.kompose.service: synapse +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: matrix + namespace: matrix +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert --namespace matrix -o matrix.yaml + kompose.service.expose: m2.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: synapse + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert --namespace matrix -o matrix.yaml + kompose.service.expose: m2.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/proxy: "true" + io.kompose.service: synapse + spec: + containers: + - image: docker.io/matrixdotorg/synapse:latest + name: synapse + ports: + - containerPort: 8448 + hostPort: 8448 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /data + name: synapse-data + restartPolicy: Always + volumes: + - name: synapse-data + persistentVolumeClaim: + claimName: synapse-data +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + 
kompose.cmd: kompose convert --namespace matrix -o matrix.yaml + kompose.service.expose: m2.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: synapse + name: synapse + namespace: matrix +spec: + rules: + - host: m2.wayl.one + http: + paths: + - backend: + service: + name: synapse + port: + number: 8448 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: synapse-data + name: synapse-data + namespace: matrix +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/matrix/pvc-inspector.yaml b/matrix/pvc-inspector.yaml new file mode 100644 index 0000000..b964ae1 --- /dev/null +++ b/matrix/pvc-inspector.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: matrix +--- +apiVersion: v1 +kind: Pod +metadata: + name: pvc-inspector + namespace: matrix +spec: + containers: + - image: docker.io/matrixdotorg/synapse:latest + name: pvc-inspector + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /data + name: synapse-data + - image: docker.io/matrixdotorg/synapse:latest + name: pvc-inspector-synapse + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /data + name: synapse-data + volumes: + - name: synapse-data + persistentVolumeClaim: + claimName: synapse-data + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: synapse-data + name: synapse-data + namespace: matrix +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} diff --git a/minio/deployment.yaml b/minio/deployment.yaml deleted file mode 100644 index 914d70e..0000000 --- a/minio/deployment.yaml +++ /dev/null @@ -1,107 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - annotations: - kompose.cmd: kompose convert -o deployment.yaml -n minio 
--replicas 3 - kompose.version: 1.31.2 (a92241f79) - creationTimestamp: null - labels: - io.kompose.service: sandcrawler - name: sandcrawler - namespace: minio -spec: - ports: - - name: "9000" - port: 9000 - targetPort: 9000 - - name: "9001" - port: 9001 - targetPort: 9001 - selector: - io.kompose.service: sandcrawler -status: - loadBalancer: {} - ---- -apiVersion: v1 -kind: Namespace -metadata: - creationTimestamp: null - name: minio - namespace: minio -spec: {} -status: {} - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - kompose.cmd: kompose convert -o deployment.yaml -n minio --replicas 3 - kompose.version: 1.31.2 (a92241f79) - creationTimestamp: null - labels: - io.kompose.service: sandcrawler - name: sandcrawler - namespace: minio -spec: - replicas: 3 - selector: - matchLabels: - io.kompose.service: sandcrawler - strategy: - type: Recreate - template: - metadata: - annotations: - kompose.cmd: kompose convert -o deployment.yaml -n minio --replicas 3 - kompose.version: 1.31.2 (a92241f79) - creationTimestamp: null - labels: - io.kompose.network/minio-default: "true" - io.kompose.service: sandcrawler - spec: - containers: - - args: - - server - - --console-address - - :9001 - - /data - image: minio/minio - name: sandcrawler - ports: - - containerPort: 9000 - protocol: TCP - - containerPort: 9001 - protocol: TCP - resources: {} - volumeMounts: - - mountPath: /data - name: minio-storage - envFrom: - - secretRef: - name: minio-secret - restartPolicy: Always - volumes: - - name: minio-storage - persistentVolumeClaim: - claimName: minio-storage - -status: {} - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - creationTimestamp: null - labels: - io.kompose.service: minio-storage - name: minio-storage - namespace: minio -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Mi -status: {} diff --git a/minio/secret.yaml b/minio/secret.yaml deleted file mode 100644 index d89595e..0000000 --- a/minio/secret.yaml 
+++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: minio-secret -data: - MINIO_ROOT_USER: "d2F5bG9uCg==" - MINIO_ROOT_PASSWORD: "TnpwWTRIKllCMnVncSQK" diff --git a/nextcloud/docker-compose.yml b/nextcloud/docker-compose.yml new file mode 100644 index 0000000..20a8f84 --- /dev/null +++ b/nextcloud/docker-compose.yml @@ -0,0 +1,19 @@ +services: + nextcloud: + image: lscr.io/linuxserver/nextcloud:latest + container_name: nextcloud + environment: + - PUID=1000 + - PGID=1000 + - TZ="America/Chicago" + volumes: + - appdata:/config + - data:/data + ports: + - 443 + labels: + kompose.service.expose: nextcloud.wayl.one + +volumes: + appdata: + data: diff --git a/nextcloud/nextcloud.yaml b/nextcloud/nextcloud.yaml new file mode 100644 index 0000000..e61cf5b --- /dev/null +++ b/nextcloud/nextcloud.yaml @@ -0,0 +1,155 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert --namespace nextcloud -o nextcloud.yaml + kompose.service.expose: nextcloud.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: nextcloud + name: nextcloud + namespace: nextcloud +spec: + ports: + - name: "443" + port: 443 + targetPort: 443 + selector: + io.kompose.service: nextcloud +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: nextcloud + namespace: nextcloud +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert --namespace nextcloud -o nextcloud.yaml + kompose.service.expose: nextcloud.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: nextcloud + name: nextcloud + namespace: nextcloud +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: nextcloud + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert --namespace nextcloud -o 
nextcloud.yaml + kompose.service.expose: nextcloud.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/nextcloud-default: "true" + io.kompose.service: nextcloud + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: '"America/Chicago"' + image: lscr.io/linuxserver/nextcloud:latest + name: nextcloud + ports: + - containerPort: 443 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /config + name: appdata + - mountPath: /data + name: data + restartPolicy: Always + volumes: + - name: appdata + persistentVolumeClaim: + claimName: appdata + - name: data + persistentVolumeClaim: + claimName: data +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kompose.cmd: kompose convert --namespace nextcloud -o nextcloud.yaml + kompose.service.expose: nextcloud.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: nextcloud + name: nextcloud + namespace: nextcloud +spec: + rules: + - host: nextcloud.wayl.one + http: + paths: + - backend: + service: + name: nextcloud + port: + number: 443 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: appdata + name: appdata + namespace: nextcloud +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: data + name: data + namespace: nextcloud +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/nextcloud/values.yaml b/nextcloud/values.yaml new file mode 100644 index 0000000..8229014 --- /dev/null +++ b/nextcloud/values.yaml @@ -0,0 +1,563 @@ +## Official nextcloud image version +## ref: 
https://hub.docker.com/r/library/nextcloud/tags/ +## +image: + repository: nextcloud + flavor: apache + # default is generated by flavor and appVersion + tag: + pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistrKeySecretName + +nameOverride: "" +fullnameOverride: "" +podAnnotations: {} +deploymentAnnotations: {} +deploymentLabels: {} + +# Number of replicas to be deployed +replicaCount: 1 + +## Allowing use of ingress controllers +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + enabled: false + # className: nginx + annotations: {} + # nginx.ingress.kubernetes.io/proxy-body-size: 4G + # kubernetes.io/tls-acme: "true" + # cert-manager.io/cluster-issuer: letsencrypt-prod + # # Keep this in sync with the README.md: + # nginx.ingress.kubernetes.io/server-snippet: |- + # server_tokens off; + # proxy_hide_header X-Powered-By; + # rewrite ^/.well-known/webfinger /index.php/.well-known/webfinger last; + # rewrite ^/.well-known/nodeinfo /index.php/.well-known/nodeinfo last; + # rewrite ^/.well-known/host-meta /public.php?service=host-meta last; + # rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json; + # location = /.well-known/carddav { + # return 301 $scheme://$host/remote.php/dav; + # } + # location = /.well-known/caldav { + # return 301 $scheme://$host/remote.php/dav; + # } + # location = /robots.txt { + # allow all; + # log_not_found off; + # access_log off; + # } + # location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { + # deny all; + # } + # location ~ ^/(?:autotest|occ|issue|indie|db_|console) { + # deny all; + # } + # tls: + # - secretName: nextcloud-tls + # hosts: + # - nextcloud.kube.home + labels: {} + path: / + pathType: Prefix + + +# Allow configuration of lifecycle hooks +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ +lifecycle: {} +# postStartCommand: [] +# preStopCommand: [] + +phpClientHttpsFix: + enabled: false + protocol: 
https + +nextcloud: + host: nextcloud.wayl.one + username: admin + password: changeme + ## Use an existing secret + existingSecret: + enabled: false + # secretName: nameofsecret + usernameKey: nextcloud-username + passwordKey: nextcloud-password + tokenKey: nextcloud-token + smtpUsernameKey: smtp-username + smtpPasswordKey: smtp-password + smtpHostKey: smtp-host + update: 0 + # If web server is not binding default port, you can define it + containerPort: 80 + datadir: /var/www/html/data + persistence: + subPath: + mail: + enabled: false + fromAddress: user + domain: domain.com + smtp: + host: domain.com + secure: ssl + port: 465 + authtype: LOGIN + name: user + password: pass + # PHP Configuration files + # Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true + phpConfigs: {} + # Default config files + # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself + # Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config + defaultConfigs: + # To protect /var/www/html/config + .htaccess: true + # Redis default configuration + redis.config.php: true + # Apache configuration for rewrite urls + apache-pretty-urls.config.php: true + # Define APCu as local cache + apcu.config.php: true + # Apps directory configs + apps.config.php: true + # Used for auto configure database + autoconfig.php: true + # SMTP default configuration + smtp.config.php: true + # Extra config files created in /var/www/html/config/ + # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file + configs: {} + + # For example, to use S3 as primary storage + # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3 + # + # configs: + # s3.config.php: |- + # array( + # 'class' => 
'\\OC\\Files\\ObjectStore\\S3', + # 'arguments' => array( + # 'bucket' => 'my-bucket', + # 'autocreate' => true, + # 'key' => 'xxx', + # 'secret' => 'xxx', + # 'region' => 'us-east-1', + # 'use_ssl' => true + # ) + # ) + # ); + + # Hooks for auto configuration + # Here you could write small scripts which are placed in `/docker-entrypoint-hooks.d//helm.sh` + # ref: https://github.com/nextcloud/docker?tab=readme-ov-file#auto-configuration-via-hook-folders + hooks: + pre-installation: + post-installation: + pre-upgrade: + post-upgrade: + before-starting: + + ## Strategy used to replace old pods + ## IMPORTANT: use with care, it is suggested to leave as that for upgrade purposes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: Recreate + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + + ## + ## Extra environment variables + extraEnv: + # - name: SOME_SECRET_ENV + # valueFrom: + # secretKeyRef: + # name: nextcloud + # key: secret_key + + # Extra init containers that runs before pods start. + extraInitContainers: [] + # - name: do-something + # image: busybox + # command: ['do', 'something'] + + # Extra sidecar containers. + extraSidecarContainers: [] + # - name: nextcloud-logger + # image: busybox + # command: [/bin/sh, -c, 'while ! test -f "/run/nextcloud/data/nextcloud.log"; do sleep 1; done; tail -n+1 -f /run/nextcloud/data/nextcloud.log'] + # volumeMounts: + # - name: nextcloud-data + # mountPath: /run/nextcloud/data + + # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume + # to NextCloud pods in Kubernetes. This can then be configured in External Storage + extraVolumes: + # - name: nfs + # nfs: + # server: "10.0.0.1" + # path: "/nextcloud_data" + # readOnly: false + extraVolumeMounts: + # - name: nfs + # mountPath: "/legacy_data" + + # Set securityContext parameters for the nextcloud CONTAINER only (will not affect nginx container). 
+ # For example, you may need to define runAsNonRoot directive + securityContext: {} + # runAsUser: 33 + # runAsGroup: 33 + # runAsNonRoot: true + # readOnlyRootFilesystem: false + + # Set securityContext parameters for the entire pod. For example, you may need to define runAsNonRoot directive + podSecurityContext: {} + # runAsUser: 33 + # runAsGroup: 33 + # runAsNonRoot: true + # readOnlyRootFilesystem: false + +nginx: + ## You need to set an fpm version of the image for nextcloud if you want to use nginx! + enabled: false + image: + repository: nginx + tag: alpine + pullPolicy: IfNotPresent + containerPort: 80 + + config: + # This generates the default nginx config as per the nextcloud documentation + default: true + # custom: |- + # worker_processes 1;.. + + resources: {} + + # Set nginx container securityContext parameters. For example, you may need to define runAsNonRoot directive + securityContext: {} + # the nginx alpine container default user is 82 + # runAsUser: 82 + # runAsGroup: 33 + # runAsNonRoot: true + # readOnlyRootFilesystem: true + +internalDatabase: + enabled: true + name: nextcloud + +## +## External database configuration +## +externalDatabase: + enabled: false + + ## Supported database engines: mysql or postgresql + type: mysql + + ## Database host + host: + + ## Database user + user: nextcloud + + ## Database password + password: "" + + ## Database name + database: nextcloud + + ## Use a existing secret + existingSecret: + enabled: false + # secretName: nameofsecret + usernameKey: db-username + passwordKey: db-password + # hostKey: db-hostname-or-ip + # databaseKey: db-name + +## +## MariaDB chart configuration +## ref: https://github.com/bitnami/charts/tree/main/bitnami/mariadb +## +mariadb: + ## Whether to deploy a mariadb server from the bitnami mariab db helm chart + # to satisfy the applications database requirements. 
if you want to deploy this bitnami mariadb, set this and externalDatabase to true + # To use an ALREADY DEPLOYED mariadb database, set this to false and configure the externalDatabase parameters + enabled: false + + auth: + database: nextcloud + username: nextcloud + password: changeme + # Use existing secret (auth.rootPassword, auth.password, and auth.replicationPassword will be ignored). + # secret must contain the keys mariadb-root-password, mariadb-replication-password and mariadb-password + existingSecret: "" + + architecture: standalone + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + primary: + persistence: + enabled: false + # Use an existing Persistent Volume Claim (must be created ahead of time) + # existingClaim: "" + # storageClass: "" + accessMode: ReadWriteOnce + size: 8Gi + +## +## PostgreSQL chart configuration +## for more options see https://github.com/bitnami/charts/tree/main/bitnami/postgresql +## +postgresql: + enabled: false + global: + postgresql: + # global.postgresql.auth overrides postgresql.auth + auth: + username: nextcloud + password: changeme + database: nextcloud + # Name of existing secret to use for PostgreSQL credentials. + # auth.postgresPassword, auth.password, and auth.replicationPassword will be ignored and picked up from this secret. + # secret might also contains the key ldap-password if LDAP is enabled. + # ldap.bind_password will be ignored and picked from this secret in this case. 
+ existingSecret: "" + # Names of keys in existing secret to use for PostgreSQL credentials + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + primary: + persistence: + enabled: false + # Use an existing Persistent Volume Claim (must be created ahead of time) + # existingClaim: "" + # storageClass: "" + +## +## Redis chart configuration +## for more options see https://github.com/bitnami/charts/tree/main/bitnami/redis +## + +redis: + enabled: false + auth: + enabled: true + password: 'changeme' + # name of an existing secret with Redis® credentials (instead of auth.password), must be created ahead of time + existingSecret: "" + # Password key to be retrieved from existing secret + existingSecretPasswordKey: "" + + +## Cronjob to execute Nextcloud background tasks +## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron +## +cronjob: + enabled: false + + ## Cronjob sidecar resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + + # Allow configuration of lifecycle hooks + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ + lifecycle: {} + # postStartCommand: [] + # preStopCommand: [] + # Set securityContext parameters. 
For example, you may need to define runAsNonRoot directive + securityContext: {} + # runAsUser: 33 + # runAsGroup: 33 + # runAsNonRoot: true + # readOnlyRootFilesystem: true + +service: + type: ClusterIP + port: 8080 + loadBalancerIP: "" + nodePort: nil + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + # Nextcloud Data (/var/www/html) + enabled: false + annotations: {} + ## nextcloud data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 8Gi + + ## Use an additional pvc for the data directory rather than a subpath of the default PVC + ## Useful to store data on a different storageClass (e.g. on slower disks) + nextcloudData: + enabled: false + subPath: + annotations: {} + # storageClass: "-" + # existingClaim: + accessMode: ReadWriteOnce + size: 8Gi + +resources: {} +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Liveness and readiness probe values +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + + +## Enable pod autoscaling using HorizontalPodAutoscaler +## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ +## +hpa: + enabled: false + cputhreshold: 60 + minPods: 1 + maxPods: 10 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + replicaCount: 1 + # The metrics exporter needs to know how you serve Nextcloud either http or https + https: false + # Use API token if set, otherwise fall back to password authentication + # https://github.com/xperimental/nextcloud-exporter#token-authentication + # Currently you still need to set the token manually in your nextcloud install + token: "" + timeout: 5s + # if set to true, exporter skips certificate verification of Nextcloud server. 
+ tlsSkipVerify: false + + image: + repository: xperimental/nextcloud-exporter + tag: 0.6.2 + pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistrKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter pod Annotation and Labels + # podAnnotations: {} + + # podLabels: {} + + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9205" + labels: {} + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + + ## @param metrics.serviceMonitor.namespaceSelector The selector of the namespace where the target service is located (defaults to the release namespace) + namespaceSelector: + + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: 30s + + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + + +rbac: + enabled: false + serviceaccount: + create: true + name: nextcloud-serviceaccount + annotations: {} + + +## @param securityContext for nextcloud pod @deprecated Use `nextcloud.podSecurityContext` instead +securityContext: {} diff --git a/photoprism/deployment.yaml b/photoprism/deployment.yaml new file mode 100644 index 0000000..d5dce4a --- /dev/null +++ b/photoprism/deployment.yaml @@ -0,0 +1,319 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: mariadb + name: mariadb + namespace: photoprism +spec: + ports: + - name: "3306" + port: 3306 + targetPort: 3306 + selector: + io.kompose.service: mariadb +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.service.expose: photoprism.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: photoprism + name: photoprism + namespace: photoprism +spec: + ports: + - name: "2342" + port: 2342 + targetPort: 2342 + selector: + io.kompose.service: photoprism +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: photoprism + namespace: photoprism +spec: {} +status: {} + +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: mariadb + name: mariadb + namespace: photoprism +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mariadb + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/photoprism-default: "true" + io.kompose.service: mariadb + spec: + containers: + - args: + - mariadbd + - --innodb-buffer-pool-size=512M + - --transaction-isolation=READ-COMMITTED + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + - --max-connections=512 + - --innodb-rollback-on-timeout=OFF + - --innodb-lock-wait-timeout=120 + env: + - name: MARIADB_AUTO_UPGRADE + value: "1" + - name: MARIADB_DATABASE + value: photoprism + - name: MARIADB_INITDB_SKIP_TZINFO + value: "1" + - name: MARIADB_PASSWORD + value: insecure + - name: MARIADB_ROOT_PASSWORD + value: insecure + - name: MARIADB_USER + value: photoprism + image: mariadb:10.11 + name: mariadb + ports: + - containerPort: 3306 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /var/lib/mysql + name: photoprism-db + restartPolicy: Always + terminationGracePeriodSeconds: 5 + volumes: + - name: photoprism-db + persistentVolumeClaim: + claimName: photoprism-db +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: photoprism-db + name: photoprism-db + namespace: photoprism +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.service.expose: photoprism.wayl.one + 
kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: photoprism + name: photoprism + namespace: photoprism +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: photoprism + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.service.expose: photoprism.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/photoprism-default: "true" + io.kompose.service: photoprism + spec: + containers: + - env: + - name: PHOTOPRISM_ADMIN_PASSWORD + value: insecure + - name: PHOTOPRISM_ADMIN_USER + value: admin + - name: PHOTOPRISM_AUTH_MODE + value: password + - name: PHOTOPRISM_DATABASE_DRIVER + value: mysql + - name: PHOTOPRISM_DATABASE_NAME + value: photoprism + - name: PHOTOPRISM_DATABASE_PASSWORD + value: insecure + - name: PHOTOPRISM_DATABASE_SERVER + value: mariadb:3306 + - name: PHOTOPRISM_DATABASE_USER + value: photoprism + - name: PHOTOPRISM_DEFAULT_TLS + value: "true" + - name: PHOTOPRISM_DETECT_NSFW + value: "false" + - name: PHOTOPRISM_DISABLE_CHOWN + value: "false" + - name: PHOTOPRISM_DISABLE_CLASSIFICATION + value: "false" + - name: PHOTOPRISM_DISABLE_FACES + value: "false" + - name: PHOTOPRISM_DISABLE_RAW + value: "false" + - name: PHOTOPRISM_DISABLE_SETTINGS + value: "false" + - name: PHOTOPRISM_DISABLE_TENSORFLOW + value: "false" + - name: PHOTOPRISM_DISABLE_TLS + value: "false" + - name: PHOTOPRISM_DISABLE_VECTORS + value: "false" + - name: PHOTOPRISM_DISABLE_WEBDAV + value: "false" + - name: PHOTOPRISM_EXPERIMENTAL + value: "false" + - name: PHOTOPRISM_HTTP_COMPRESSION + value: gzip + - name: PHOTOPRISM_JPEG_QUALITY + value: "85" + - name: PHOTOPRISM_LOG_LEVEL + value: info + - name: PHOTOPRISM_ORIGINALS_LIMIT + value: "5000" + - name: PHOTOPRISM_RAW_PRESETS + value: "false" + - name: PHOTOPRISM_READONLY + value: "false" + - name: PHOTOPRISM_SITE_AUTHOR + - name: 
PHOTOPRISM_SITE_CAPTION + value: AI-Powered Photos App + - name: PHOTOPRISM_SITE_DESCRIPTION + - name: PHOTOPRISM_SITE_URL + value: https://photoprism.wayl.one + - name: PHOTOPRISM_UPLOAD_NSFW + value: "true" + image: photoprism/photoprism:latest + name: photoprism + ports: + - containerPort: 2342 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /photoprism/originals + name: photoprism-originals + - mountPath: /photoprism/storage + name: photoprism-storage + workingDir: /photoprism + restartPolicy: Always + terminationGracePeriodSeconds: 10 + volumes: + - name: photoprism-originals + persistentVolumeClaim: + claimName: photoprism-originals + - name: photoprism-storage + persistentVolumeClaim: + claimName: photoprism-storage +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n photoprism + kompose.service.expose: photoprism.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: photoprism + name: photoprism + namespace: photoprism +spec: + rules: + - host: photoprism.wayl.one + http: + paths: + - backend: + service: + name: photoprism + port: + number: 2342 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: photoprism-originals + name: photoprism-originals + namespace: photoprism +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: photoprism-storage + name: photoprism-storage + namespace: photoprism +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/photoprism/docker-compose.yml b/photoprism/docker-compose.yml new file mode 100644 index 0000000..c44a07c --- /dev/null 
+++ b/photoprism/docker-compose.yml @@ -0,0 +1,140 @@ +version: "3.5" + +# Example Docker Compose config file for PhotoPrism (Linux / AMD64) +# +# Note: +# - Running PhotoPrism on a server with less than 4 GB of swap space or setting a memory/swap limit can cause unexpected +# restarts ("crashes"), for example, when the indexer temporarily needs more memory to process large files. +# - If you install PhotoPrism on a public server outside your home network, please always run it behind a secure +# HTTPS reverse proxy such as Traefik or Caddy. Your files and passwords will otherwise be transmitted +# in clear text and can be intercepted by anyone, including your provider, hackers, and governments: +# https://docs.photoprism.app/getting-started/proxies/traefik/ +# +# Setup Guides: +# - https://docs.photoprism.app/getting-started/docker-compose/ +# - https://docs.photoprism.app/getting-started/raspberry-pi/ +# - https://www.photoprism.app/kb/activation +# +# Troubleshooting Checklists: +# - https://docs.photoprism.app/getting-started/troubleshooting/ +# - https://docs.photoprism.app/getting-started/troubleshooting/docker/ +# - https://docs.photoprism.app/getting-started/troubleshooting/mariadb/ +# +# CLI Commands: +# - https://docs.photoprism.app/getting-started/docker-compose/#command-line-interface +# +# All commands may have to be prefixed with "sudo" when not running as root. +# This will point the home directory shortcut ~ to /root in volume mounts. + +services: + photoprism: + ## Use photoprism/photoprism:preview for testing preview builds: + image: photoprism/photoprism:latest + ## Don't enable automatic restarts until PhotoPrism has been properly configured and tested! 
+ ## If the service gets stuck in a restart loop, this points to a memory, filesystem, network, or database issue: + ## https://docs.photoprism.app/getting-started/troubleshooting/#fatal-server-errors + # restart: unless-stopped + stop_grace_period: 10s + depends_on: + - mariadb + security_opt: + - seccomp:unconfined + - apparmor:unconfined + ports: + - 2342 # HTTP port (host:container) + environment: + PHOTOPRISM_ADMIN_USER: "admin" # admin login username + PHOTOPRISM_ADMIN_PASSWORD: "insecure" # initial admin password (8-72 characters) + PHOTOPRISM_AUTH_MODE: "password" # authentication mode (public, password) + PHOTOPRISM_SITE_URL: "https://photoprism.wayl.one" # server URL in the format "http(s)://domain.name(:port)/(path)" + PHOTOPRISM_DISABLE_TLS: "false" # disables HTTPS/TLS even if the site URL starts with https:// and a certificate is available + PHOTOPRISM_DEFAULT_TLS: "true" # defaults to a self-signed HTTPS/TLS certificate if no other certificate is available + PHOTOPRISM_ORIGINALS_LIMIT: 5000 # file size limit for originals in MB (increase for high-res video) + PHOTOPRISM_HTTP_COMPRESSION: "gzip" # improves transfer speed and bandwidth utilization (none or gzip) + PHOTOPRISM_LOG_LEVEL: "info" # log level: trace, debug, info, warning, error, fatal, or panic + PHOTOPRISM_READONLY: "false" # do not modify originals directory (reduced functionality) + PHOTOPRISM_EXPERIMENTAL: "false" # enables experimental features + PHOTOPRISM_DISABLE_CHOWN: "false" # disables updating storage permissions via chmod and chown on startup + PHOTOPRISM_DISABLE_WEBDAV: "false" # disables built-in WebDAV server + PHOTOPRISM_DISABLE_SETTINGS: "false" # disables settings UI and API + PHOTOPRISM_DISABLE_TENSORFLOW: "false" # disables all features depending on TensorFlow + PHOTOPRISM_DISABLE_FACES: "false" # disables face detection and recognition (requires TensorFlow) + PHOTOPRISM_DISABLE_CLASSIFICATION: "false" # disables image classification (requires TensorFlow) + 
PHOTOPRISM_DISABLE_VECTORS: "false" # disables vector graphics support + PHOTOPRISM_DISABLE_RAW: "false" # disables indexing and conversion of RAW images + PHOTOPRISM_RAW_PRESETS: "false" # enables applying user presets when converting RAW images (reduces performance) + PHOTOPRISM_JPEG_QUALITY: 85 # a higher value increases the quality and file size of JPEG images and thumbnails (25-100) + PHOTOPRISM_DETECT_NSFW: "false" # automatically flags photos as private that MAY be offensive (requires TensorFlow) + PHOTOPRISM_UPLOAD_NSFW: "true" # allows uploads that MAY be offensive (no effect without TensorFlow) + # PHOTOPRISM_DATABASE_DRIVER: "sqlite" # SQLite is an embedded database that doesn't require a server + PHOTOPRISM_DATABASE_DRIVER: "mysql" # use MariaDB 10.5+ or MySQL 8+ instead of SQLite for improved performance + PHOTOPRISM_DATABASE_SERVER: "mariadb:3306" # MariaDB or MySQL database server (hostname:port) + PHOTOPRISM_DATABASE_NAME: "photoprism" # MariaDB or MySQL database schema name + PHOTOPRISM_DATABASE_USER: "photoprism" # MariaDB or MySQL database user name + PHOTOPRISM_DATABASE_PASSWORD: "insecure" # MariaDB or MySQL database user password + PHOTOPRISM_SITE_CAPTION: "AI-Powered Photos App" + PHOTOPRISM_SITE_DESCRIPTION: "" # meta site description + PHOTOPRISM_SITE_AUTHOR: "" # meta site author + ## Video Transcoding (https://docs.photoprism.app/getting-started/advanced/transcoding/): + # PHOTOPRISM_FFMPEG_ENCODER: "software" # H.264/AVC encoder (software, intel, nvidia, apple, raspberry, or vaapi) + # PHOTOPRISM_FFMPEG_SIZE: "1920" # video size limit in pixels (720-7680) (default: 3840) + # PHOTOPRISM_FFMPEG_BITRATE: "32" # video bitrate limit in Mbit/s (default: 50) + ## Run/install on first startup (options: update https gpu tensorflow davfs clitools clean): + # PHOTOPRISM_INIT: "https gpu tensorflow" + ## Run as a non-root user after initialization (supported: 0, 33, 50-99, 500-600, and 900-1200): + # PHOTOPRISM_UID: 1000 + # PHOTOPRISM_GID: 1000 + # 
PHOTOPRISM_UMASK: 0000 + ## Start as non-root user before initialization (supported: 0, 33, 50-99, 500-600, and 900-1200): + # user: "1000:1000" + ## Share hardware devices with FFmpeg and TensorFlow (optional): + # devices: + # - "/dev/dri:/dev/dri" # Intel QSV + # - "/dev/nvidia0:/dev/nvidia0" # Nvidia CUDA + # - "/dev/nvidiactl:/dev/nvidiactl" + # - "/dev/nvidia-modeset:/dev/nvidia-modeset" + # - "/dev/nvidia-nvswitchctl:/dev/nvidia-nvswitchctl" + # - "/dev/nvidia-uvm:/dev/nvidia-uvm" + # - "/dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools" + # - "/dev/video11:/dev/video11" # Video4Linux Video Encode Device (h264_v4l2m2m) + working_dir: "/photoprism" # do not change or remove + ## Storage Folders: "~" is a shortcut for your home directory, "." for the current directory + volumes: + # "/host/folder:/photoprism/folder" # Example + - "photoprism-originals:/photoprism/originals" # Original media files (DO NOT REMOVE) + # - "/example/family:/photoprism/originals/family" # *Additional* media folders can be mounted like this + # - "~/Import:/photoprism/import" # *Optional* base folder from which files can be imported to originals + - "photoprism-storage:/photoprism/storage" # *Writable* storage folder for cache, database, and sidecar files (DO NOT REMOVE) + + labels: + kompose.service.expose: photoprism.wayl.one + + ## Database Server (recommended) + ## see https://docs.photoprism.app/getting-started/faq/#should-i-use-sqlite-mariadb-or-mysql + mariadb: + image: mariadb:10.11 + ## If MariaDB gets stuck in a restart loop, this points to a memory or filesystem issue: + ## https://docs.photoprism.app/getting-started/troubleshooting/#fatal-server-errors + # restart: unless-stopped + stop_grace_period: 5s + security_opt: # see https://github.com/MariaDB/mariadb-docker/issues/434#issuecomment-1136151239 + - seccomp:unconfined + - apparmor:unconfined + command: mariadbd --innodb-buffer-pool-size=512M --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 
--collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=120 + ## Never store database files on an unreliable device such as a USB flash drive, an SD card, or a shared network folder: + volumes: + - "photoprism-db:/var/lib/mysql" # DO NOT REMOVE + ports: + - 3306 + environment: + MARIADB_AUTO_UPGRADE: "1" + MARIADB_INITDB_SKIP_TZINFO: "1" + MARIADB_DATABASE: "photoprism" + MARIADB_USER: "photoprism" + MARIADB_PASSWORD: "insecure" + MARIADB_ROOT_PASSWORD: "insecure" + +volumes: + photoprism-originals: {} + photoprism-storage: {} + photoprism-db: {} diff --git a/photoprism/justfile b/photoprism/justfile new file mode 100644 index 0000000..2e382c4 --- /dev/null +++ b/photoprism/justfile @@ -0,0 +1,31 @@ +default: cred convert deploy viz +update: convert patch + +create-ns: + kubectl create ns photoprism +cred: + kubectl get secret -n default regcred --output=yaml -o yaml | sed 's/namespace: default/namespace: photoprism/' | kubectl apply -n photoprism -f - && echo deployed secret || echo secret exists +convert: + kompose convert -o deployment.yaml -n photoprism +deploy: + kubectl apply -f deployment.yaml +delete: + kubectl delete all --all -n photoprism --timeout=0s +viz: + k8sviz -n photoprism --kubeconfig $KUBECONFIG -t png -o photoprism-k8s.png +restart: + kubectl rollout restart -n photoprism deployment/photoprism + +patch: + kubectl patch -f deployment.yaml +describe: + kubectl get deployment -n photoprism + kubectl get rs -n photoprism + kubectl get pod -n photoprism + kubectl get svc -n photoprism + kubectl get ing -n photoprism + +describe-pod: + kubectl describe pod -n photoprism +logs: + kubectl logs --all-containers -l io.kompose.service=photoprism-wayl-one -n photoprism -f diff --git a/pihole/basic-install.sh b/pihole/basic-install.sh new file mode 100644 index 0000000..4c69788 --- /dev/null +++ b/pihole/basic-install.sh @@ -0,0 +1,2788 @@ +#!/usr/bin/env bash +# shellcheck disable=SC1090 + +# 
Pi-hole: A black hole for Internet advertisements +# (c) Pi-hole (https://pi-hole.net) +# Network-wide ad blocking via your own hardware. +# +# Installs and Updates Pi-hole +# +# This file is copyright under the latest version of the EUPL. +# Please see LICENSE file for your rights under this license. + +# pi-hole.net/donate +# +# Install with this command (from your Linux machine): +# +# curl -sSL https://install.pi-hole.net | bash + +# -e option instructs bash to immediately exit if any command [1] has a non-zero exit status +# We do not want users to end up with a partially working install, so we exit the script +# instead of continuing the installation with something broken +set -e + +# Append common folders to the PATH to ensure that all basic commands are available. +# When using "su" an incomplete PATH could be passed: https://github.com/pi-hole/pi-hole/issues/3209 +export PATH+=':/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' + +######## VARIABLES ######### +# For better maintainability, we store as much information that can change in variables +# This allows us to make a change in one place that can propagate to all instances of the variable +# These variables should all be GLOBAL variables, written in CAPS +# Local variables will be in lowercase and will exist only within functions +# It's still a work in progress, so you may see some variance in this guideline until it is complete + +# Dialog result codes +# dialog code values can be set by environment variables, we only override if +# the env var is not set or empty. 
+: "${DIALOG_OK:=0}" +: "${DIALOG_CANCEL:=1}" +: "${DIALOG_ESC:=255}" + + +# List of supported DNS servers +DNS_SERVERS=$(cat << EOM +Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844 +OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53 +Level3;4.2.2.1;4.2.2.2;; +Comodo;8.26.56.26;8.20.247.20;; +DNS.WATCH (DNSSEC);84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b +Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9 +Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10 +Quad9 (filtered, ECS, DNSSEC);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11 +Cloudflare (DNSSEC);1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001 +EOM +) + +# Location for final installation log storage +installLogLoc="/etc/pihole/install.log" +# This is an important file as it contains information specific to the machine it's being installed on +setupVars="/etc/pihole/setupVars.conf" +# Pi-hole uses lighttpd as a Web server, and this is the config file for it +lighttpdConfig="/etc/lighttpd/lighttpd.conf" +# This is a file used for the colorized output +coltable="/opt/pihole/COL_TABLE" + +# Root of the web server +webroot="/var/www/html" + + +# We clone (or update) two git repositories during the install. This helps to make sure that we always have the latest versions of the relevant files. +# web is used to set up the Web admin interface. +# Pi-hole contains various setup scripts and files which are critical to the installation. +# Search for "PI_HOLE_LOCAL_REPO" in this file to see all such scripts. 
+# Two notable scripts are gravity.sh (used to generate the HOSTS file) and advanced/Scripts/webpage.sh (used to install the Web admin interface) +webInterfaceGitUrl="https://github.com/pi-hole/web.git" +webInterfaceDir="${webroot}/admin" +piholeGitUrl="https://github.com/pi-hole/pi-hole.git" +PI_HOLE_LOCAL_REPO="/etc/.pihole" +# List of pihole scripts, stored in an array +PI_HOLE_FILES=(chronometer list piholeDebug piholeLogFlush setupLCD update version gravity uninstall webpage) +# This directory is where the Pi-hole scripts will be installed +PI_HOLE_INSTALL_DIR="/opt/pihole" +PI_HOLE_CONFIG_DIR="/etc/pihole" +PI_HOLE_BIN_DIR="/usr/local/bin" +FTL_CONFIG_FILE="${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" +if [ -z "$useUpdateVars" ]; then + useUpdateVars=false +fi + +adlistFile="/etc/pihole/adlists.list" +# Pi-hole needs an IP address; to begin, these variables are empty since we don't know what the IP is until this script can run +IPV4_ADDRESS=${IPV4_ADDRESS} +IPV6_ADDRESS=${IPV6_ADDRESS} +# Give settings their default values. These may be changed by prompts later in the script. +QUERY_LOGGING=true +INSTALL_WEB_INTERFACE=true +PRIVACY_LEVEL=0 +CACHE_SIZE=10000 + +if [ -z "${USER}" ]; then + USER="$(id -un)" +fi + +# dialog dimensions: Let dialog handle appropriate sizing. +r=20 +c=70 + +######## Undocumented Flags. 
Shhh ######## +# These are undocumented flags; some of which we can use when repairing an installation +# The runUnattended flag is one example of this +reconfigure=false +runUnattended=false +INSTALL_WEB_SERVER=true +# Check arguments for the undocumented flags +for var in "$@"; do + case "$var" in + "--reconfigure" ) reconfigure=true;; + "--unattended" ) runUnattended=true;; + "--disable-install-webserver" ) INSTALL_WEB_SERVER=false;; + esac +done + +# If the color table file exists, +if [[ -f "${coltable}" ]]; then + # source it + source "${coltable}" +# Otherwise, +else + # Set these values so the installer can still run in color + COL_NC='\e[0m' # No Color + COL_LIGHT_GREEN='\e[1;32m' + COL_LIGHT_RED='\e[1;31m' + TICK="[${COL_LIGHT_GREEN}✓${COL_NC}]" + CROSS="[${COL_LIGHT_RED}✗${COL_NC}]" + INFO="[i]" + # shellcheck disable=SC2034 + DONE="${COL_LIGHT_GREEN} done!${COL_NC}" + OVER="\\r\\033[K" +fi + +# A simple function that just echoes out our logo in ASCII format +# This lets users know that it is a Pi-hole, LLC product +show_ascii_berry() { + echo -e " + ${COL_LIGHT_GREEN}.;;,. + .ccccc:,. + :cccclll:. ..,, + :ccccclll. ;ooodc + 'ccll:;ll .oooodc + .;cll.;;looo:. + ${COL_LIGHT_RED}.. ','. + .',,,,,,'. + .',,,,,,,,,,. + .',,,,,,,,,,,,.... + ....''',,,,,,,'....... + ......... .... ......... + .......... .......... + .......... .......... + ......... .... ......... + ........,,,,,,,'...... + ....',,,,,,,,,,,,. + .',,,,,,,,,'. + .',,,,,,'. + ..'''.${COL_NC} +" +} + +is_command() { + # Checks to see if the given command (passed as a string argument) exists on the system. + # The function returns 0 (success) if the command exists, and 1 if it doesn't. 
+ local check_command="$1" + + command -v "${check_command}" >/dev/null 2>&1 +} + +os_check() { + if [ "$PIHOLE_SKIP_OS_CHECK" != true ]; then + # This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net + # and determines whether or not the script is running on one of those systems + local remote_os_domain valid_os valid_version valid_response detected_os detected_version display_warning cmdResult digReturnCode response + remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"} + + detected_os=$(grep '^ID=' /etc/os-release | cut -d '=' -f2 | tr -d '"') + detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"') + + cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)" + # Gets the return code of the previous command (last line) + digReturnCode="${cmdResult##*$'\n'}" + + if [ ! "${digReturnCode}" == "0" ]; then + valid_response=false + else + # Dig returned 0 (success), so get the actual response, and loop through it to determine if the detected variables above are valid + response="${cmdResult%%$'\n'*}" + # If the value of ${response} is a single 0, then this is the return code, not an actual response. + if [ "${response}" == 0 ]; then + valid_response=false + fi + + IFS=" " read -r -a supportedOS < <(echo "${response}" | tr -d '"') + for distro_and_versions in "${supportedOS[@]}" + do + distro_part="${distro_and_versions%%=*}" + versions_part="${distro_and_versions##*=}" + + # If the distro part is a (case-insensitive) substring of the computer OS + if [[ "${detected_os^^}" =~ ${distro_part^^} ]]; then + valid_os=true + IFS="," read -r -a supportedVer <<<"${versions_part}" + for version in "${supportedVer[@]}" + do + if [[ "${detected_version}" =~ $version ]]; then + valid_version=true + break + fi + done + break + fi + done + fi + + if [ "$valid_os" = true ] && [ "$valid_version" = true ] && [ ! 
"$valid_response" = false ]; then + display_warning=false + fi + + if [ "$display_warning" != false ]; then + if [ "$valid_response" = false ]; then + + if [ "${digReturnCode}" -eq 0 ]; then + errStr="dig succeeded, but response was blank. Please contact support" + else + errStr="dig failed with return code ${digReturnCode}" + fi + printf " %b %bRetrieval of supported OS list failed. %s. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${errStr}" "${COL_NC}" + printf " %bUnable to determine if the detected OS (%s %s) is supported%b\\n" "${COL_LIGHT_RED}" "${detected_os^}" "${detected_version}" "${COL_NC}" + printf " Possible causes for this include:\\n" + printf " - Firewall blocking certain DNS lookups from Pi-hole device\\n" + printf " - ns1.pi-hole.net being blocked (required to obtain TXT record from versions.pi-hole.net containing supported operating systems)\\n" + printf " - Other internet connectivity issues\\n" + else + printf " %b %bUnsupported OS detected: %s %s%b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${detected_os^}" "${detected_version}" "${COL_NC}" + printf " If you are seeing this message and you do have a supported OS, please contact support.\\n" + fi + printf "\\n" + printf " %bhttps://docs.pi-hole.net/main/prerequisites/#supported-operating-systems%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}" + printf "\\n" + printf " If you wish to attempt to continue anyway, you can try one of the following commands to skip this check:\\n" + printf "\\n" + printf " e.g: If you are seeing this message on a fresh install, you can run:\\n" + printf " %bcurl -sSL https://install.pi-hole.net | sudo PIHOLE_SKIP_OS_CHECK=true bash%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}" + printf "\\n" + printf " If you are seeing this message after having run pihole -up:\\n" + printf " %bsudo PIHOLE_SKIP_OS_CHECK=true pihole -r%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}" + printf " (In this case, your previous run of pihole -up will have already updated the local repository)\\n" + printf "\\n" + printf " It is 
possible that the installation will still fail at this stage due to an unsupported configuration.\\n" + printf " If that is the case, you can feel free to ask the community on Discourse with the %bCommunity Help%b category:\\n" "${COL_LIGHT_RED}" "${COL_NC}" + printf " %bhttps://discourse.pi-hole.net/c/bugs-problems-issues/community-help/%b\\n" "${COL_LIGHT_GREEN}" "${COL_NC}" + printf "\\n" + exit 1 + + else + printf " %b %bSupported OS detected%b\\n" "${TICK}" "${COL_LIGHT_GREEN}" "${COL_NC}" + fi + else + printf " %b %bPIHOLE_SKIP_OS_CHECK env variable set to true - installer will continue%b\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${COL_NC}" + fi +} + +# This function waits for dpkg to unlock, which signals that the previous apt-get command has finished. +test_dpkg_lock() { + i=0 + printf " %b Waiting for package manager to finish (up to 30 seconds)\\n" "${INFO}" + # fuser is a program to show which processes use the named files, sockets, or filesystems + # So while the lock is held, + while fuser /var/lib/dpkg/lock >/dev/null 2>&1 + do + # we wait half a second, + sleep 0.5 + # increase the iterator, + ((i=i+1)) + # exit if waiting for more then 30 seconds + if [[ $i -gt 60 ]]; then + printf " %b %bError: Could not verify package manager finished and released lock. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${COL_NC}" + printf " Attempt to install packages manually and retry.\\n" + exit 1; + fi + done + # and then report success once dpkg is unlocked. + return 0 +} + +# Compatibility +package_manager_detect() { + # TODO - pull common packages for both distributions out into a common variable, then add + # the distro-specific ones below. + + # First check to see if apt-get is installed. 
+ if is_command apt-get ; then + # Set some global variables here + # We don't set them earlier since the installed package manager might be rpm, so these values would be different + PKG_MANAGER="apt-get" + # A variable to store the command used to update the package cache + UPDATE_PKG_CACHE="${PKG_MANAGER} update" + # The command we will use to actually install packages + PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install) + # grep -c will return 1 if there are no matches. This is an acceptable condition, so we OR TRUE to prevent set -e exiting the script. + PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true" + # Update package cache + update_package_cache || exit 1 + # Check for and determine version number (major and minor) of current php install + local phpVer="php" + if is_command php ; then + phpVer="$(php <<< "")" + # Check if the first character of the string is numeric + if [[ ${phpVer:0:1} =~ [1-9] ]]; then + printf " %b Existing PHP installation detected : PHP version %s\\n" "${INFO}" "${phpVer}" + printf -v phpInsMajor "%d" "$(php <<< "")" + printf -v phpInsMinor "%d" "$(php <<< "")" + phpVer="php$phpInsMajor.$phpInsMinor" + else + printf " %b No valid PHP installation detected!\\n" "${CROSS}" + printf " %b PHP version : %s\\n" "${INFO}" "${phpVer}" + printf " %b Aborting installation.\\n" "${CROSS}" + exit 1 + fi + fi + # Packages required to perform the os_check (stored as an array) + OS_CHECK_DEPS=(grep dnsutils) + # Packages required to run this install script (stored as an array) + INSTALLER_DEPS=(git iproute2 dialog ca-certificates) + # Packages required to run Pi-hole (stored as an array) + PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip idn2 libcap2-bin dns-root-data libcap2 netcat-openbsd procps jq) + # Packages required for the Web admin interface (stored as an array) + # It's useful to separate this from Pi-hole, since the two repos are also setup separately + PIHOLE_WEB_DEPS=(lighttpd 
"${phpVer}-common" "${phpVer}-cgi" "${phpVer}-sqlite3" "${phpVer}-xml" "${phpVer}-intl") + # Prior to PHP8.0, JSON functionality is provided as dedicated module, required by Pi-hole web: https://www.php.net/manual/json.installation.php + if [[ -z "${phpInsMajor}" || "${phpInsMajor}" -lt 8 ]]; then + PIHOLE_WEB_DEPS+=("${phpVer}-json") + fi + # The Web server user, + LIGHTTPD_USER="www-data" + # group, + LIGHTTPD_GROUP="www-data" + # and config file + LIGHTTPD_CFG="lighttpd.conf.debian" + + # If apt-get is not found, check for rpm. + elif is_command rpm ; then + # Then check if dnf or yum is the package manager + if is_command dnf ; then + PKG_MANAGER="dnf" + else + PKG_MANAGER="yum" + fi + + # These variable names match the ones for apt-get. See above for an explanation of what they are for. + PKG_INSTALL=("${PKG_MANAGER}" install -y) + # CentOS package manager returns 100 when there are packages to update so we need to || true to prevent the script from exiting. + PKG_COUNT="${PKG_MANAGER} check-update | grep -E '(.i686|.x86|.noarch|.arm|.src|.riscv64)' | wc -l || true" + OS_CHECK_DEPS=(grep bind-utils) + INSTALLER_DEPS=(git dialog iproute newt procps-ng chkconfig ca-certificates) + PIHOLE_DEPS=(cronie curl findutils sudo unzip libidn2 psmisc libcap nmap-ncat jq) + PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl) + LIGHTTPD_USER="lighttpd" + LIGHTTPD_GROUP="lighttpd" + LIGHTTPD_CFG="lighttpd.conf.fedora" + + # If the host OS is centos (or a derivative), epel is required for lighttpd + if ! 
grep -qiE 'fedora|fedberry' /etc/redhat-release; then + if rpm -qa | grep -qi 'epel'; then + printf " %b EPEL repository already installed\\n" "${TICK}" + else + local RH_RELEASE EPEL_PKG + # EPEL not already installed, add it based on the release version + RH_RELEASE=$(grep -oP '(?<= )[0-9]+(?=\.?)' /etc/redhat-release) + EPEL_PKG="https://dl.fedoraproject.org/pub/epel/epel-release-latest-${RH_RELEASE}.noarch.rpm" + printf " %b Enabling EPEL package repository (https://fedoraproject.org/wiki/EPEL)\\n" "${INFO}" + "${PKG_INSTALL[@]}" "${EPEL_PKG}" + printf " %b Installed %s\\n" "${TICK}" "${EPEL_PKG}" + fi + fi + + # If neither apt-get or yum/dnf package managers were found + else + # we cannot install required packages + printf " %b No supported package manager found\\n" "${CROSS}" + # so exit the installer + exit + fi +} + +# A function for checking if a directory is a git repository +is_repo() { + # Use a named, local variable instead of the vague $1, which is the first argument passed to this function + # These local variables should always be lowercase + local directory="${1}" + # A variable to store the return code + local rc + # If the first argument passed to this function is a directory, + if [[ -d "${directory}" ]]; then + # move into the directory + pushd "${directory}" &> /dev/null || return 1 + # Use git to check if the directory is a repo + # git -C is not used here to support git versions older than 1.8.4 + git status --short &> /dev/null || rc=$? 
+ # If the command was not successful, + else + # Set a non-zero return code if directory does not exist + rc=1 + fi + # Move back into the directory the user started in + popd &> /dev/null || return 1 + # Return the code; if one is not set, return 0 + return "${rc:-0}" +} + +# A function to clone a repo +make_repo() { + # Set named variables for better readability + local directory="${1}" + local remoteRepo="${2}" + + # The message to display when this function is running + str="Clone ${remoteRepo} into ${directory}" + # Display the message and use the color table to preface the message with an "info" indicator + printf " %b %s..." "${INFO}" "${str}" + # If the directory exists, + if [[ -d "${directory}" ]]; then + # Return with a 1 to exit the installer. We don't want to overwrite what could already be here in case it is not ours + str="Unable to clone ${remoteRepo} into ${directory} : Directory already exists" + printf "%b %b%s\\n" "${OVER}" "${CROSS}" "${str}" + return 1 + fi + # Clone the repo and return the return code from this command + git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $? + # Move into the directory that was passed as an argument + pushd "${directory}" &> /dev/null || return 1 + # Check current branch. If it is master, then reset to the latest available tag. + # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks) + curBranch=$(git rev-parse --abbrev-ref HEAD) + if [[ "${curBranch}" == "master" ]]; then + # If we're calling make_repo() then it should always be master, we may not need to check. + git reset --hard "$(git describe --abbrev=0 --tags)" || return $? 
+ fi + # Show a colored message showing it's status + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git) + chmod -R a+rX "${directory}" + # Move back into the original directory + popd &> /dev/null || return 1 + return 0 +} + +# We need to make sure the repos are up-to-date so we can effectively install Clean out the directory if it exists for git to clone into +update_repo() { + # Use named, local variables + # As you can see, these are the same variable names used in the last function, + # but since they are local, their scope does not go beyond this function + # This helps prevent the wrong value from being assigned if you were to set the variable as a GLOBAL one + local directory="${1}" + local curBranch + + # A variable to store the message we want to display; + # Again, it's useful to store these in variables in case we need to reuse or change the message; + # we only need to make one change here + local str="Update repo in ${1}" + # Move into the directory that was passed as an argument + pushd "${directory}" &> /dev/null || return 1 + # Let the user know what's happening + printf " %b %s..." "${INFO}" "${str}" + # Stash any local commits as they conflict with our working code + git stash --all --quiet &> /dev/null || true # Okay for stash failure + git clean --quiet --force -d || true # Okay for already clean directory + # Pull the latest commits + git pull --no-rebase --quiet &> /dev/null || return $? + # Check current branch. If it is master, then reset to the latest available tag. + # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks) + curBranch=$(git rev-parse --abbrev-ref HEAD) + if [[ "${curBranch}" == "master" ]]; then + git reset --hard "$(git describe --abbrev=0 --tags)" || return $? 
+ fi + # Show a completion message + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git) + chmod -R a+rX "${directory}" + # Move back into the original directory + popd &> /dev/null || return 1 + return 0 +} + +# A function that combines the previous git functions to update or clone a repo +getGitFiles() { + # Setup named variables for the git repos + # We need the directory + local directory="${1}" + # as well as the repo URL + local remoteRepo="${2}" + # A local variable containing the message to be displayed + local str="Check for existing repository in ${1}" + # Show the message + printf " %b %s..." "${INFO}" "${str}" + # Check if the directory is a repository + if is_repo "${directory}"; then + # Show that we're checking it + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # Update the repo, returning an error message on failure + update_repo "${directory}" || { printf "\\n %b: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } + # If it's not a .git repo, + else + # Show an error + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + # Attempt to make the repository, showing an error on failure + make_repo "${directory}" "${remoteRepo}" || { printf "\\n %bError: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; } + fi + echo "" + # Success via one of the two branches, as the commands would exit if they failed. + return 0 +} + +# Reset a repo to get rid of any local changed +resetRepo() { + # Use named variables for arguments + local directory="${1}" + # Move into the directory + pushd "${directory}" &> /dev/null || return 1 + # Store the message in a variable + str="Resetting repository within ${1}..." + # Show the message + printf " %b %s..." 
"${INFO}" "${str}" + # Use git to remove the local changes + git reset --hard &> /dev/null || return $? + # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git) + chmod -R a+rX "${directory}" + # And show the status + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # Return to where we came from + popd &> /dev/null || return 1 + # Function succeeded, as "git reset" would have triggered a return earlier if it failed + return 0 +} + +find_IPv4_information() { + # Detects IPv4 address used for communication to WAN addresses. + # Accepts no arguments, returns no values. + + # Named, local variables + local route + local IPv4bare + + # Find IP used to route to outside world by checking the the route to Google's public DNS server + route=$(ip route get 8.8.8.8) + + # Get just the interface IPv4 address + # shellcheck disable=SC2059,SC2086 + # disabled as we intentionally want to split on whitespace and have printf populate + # the variable with just the first field. + printf -v IPv4bare "$(printf ${route#*src })" + # Get the default gateway IPv4 address (the way to reach the Internet) + # shellcheck disable=SC2059,SC2086 + printf -v IPv4gw "$(printf ${route#*via })" + + if ! 
valid_ip "${IPv4bare}" ; then + IPv4bare="127.0.0.1" + fi + + # Append the CIDR notation to the IP address, if valid_ip fails this should return 127.0.0.1/8 + IPV4_ADDRESS=$(ip -oneline -family inet address show | grep "${IPv4bare}/" | awk '{print $4}' | awk 'END {print}') +} + +# Get available interfaces that are UP +get_available_interfaces() { + # There may be more than one so it's all stored in a variable + availableInterfaces=$(ip --oneline link show up | grep -v "lo" | awk '{print $2}' | cut -d':' -f1 | cut -d'@' -f1) +} + +# A function for displaying the dialogs the user sees when first running the installer +welcomeDialogs() { + # Display the welcome dialog using an appropriately sized window via the calculation conducted earlier in the script + dialog --no-shadow --clear --keep-tite \ + --backtitle "Welcome" \ + --title "Pi-hole Automated Installer" \ + --msgbox "\\n\\nThis installer will transform your device into a network-wide ad blocker!" \ + "${r}" "${c}" \ + --and-widget --clear \ + --backtitle "Support Pi-hole" \ + --title "Open Source Software" \ + --msgbox "\\n\\nThe Pi-hole is free, but powered by your donations: https://pi-hole.net/donate/" \ + "${r}" "${c}" \ + --and-widget --clear \ + --colors \ + --backtitle "Initiating network interface" \ + --title "Static IP Needed" \ + --no-button "Exit" --yes-button "Continue" \ + --defaultno \ + --yesno "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly.\\n\\n\ +\\Zb\\Z1IMPORTANT:\\Zn If you have not already done so, you must ensure that this device has a static IP.\\n\\n\ +Depending on your operating system, there are many ways to achieve this, through DHCP reservation, or by manually assigning one.\\n\\n\ +Please continue when the static addressing has been configured."\ + "${r}" "${c}" && result=0 || result="$?" 
+ + case "${result}" in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Installer exited at static IP message.\\n" "${INFO}" + exit 1 + ;; + esac +} + +# A function that lets the user pick an interface to use with Pi-hole +chooseInterface() { + # Turn the available interfaces into a string so it can be used with dialog + local interfacesList + # Number of available interfaces + local interfaceCount + + # POSIX compliant way to get the number of elements in an array + interfaceCount=$(printf "%s\n" "${availableInterfaces}" | wc -l) + + # If there is one interface, + if [[ "${interfaceCount}" -eq 1 ]]; then + # Set it as the interface to use since there is no other option + PIHOLE_INTERFACE="${availableInterfaces}" + # Otherwise, + else + # Set status for the first entry to be selected + status="ON" + + # While reading through the available interfaces + for interface in ${availableInterfaces}; do + # Put all these interfaces into a string + interfacesList="${interfacesList}${interface} available ${status} " + # All further interfaces are deselected + status="OFF" + done + # shellcheck disable=SC2086 + # Disable check for double quote here as we are passing a string with spaces + PIHOLE_INTERFACE=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" --ok-label "Select" \ + --radiolist "Choose An Interface (press space to toggle selection)" \ + ${r} ${c} "${interfaceCount}" ${interfacesList}) + + result=$? 
+ case ${result} in + "${DIALOG_CANCEL}"|"${DIALOG_ESC}") + # Show an error message and exit + printf " %b %s\\n" "${CROSS}" "No interface selected, exiting installer" + exit 1 + ;; + esac + + printf " %b Using interface: %s\\n" "${INFO}" "${PIHOLE_INTERFACE}" + fi +} + +# This lets us prefer ULA addresses over GUA +# This caused problems for some users when their ISP changed their IPv6 addresses +# See https://github.com/pi-hole/pi-hole/issues/1473#issuecomment-301745953 +testIPv6() { + # first will contain fda2 (ULA) + printf -v first "%s" "${1%%:*}" + # value1 will contain 253 which is the decimal value corresponding to 0xFD + value1=$(( (0x$first)/256 )) + # value2 will contain 162 which is the decimal value corresponding to 0xA2 + value2=$(( (0x$first)%256 )) + # the ULA test is testing for fc00::/7 according to RFC 4193 + if (( (value1&254)==252 )); then + # echoing result to calling function as return value + echo "ULA" + fi + # the GUA test is testing for 2000::/3 according to RFC 4291 + if (( (value1&112)==32 )); then + # echoing result to calling function as return value + echo "GUA" + fi + # the LL test is testing for fe80::/10 according to RFC 4193 + if (( (value1)==254 )) && (( (value2&192)==128 )); then + # echoing result to calling function as return value + echo "Link-local" + fi +} + +find_IPv6_information() { + # Detects IPv6 address used for communication to WAN addresses. 
+ mapfile -t IPV6_ADDRESSES <<<"$(ip -6 address | grep 'scope global' | awk '{print $2}')" + + # For each address in the array above, determine the type of IPv6 address it is + for i in "${IPV6_ADDRESSES[@]}"; do + # Check if it's ULA, GUA, or LL by using the function created earlier + result=$(testIPv6 "$i") + # If it's a ULA address, use it and store it as a global variable + [[ "${result}" == "ULA" ]] && ULA_ADDRESS="${i%/*}" + # If it's a GUA address, use it and store it as a global variable + [[ "${result}" == "GUA" ]] && GUA_ADDRESS="${i%/*}" + # Else if it's a Link-local address, we cannot use it, so just continue + done + + # Determine which address to be used: Prefer ULA over GUA or don't use any if none found + # If the ULA_ADDRESS contains a value, + if [[ -n "${ULA_ADDRESS}" ]]; then + # set the IPv6 address to the ULA address + IPV6_ADDRESS="${ULA_ADDRESS}" + # Show this info to the user + printf " %b Found IPv6 ULA address\\n" "${INFO}" + # Otherwise, if the GUA_ADDRESS has a value, + elif [[ -n "${GUA_ADDRESS}" ]]; then + # Let the user know + printf " %b Found IPv6 GUA address\\n" "${INFO}" + # And assign it to the global variable + IPV6_ADDRESS="${GUA_ADDRESS}" + # If none of those work, + else + printf " %b Unable to find IPv6 ULA/GUA address\\n" "${INFO}" + # So set the variable to be empty + IPV6_ADDRESS="" + fi +} + +# A function to collect IPv4 and IPv6 information of the device +collect_v4andv6_information() { + find_IPv4_information + # Echo the information to the user + printf " %b IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}" + # if `dhcpcd` is used offer to set this as static IP for the device + if [[ -f "/etc/dhcpcd.conf" ]]; then + # configure networking via dhcpcd + getStaticIPv4Settings + fi + find_IPv6_information + printf " %b IPv6 address: %s\\n" "${INFO}" "${IPV6_ADDRESS}" +} + +getStaticIPv4Settings() { + # Local, named variables + local ipSettingsCorrect + local DHCPChoice + # Ask if the user wants to use DHCP settings as 
their static IP + # This is useful for users that are using DHCP reservations; we can use the information gathered + DHCPChoice=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" --ok-label "Continue" \ + --backtitle "Calibrating network interface" \ + --title "Static IP Address" \ + --menu "Do you want to use your current network settings as a static address?\\n \ + IP address: ${IPV4_ADDRESS}\\n \ + Gateway: ${IPv4gw}\\n" \ + "${r}" "${c}" 3 \ + "Yes" "Set static IP using current values" \ + "No" "Set static IP using custom values" \ + "Skip" "I will set a static IP later, or have already done so") + + result=$? + case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + case ${DHCPChoice} in + "Skip") + return + ;; + "Yes") + # If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict. + dialog --no-shadow --keep-tite \ + --cancel-label "Exit" \ + --backtitle "IP information" \ + --title "FYI: IP Conflict" \ + --msgbox "\\nIt is possible your router could still try to assign this IP to a device, which would cause a conflict, \ +but in most cases the router is smart enough to not do that.\n\n\ +If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want.\n\n\ +It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address."\ + "${r}" "${c}" && result=0 || result=$? + + case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + ;; + + "No") + # Otherwise, we need to ask the user to input their desired settings. 
+ # Start by getting the IPv4 address (pre-filling it with info gathered from DHCP) + # Start a loop to let the user enter their information with the chance to go back and edit it if necessary + ipSettingsCorrect=false + until [[ "${ipSettingsCorrect}" = True ]]; do + + # Ask for the IPv4 address + _staticIPv4Temp=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" \ + --ok-label "Continue" \ + --backtitle "Calibrating network interface" \ + --title "IPv4 Address" \ + --form "\\nEnter your desired IPv4 address" \ + "${r}" "${c}" 0 \ + "IPv4 Address:" 1 1 "${IPV4_ADDRESS}" 1 15 19 0 \ + "IPv4 Gateway:" 2 1 "${IPv4gw}" 2 15 19 0) + + result=$? + case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + IPV4_ADDRESS=${_staticIPv4Temp%$'\n'*} + IPv4gw=${_staticIPv4Temp#*$'\n'} + + # Give the user a chance to review their settings before moving on + dialog --no-shadow --keep-tite \ + --no-label "Edit IP" \ + --backtitle "Calibrating network interface" \ + --title "Static IP Address" \ + --defaultno \ + --yesno "Are these settings correct? 
+ IP address: ${IPV4_ADDRESS} + Gateway: ${IPv4gw}" \ + "${r}" "${c}" && ipSettingsCorrect=True + done + ;; + esac + setDHCPCD +} + +# Configure networking via dhcpcd +setDHCPCD() { + # Regex for matching a non-commented static ip address setting + local regex="^[ \t]*static ip_address[ \t]*=[ \t]*${IPV4_ADDRESS}" + + # Check if static IP is already set in file + if grep -q "${regex}" /etc/dhcpcd.conf; then + printf " %b Static IP already configured\\n" "${INFO}" + # If it's not, + else + # we can append these lines to dhcpcd.conf to enable a static IP + echo "interface ${PIHOLE_INTERFACE} + static ip_address=${IPV4_ADDRESS} + static routers=${IPv4gw} + static domain_name_servers=${PIHOLE_DNS_1} ${PIHOLE_DNS_2}" | tee -a /etc/dhcpcd.conf >/dev/null + # Then use the ip command to immediately set the new address + ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}" + # Also give a warning that the user may need to reboot their system + printf " %b Set IP address to %s\\n" "${TICK}" "${IPV4_ADDRESS%/*}" + printf " %b You may need to restart after the install is complete\\n" "${INFO}" + fi +} + +# Check an IP address to see if it is a valid one +valid_ip() { + # Local, named variables + local ip=${1} + local stat=1 + + # Regex matching one IPv4 component, i.e. an integer from 0 to 255. + # See https://tools.ietf.org/html/rfc1340 + local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)"; + # Regex matching an optional port (starting with '#') range of 1-65536 + local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"; + # Build a full IPv4 regex from the above subexpressions + local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$" + + # Evaluate the regex, and return the result + [[ $ip =~ ${regex} ]] + + stat=$? + return "${stat}" +} + +valid_ip6() { + local ip=${1} + local stat=1 + + # Regex matching one IPv6 element, i.e. 
a hex value from 0000 to FFFF + local ipv6elem="[0-9a-fA-F]{1,4}" + # Regex matching an IPv6 CIDR, i.e. 1 to 128 + local v6cidr="(\\/([1-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])){0,1}" + # Regex matching an optional port (starting with '#') range of 1-65536 + local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"; + # Build a full IPv6 regex from the above subexpressions + local regex="^(((${ipv6elem}))*((:${ipv6elem}))*::((${ipv6elem}))*((:${ipv6elem}))*|((${ipv6elem}))((:${ipv6elem})){7})${v6cidr}${portelem}$" + + # Evaluate the regex, and return the result + [[ ${ip} =~ ${regex} ]] + + stat=$? + return "${stat}" +} + +# A function to choose the upstream DNS provider(s) +setDNS() { + # Local, named variables + local DNSSettingsCorrect + + # In an array, list the available upstream providers + DNSChooseOptions=() + local DNSServerCount=0 + # Save the old Internal Field Separator in a variable, + OIFS=$IFS + # and set the new one to newline + IFS=$'\n' + # Put the DNS Servers into an array + for DNSServer in ${DNS_SERVERS} + do + DNSName="$(cut -d';' -f1 <<< "${DNSServer}")" + DNSChooseOptions[DNSServerCount]="${DNSName}" + (( DNSServerCount=DNSServerCount+1 )) + DNSChooseOptions[DNSServerCount]="" + (( DNSServerCount=DNSServerCount+1 )) + done + DNSChooseOptions[DNSServerCount]="Custom" + (( DNSServerCount=DNSServerCount+1 )) + DNSChooseOptions[DNSServerCount]="" + # Restore the IFS to what it was + IFS=${OIFS} + # In a dialog, show the options + DNSchoices=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" \ + --menu "Select Upstream DNS Provider. To use your own, select Custom." "${r}" "${c}" 7 \ + "${DNSChooseOptions[@]}") + + result=$? 
+ case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + # Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider + if [[ "${DNSchoices}" == "Custom" ]] + then + # Loop until we have a valid DNS setting + until [[ "${DNSSettingsCorrect}" = True ]]; do + # Signal value, to be used if the user inputs an invalid IP address + strInvalid="Invalid" + if [[ ! "${PIHOLE_DNS_1}" ]]; then + if [[ ! "${PIHOLE_DNS_2}" ]]; then + # If the first and second upstream servers do not exist, do not prepopulate an IP address + prePopulate="" + else + # Otherwise, prepopulate the dialogue with the appropriate DNS value(s) + prePopulate=", ${PIHOLE_DNS_2}" + fi + elif [[ "${PIHOLE_DNS_1}" ]] && [[ ! "${PIHOLE_DNS_2}" ]]; then + prePopulate="${PIHOLE_DNS_1}" + elif [[ "${PIHOLE_DNS_1}" ]] && [[ "${PIHOLE_DNS_2}" ]]; then + prePopulate="${PIHOLE_DNS_1}, ${PIHOLE_DNS_2}" + fi + + # Prompt the user to enter custom upstream servers + piholeDNS=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" \ + --backtitle "Specify Upstream DNS Provider(s)" \ + --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\ +If you want to specify a port other than 53, separate it with a hash.\ +\\n\\nFor example '8.8.8.8, 8.8.4.4' or '127.0.0.1#5335'"\ + "${r}" "${c}" "${prePopulate}") + + result=$? + case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + # Clean user input and replace whitespace with comma. 
+ piholeDNS=$(sed 's/[, \t]\+/,/g' <<< "${piholeDNS}") + + # Separate the user input into the two DNS values (separated by a comma) + printf -v PIHOLE_DNS_1 "%s" "${piholeDNS%%,*}" + printf -v PIHOLE_DNS_2 "%s" "${piholeDNS##*,}" + + # If the first DNS value is invalid or empty, this if statement will be true and we will set PIHOLE_DNS_1="Invalid" + if ! valid_ip "${PIHOLE_DNS_1}" || [[ ! "${PIHOLE_DNS_1}" ]]; then + PIHOLE_DNS_1=${strInvalid} + fi + # If the second DNS value is invalid or empty, this if statement will be true and we will set PIHOLE_DNS_2="Invalid" + if ! valid_ip "${PIHOLE_DNS_2}" && [[ "${PIHOLE_DNS_2}" ]]; then + PIHOLE_DNS_2=${strInvalid} + fi + # If either of the DNS servers are invalid, + if [[ "${PIHOLE_DNS_1}" == "${strInvalid}" ]] || [[ "${PIHOLE_DNS_2}" == "${strInvalid}" ]]; then + # explain this to the user, + dialog --no-shadow --keep-tite \ + --title "Invalid IP Address(es)" \ + --backtitle "Invalid IP" \ + --msgbox "\\nOne or both of the entered IP addresses were invalid. Please try again.\ +\\n\\nInvalid IPs: ${PIHOLE_DNS_1}, ${PIHOLE_DNS_2}" \ + "${r}" "${c}" + + # set the variables back to nothing, + if [[ "${PIHOLE_DNS_1}" == "${strInvalid}" ]]; then + PIHOLE_DNS_1="" + fi + if [[ "${PIHOLE_DNS_2}" == "${strInvalid}" ]]; then + PIHOLE_DNS_2="" + fi + # and continue the loop. + DNSSettingsCorrect=False + else + dialog --no-shadow --no-collapse --keep-tite \ + --backtitle "Specify Upstream DNS Provider(s)" \ + --title "Upstream DNS Provider(s)" \ + --yesno "Are these settings correct?\\n"$'\t'"DNS Server 1:"$'\t'"${PIHOLE_DNS_1}\\n"$'\t'"DNS Server 2:"$'\t'"${PIHOLE_DNS_2}" \ + "${r}" "${c}" && result=0 || result=$? 
+ + case ${result} in + "${DIALOG_OK}") + DNSSettingsCorrect=True + ;; + "${DIALOG_CANCEL}") + DNSSettingsCorrect=False + ;; + "${DIALOG_ESC}") + printf " %b Escape pressed, exiting installer at DNS Settings%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + fi + done + else + # Save the old Internal Field Separator in a variable, + OIFS=$IFS + # and set the new one to newline + IFS=$'\n' + for DNSServer in ${DNS_SERVERS} + do + DNSName="$(cut -d';' -f1 <<< "${DNSServer}")" + if [[ "${DNSchoices}" == "${DNSName}" ]] + then + PIHOLE_DNS_1="$(cut -d';' -f2 <<< "${DNSServer}")" + PIHOLE_DNS_2="$(cut -d';' -f3 <<< "${DNSServer}")" + break + fi + done + # Restore the IFS to what it was + IFS=${OIFS} + fi + + # Display final selection + local DNSIP=${PIHOLE_DNS_1} + [[ -z ${PIHOLE_DNS_2} ]] || DNSIP+=", ${PIHOLE_DNS_2}" + printf " %b Using upstream DNS: %s (%s)\\n" "${INFO}" "${DNSchoices}" "${DNSIP}" +} + +# Allow the user to enable/disable logging +setLogging() { + # Ask the user if they want to enable logging + dialog --no-shadow --keep-tite \ + --backtitle "Pihole Installation" \ + --title "Enable Logging" \ + --yesno "\\n\\nWould you like to enable query logging?" \ + "${r}" "${c}" && result=0 || result=$? + + case ${result} in + "${DIALOG_OK}") + # If they chose yes, + printf " %b Query Logging on.\\n" "${INFO}" + QUERY_LOGGING=true + ;; + "${DIALOG_CANCEL}") + # If they chose no, + printf " %b Query Logging off.\\n" "${INFO}" + QUERY_LOGGING=false + ;; + "${DIALOG_ESC}") + # User pressed + printf " %b Escape pressed, exiting installer at Query Logging choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac +} + +# Allow the user to set their FTL privacy level +setPrivacyLevel() { + # The default selection is level 0 + PRIVACY_LEVEL=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label "Exit" \ + --ok-label "Continue" \ + --radiolist "Select a privacy mode for FTL. 
https://docs.pi-hole.net/ftldns/privacylevels/" \ + "${r}" "${c}" 6 \ + "0" "Show everything" on \ + "1" "Hide domains" off \ + "2" "Hide domains and clients" off \ + "3" "Anonymous mode" off) + + result=$? + case ${result} in + "${DIALOG_OK}") + printf " %b Using privacy level: %s\\n" "${INFO}" "${PRIVACY_LEVEL}" + ;; + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancelled privacy level selection.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac +} + +# Function to ask the user if they want to install the dashboard +setAdminFlag() { + # Similar to the logging function, ask what the user wants + dialog --no-shadow --keep-tite \ + --backtitle "Pihole Installation" \ + --title "Admin Web Interface" \ + --yesno "\\n\\nDo you want to install the Admin Web Interface?" \ + "${r}" "${c}" && result=0 || result=$? + + case ${result} in + "${DIALOG_OK}") + # If they chose yes, + printf " %b Installing Admin Web Interface\\n" "${INFO}" + # Set the flag to install the web interface + INSTALL_WEB_INTERFACE=true + ;; + "${DIALOG_CANCEL}") + # If they chose no, + printf " %b Not installing Admin Web Interface\\n" "${INFO}" + # Set the flag to not install the web interface + INSTALL_WEB_INTERFACE=false + INSTALL_WEB_SERVER=false + ;; + "${DIALOG_ESC}") + # User pressed + printf " %b Escape pressed, exiting installer at Admin Web Interface choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + # If the user wants to install the Web admin interface (i.e. 
it has not been deselected above) and did not deselect the web server via command-line argument + if [[ "${INSTALL_WEB_INTERFACE}" == true && "${INSTALL_WEB_SERVER}" == true ]]; then + # Get list of required PHP modules, excluding base package (common) and handler (cgi) + local i php_modules + for i in "${PIHOLE_WEB_DEPS[@]}"; do [[ $i == 'php'* && $i != *'-common' && $i != *'-cgi' ]] && php_modules+=" ${i#*-}"; done + dialog --no-shadow --keep-tite \ + --backtitle "Pi-hole Installation" \ + --title "Web Server" \ + --yesno "\\n\\nA web server is required for the Admin Web Interface.\ +\\n\\nDo you want to install lighttpd and the required PHP modules?\ +\\n\\nNB: If you disable this, and, do not have an existing web server \ +and required PHP modules (${php_modules# }) installed, the web interface \ +will not function. Additionally the web server user needs to be member of \ +the \"pihole\" group for full functionality." \ + "${r}" "${c}" && result=0 || result=$? + + case ${result} in + "${DIALOG_OK}") + # If they chose yes, + printf " %b Installing lighttpd\\n" "${INFO}" + # Set the flag to install the web server + INSTALL_WEB_SERVER=true + ;; + "${DIALOG_CANCEL}") + # If they chose no, + printf " %b Not installing lighttpd\\n" "${INFO}" + # Set the flag to not install the web server + INSTALL_WEB_SERVER=false + ;; + "${DIALOG_ESC}") + # User pressed + printf " %b Escape pressed, exiting installer at web server choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + fi +} + +# A function to display a list of example blocklists for users to select +chooseBlocklists() { + # Back up any existing adlist file, on the off chance that it exists. Useful in case of a reconfigure. 
+ if [[ -f "${adlistFile}" ]]; then + mv "${adlistFile}" "${adlistFile}.old" + fi + # Let user select (or not) blocklists + dialog --no-shadow --keep-tite \ + --backtitle "Pi-hole Installation" \ + --title "Blocklists" \ + --yesno "\\nPi-hole relies on third party lists in order to block ads.\ +\\n\\nYou can use the suggestion below, and/or add your own after installation.\ +\\n\\nSelect 'Yes' to include:\ +\\n\\nStevenBlack's Unified Hosts List" \ + "${r}" "${c}" && result=0 || result=$? + + case ${result} in + "${DIALOG_OK}") + # If they chose yes, + printf " %b Installing StevenBlack's Unified Hosts List\\n" "${INFO}" + echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}" + ;; + "${DIALOG_CANCEL}") + # If they chose no, + printf " %b Not installing StevenBlack's Unified Hosts List\\n" "${INFO}" + ;; + "${DIALOG_ESC}") + # User pressed + printf " %b Escape pressed, exiting installer at blocklist choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + # Create an empty adList file with appropriate permissions. + if [ ! -f "${adlistFile}" ]; then + install -m 644 /dev/null "${adlistFile}" + else + chmod 644 "${adlistFile}" + fi +} + +# Used only in unattended setup +# If there is already the adListFile, we keep it, else we create it using all default lists +installDefaultBlocklists() { + # In unattended setup, could be useful to use userdefined blocklist. + # If this file exists, we avoid overriding it. + if [[ -f "${adlistFile}" ]]; then + return; + fi + echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}" +} + +# Check if /etc/dnsmasq.conf is from pi-hole. 
If so replace with an original and install new in .d directory +version_check_dnsmasq() { + # Local, named variables + local dnsmasq_conf="/etc/dnsmasq.conf" + local dnsmasq_conf_orig="/etc/dnsmasq.conf.orig" + local dnsmasq_pihole_id_string="addn-hosts=/etc/pihole/gravity.list" + local dnsmasq_pihole_id_string2="# Dnsmasq config for Pi-hole's FTLDNS" + local dnsmasq_original_config="${PI_HOLE_LOCAL_REPO}/advanced/dnsmasq.conf.original" + local dnsmasq_pihole_01_source="${PI_HOLE_LOCAL_REPO}/advanced/01-pihole.conf" + local dnsmasq_pihole_01_target="/etc/dnsmasq.d/01-pihole.conf" + local dnsmasq_rfc6761_06_source="${PI_HOLE_LOCAL_REPO}/advanced/06-rfc6761.conf" + local dnsmasq_rfc6761_06_target="/etc/dnsmasq.d/06-rfc6761.conf" + + # If the dnsmasq config file exists + if [[ -f "${dnsmasq_conf}" ]]; then + printf " %b Existing dnsmasq.conf found..." "${INFO}" + # If a specific string is found within this file, we presume it's from older versions on Pi-hole, + if grep -q "${dnsmasq_pihole_id_string}" "${dnsmasq_conf}" || + grep -q "${dnsmasq_pihole_id_string2}" "${dnsmasq_conf}"; then + printf " it is from a previous Pi-hole install.\\n" + printf " %b Backing up dnsmasq.conf to dnsmasq.conf.orig..." "${INFO}" + # so backup the original file, + mv -f "${dnsmasq_conf}" "${dnsmasq_conf_orig}" + printf "%b %b Backing up dnsmasq.conf to dnsmasq.conf.orig...\\n" "${OVER}" "${TICK}" + printf " %b Restoring default dnsmasq.conf..." "${INFO}" + # and replace it with the default + install -D -m 644 -T "${dnsmasq_original_config}" "${dnsmasq_conf}" + printf "%b %b Restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}" + else + # Otherwise, don't to anything + printf " it is not a Pi-hole file, leaving alone!\\n" + fi + else + # If a file cannot be found, + printf " %b No dnsmasq.conf found... restoring default dnsmasq.conf..." "${INFO}" + # restore the default one + install -D -m 644 -T "${dnsmasq_original_config}" "${dnsmasq_conf}" + printf "%b %b No dnsmasq.conf found... 
restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}" + fi + + printf " %b Installing %s..." "${INFO}" "${dnsmasq_pihole_01_target}" + # Check to see if dnsmasq directory exists (it may not due to being a fresh install and dnsmasq no longer being a dependency) + if [[ ! -d "/etc/dnsmasq.d" ]];then + install -d -m 755 "/etc/dnsmasq.d" + fi + # Copy the new Pi-hole DNS config file into the dnsmasq.d directory + install -D -m 644 -T "${dnsmasq_pihole_01_source}" "${dnsmasq_pihole_01_target}" + printf "%b %b Installed %s\n" "${OVER}" "${TICK}" "${dnsmasq_pihole_01_target}" + # Add settings with the GLOBAL DNS variables that we populated earlier + # First, set the interface to listen on + addOrEditKeyValPair "${dnsmasq_pihole_01_target}" "interface" "$PIHOLE_INTERFACE" + if [[ "${PIHOLE_DNS_1}" != "" ]]; then + # then add in the primary DNS server. + addOrEditKeyValPair "${dnsmasq_pihole_01_target}" "server" "$PIHOLE_DNS_1" + fi + # Ditto if DNS2 is not empty + if [[ "${PIHOLE_DNS_2}" != "" ]]; then + addKey "${dnsmasq_pihole_01_target}" "server=$PIHOLE_DNS_2" + fi + + # Set the cache size + addOrEditKeyValPair "${dnsmasq_pihole_01_target}" "cache-size" "$CACHE_SIZE" + + sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' "${dnsmasq_conf}" + + # If the user does not want to enable logging, + if [[ "${QUERY_LOGGING}" == false ]] ; then + # remove itfrom the DNS config file + removeKey "${dnsmasq_pihole_01_target}" "log-queries" + else + # Otherwise, enable it by adding the directive to the DNS config file + addKey "${dnsmasq_pihole_01_target}" "log-queries" + fi + + printf " %b Installing %s..." 
"${INFO}" "${dnsmasq_rfc6761_06_source}" + install -D -m 644 -T "${dnsmasq_rfc6761_06_source}" "${dnsmasq_rfc6761_06_target}" + printf "%b %b Installed %s\n" "${OVER}" "${TICK}" "${dnsmasq_rfc6761_06_target}" +} + +# Clean an existing installation to prepare for upgrade/reinstall +clean_existing() { + # Local, named variables + # ${1} Directory to clean + local clean_directory="${1}" + # Pop the first argument, and shift all addresses down by one (i.e. ${2} becomes ${1}) + shift + # Then, we can access all arguments ($@) without including the directory to clean + local old_files=( "$@" ) + + # Remove each script in the old_files array + for script in "${old_files[@]}"; do + rm -f "${clean_directory}/${script}.sh" + done +} + +# Install the scripts from repository to their various locations +installScripts() { + # Local, named variables + local str="Installing scripts from ${PI_HOLE_LOCAL_REPO}" + printf " %b %s..." "${INFO}" "${str}" + + # Clear out script files from Pi-hole scripts directory. 
+ clean_existing "${PI_HOLE_INSTALL_DIR}" "${PI_HOLE_FILES[@]}" + + # Install files from local core repository + if is_repo "${PI_HOLE_LOCAL_REPO}"; then + # move into the directory + cd "${PI_HOLE_LOCAL_REPO}" + # Install the scripts by: + # -o setting the owner to the user + # -Dm755 create all leading components of destination except the last, then copy the source to the destination and setting the permissions to 755 + # + # This first one is the directory + install -o "${USER}" -Dm755 -d "${PI_HOLE_INSTALL_DIR}" + # The rest are the scripts Pi-hole needs + install -o "${USER}" -Dm755 -t "${PI_HOLE_INSTALL_DIR}" gravity.sh + install -o "${USER}" -Dm755 -t "${PI_HOLE_INSTALL_DIR}" ./advanced/Scripts/*.sh + install -o "${USER}" -Dm755 -t "${PI_HOLE_INSTALL_DIR}" ./automated\ install/uninstall.sh + install -o "${USER}" -Dm755 -t "${PI_HOLE_INSTALL_DIR}" ./advanced/Scripts/COL_TABLE + install -o "${USER}" -Dm755 -t "${PI_HOLE_BIN_DIR}" pihole + install -Dm644 ./advanced/bash-completion/pihole /etc/bash_completion.d/pihole + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + + else + # Otherwise, show an error and exit + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + printf "\\t\\t%bError: Local repo %s not found, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}" + return 1 + fi +} + +# Install the configs from PI_HOLE_LOCAL_REPO to their various locations +installConfigs() { + printf "\\n %b Installing configs from %s...\\n" "${INFO}" "${PI_HOLE_LOCAL_REPO}" + # Make sure Pi-hole's config files are in place + version_check_dnsmasq + + # Install list of DNS servers + # Format: Name;Primary IPv4;Secondary IPv4;Primary IPv6;Secondary IPv6 + # Some values may be empty (for example: DNS servers without IPv6 support) + echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf" + chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf" + + # Install template file if it does not exist + if [[ ! 
-r "${FTL_CONFIG_FILE}" ]]; then + install -d -m 0755 ${PI_HOLE_CONFIG_DIR} + if ! install -T -o pihole -m 664 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.conf" "${FTL_CONFIG_FILE}" &>/dev/null; then + printf " %b Error: Unable to initialize configuration file %s/pihole-FTL.conf\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}" + return 1 + fi + fi + + # Install empty custom.list file if it does not exist + if [[ ! -r "${PI_HOLE_CONFIG_DIR}/custom.list" ]]; then + if ! install -o root -m 644 /dev/null "${PI_HOLE_CONFIG_DIR}/custom.list" &>/dev/null; then + printf " %b Error: Unable to initialize configuration file %s/custom.list\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}" + return 1 + fi + fi + + # Install pihole-FTL systemd or init.d service, based on whether systemd is the init system or not + # Follow debhelper logic, which checks for /run/systemd/system to derive whether systemd is the init system + if [[ -d '/run/systemd/system' ]]; then + install -T -m 0644 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.systemd" '/etc/systemd/system/pihole-FTL.service' + + # Remove init.d service if present + if [[ -e '/etc/init.d/pihole-FTL' ]]; then + rm '/etc/init.d/pihole-FTL' + update-rc.d pihole-FTL remove + fi + + # Load final service + systemctl daemon-reload + else + install -T -m 0755 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.service" '/etc/init.d/pihole-FTL' + fi + install -T -m 0755 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL-prestart.sh" "${PI_HOLE_INSTALL_DIR}/pihole-FTL-prestart.sh" + install -T -m 0755 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL-poststop.sh" "${PI_HOLE_INSTALL_DIR}/pihole-FTL-poststop.sh" + + # If the user chose to install the dashboard, + if [[ "${INSTALL_WEB_SERVER}" == true ]]; then + # set permissions on /etc/lighttpd/lighttpd.conf so pihole user (other) can read the file + chmod o+x /etc/lighttpd + chmod o+r "${lighttpdConfig}" + + # Ensure /run/lighttpd exists and is owned by lighttpd user + # 
Needed for the php socket + mkdir -p /run/lighttpd + chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} /run/lighttpd + + if grep -q -F "OVERWRITTEN BY PI-HOLE" "${lighttpdConfig}"; then + # Attempt to preserve backwards compatibility with older versions + install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} "${lighttpdConfig}" + # Make the directories if they do not exist and set the owners + mkdir -p /var/cache/lighttpd/compress + chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} /var/cache/lighttpd/compress + mkdir -p /var/cache/lighttpd/uploads + chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} /var/cache/lighttpd/uploads + fi + # Copy the config file to include for pihole admin interface + if [[ -d "/etc/lighttpd/conf.d" ]]; then + install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/pihole-admin.conf /etc/lighttpd/conf.d/pihole-admin.conf + if grep -q -F 'include "/etc/lighttpd/conf.d/pihole-admin.conf"' "${lighttpdConfig}"; then + : + else + echo 'include "/etc/lighttpd/conf.d/pihole-admin.conf"' >> "${lighttpdConfig}" + fi + # Avoid some warnings trace from lighttpd, which might break tests + conf=/etc/lighttpd/conf.d/pihole-admin.conf + if lighttpd -f "${lighttpdConfig}" -tt 2>&1 | grep -q -F "WARNING: unknown config-key: dir-listing\."; then + echo '# Avoid some warnings trace from lighttpd, which might break tests' >> $conf + echo 'server.modules += ( "mod_dirlisting" )' >> $conf + fi + if lighttpd -f "${lighttpdConfig}" -tt 2>&1 | grep -q -F "warning: please use server.use-ipv6"; then + echo '# Avoid some warnings trace from lighttpd, which might break tests' >> $conf + echo 'server.use-ipv6 := "disable"' >> $conf + fi + elif [[ -d "/etc/lighttpd/conf-available" ]]; then + conf=/etc/lighttpd/conf-available/15-pihole-admin.conf + install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/pihole-admin.conf $conf + + # Get the version number of lighttpd + version=$(dpkg-query -f='${Version}\n' --show lighttpd) + # Test if that version is greater than or euqal to 1.4.56 + if 
dpkg --compare-versions "$version" "ge" "1.4.56"; then + # If it is, then we don't need to disable the modules + # (server.modules duplication is ignored in lighttpd 1.4.56+) + : + else + # disable server.modules += ( ... ) in $conf to avoid module dups + if awk '!/^server\.modules/{print}' $conf > $conf.$$ && mv $conf.$$ $conf; then + : + else + rm $conf.$$ + fi + fi + + chmod 644 $conf + if is_command lighty-enable-mod ; then + lighty-enable-mod pihole-admin access accesslog redirect fastcgi setenv > /dev/null || true + else + # Otherwise, show info about installing them + printf " %b Warning: 'lighty-enable-mod' utility not found\\n" "${INFO}" + printf " Please ensure fastcgi is enabled if you experience issues\\n" + fi + else + # lighttpd config include dir not found + printf " %b Warning: lighttpd config include dir not found\\n" "${INFO}" + printf " Please manually install pihole-admin.conf\\n" + fi + fi +} + +install_manpage() { + # Copy Pi-hole man pages and call mandb to update man page database + # Default location for man files for /usr/local/bin is /usr/local/share/man + # on lightweight systems may not be present, so check before copying. + printf " %b Testing man page installation" "${INFO}" + if ! is_command mandb ; then + # if mandb is not present, no manpage support + printf "%b %b man not installed\\n" "${OVER}" "${INFO}" + return + elif [[ ! -d "/usr/local/share/man" ]]; then + # appropriate directory for Pi-hole's man page is not present + printf "%b %b man pages not installed\\n" "${OVER}" "${INFO}" + return + fi + if [[ ! -d "/usr/local/share/man/man8" ]]; then + # if not present, create man8 directory + install -d -m 755 /usr/local/share/man/man8 + fi + if [[ ! 
-d "/usr/local/share/man/man5" ]]; then + # if not present, create man5 directory + install -d -m 755 /usr/local/share/man/man5 + fi + # Testing complete, copy the files & update the man db + install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8 + install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8 + + # remove previously installed "pihole-FTL.conf.5" man page + if [[ -f "/usr/local/share/man/man5/pihole-FTL.conf.5" ]]; then + rm /usr/local/share/man/man5/pihole-FTL.conf.5 + fi + + if mandb -q &>/dev/null; then + # Updated successfully + printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}" + return + else + # Something is wrong with the system's man installation, clean up + # our files, (leave everything how we found it). + rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 + printf "%b %b man page db not updated, man pages not installed\\n" "${OVER}" "${CROSS}" + fi +} + +stop_service() { + # Stop service passed in as argument. + # Can softfail, as process may not be installed when this is called + local str="Stopping ${1} service" + printf " %b %s..." "${INFO}" "${str}" + if is_command systemctl ; then + systemctl stop "${1}" &> /dev/null || true + else + service "${1}" stop &> /dev/null || true + fi + printf "%b %b %s...\\n" "${OVER}" "${TICK}" "${str}" +} + +# Start/Restart service passed in as argument +restart_service() { + # Local, named variables + local str="Restarting ${1} service" + printf " %b %s..." 
"${INFO}" "${str}" + # If systemctl exists, + if is_command systemctl ; then + # use that to restart the service + systemctl restart "${1}" &> /dev/null + else + # Otherwise, fall back to the service command + service "${1}" restart &> /dev/null + fi + printf "%b %b %s...\\n" "${OVER}" "${TICK}" "${str}" +} + +# Enable service so that it will start with next reboot +enable_service() { + # Local, named variables + local str="Enabling ${1} service to start on reboot" + printf " %b %s..." "${INFO}" "${str}" + # If systemctl exists, + if is_command systemctl ; then + # use that to enable the service + systemctl enable "${1}" &> /dev/null + else + # Otherwise, use update-rc.d to accomplish this + update-rc.d "${1}" defaults &> /dev/null + fi + printf "%b %b %s...\\n" "${OVER}" "${TICK}" "${str}" +} + +# Disable service so that it will not with next reboot +disable_service() { + # Local, named variables + local str="Disabling ${1} service" + printf " %b %s..." "${INFO}" "${str}" + # If systemctl exists, + if is_command systemctl ; then + # use that to disable the service + systemctl disable "${1}" &> /dev/null + else + # Otherwise, use update-rc.d to accomplish this + update-rc.d "${1}" disable &> /dev/null + fi + printf "%b %b %s...\\n" "${OVER}" "${TICK}" "${str}" +} + +check_service_active() { + # If systemctl exists, + if is_command systemctl ; then + # use that to check the status of the service + systemctl is-enabled "${1}" &> /dev/null + else + # Otherwise, fall back to service command + service "${1}" status &> /dev/null + fi +} + +# Systemd-resolved's DNSStubListener and dnsmasq can't share port 53. 
+disable_resolved_stublistener() { + printf " %b Testing if systemd-resolved is enabled\\n" "${INFO}" + # Check if Systemd-resolved's DNSStubListener is enabled and active on port 53 + if check_service_active "systemd-resolved"; then + # Check if DNSStubListener is enabled + printf " %b %b Testing if systemd-resolved DNSStub-Listener is active" "${OVER}" "${INFO}" + if ( grep -E '#?DNSStubListener=yes' /etc/systemd/resolved.conf &> /dev/null ); then + # Disable the DNSStubListener to unbind it from port 53 + # Note that this breaks dns functionality on host until dnsmasq/ftl are up and running + printf "%b %b Disabling systemd-resolved DNSStubListener" "${OVER}" "${TICK}" + # Make a backup of the original /etc/systemd/resolved.conf + # (This will need to be restored on uninstallation) + sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf + printf " and restarting systemd-resolved\\n" + systemctl reload-or-restart systemd-resolved + else + printf "%b %b Systemd-resolved does not need to be restarted\\n" "${OVER}" "${INFO}" + fi + else + printf "%b %b Systemd-resolved is not enabled\\n" "${OVER}" "${INFO}" + fi +} + +update_package_cache() { + # Update package cache on apt based OSes. Do this every time since + # it's quick and packages can be updated at any time. + + # Local, named variables + local str="Update local cache of available packages" + printf " %b %s..." 
"${INFO}" "${str}" + # Create a command from the package cache variable + if eval "${UPDATE_PKG_CACHE}" &> /dev/null; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + else + # Otherwise, show an error and exit + + # In case we used apt-get and apt is also available, we use this as recommendation as we have seen it + # gives more user-friendly (interactive) advice + if [[ ${PKG_MANAGER} == "apt-get" ]] && is_command apt ; then + UPDATE_PKG_CACHE="apt update" + fi + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + printf " %b Error: Unable to update package cache. Please try \"%s\"%b\\n" "${COL_LIGHT_RED}" "sudo ${UPDATE_PKG_CACHE}" "${COL_NC}" + return 1 + fi +} + +# Let user know if they have outdated packages on their system and +# advise them to run a package update at soonest possible. +notify_package_updates_available() { + # Local, named variables + local str="Checking ${PKG_MANAGER} for upgraded packages" + printf "\\n %b %s..." "${INFO}" "${str}" + # Store the list of packages in a variable + updatesToInstall=$(eval "${PKG_COUNT}") + + if [[ -d "/lib/modules/$(uname -r)" ]]; then + if [[ "${updatesToInstall}" -eq 0 ]]; then + printf "%b %b %s... up to date!\\n\\n" "${OVER}" "${TICK}" "${str}" + else + printf "%b %b %s... %s updates available\\n" "${OVER}" "${TICK}" "${str}" "${updatesToInstall}" + printf " %b %bIt is recommended to update your OS after installing the Pi-hole!%b\\n\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${COL_NC}" + fi + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + printf " Kernel update detected. If the install fails, please reboot and try again\\n" + fi +} + +install_dependent_packages() { + + # Install packages passed in via argument array + # No spinner - conflicts with set -e + declare -a installArray + + # Debian based package install - debconf will download the entire package list + # so we just create an array of packages not currently installed to cut down on the + # amount of download traffic. 
# NOTE: We may be able to use this installArray in the future to create a list of package that were
    # installed by us, and remove only the installed packages, and not the entire list.
    if is_command apt-get ; then
        # Debian/Ubuntu path: ask dpkg about each package so anything already
        # present is skipped instead of re-downloaded
        for pkg in "$@"; do
            printf " %b Checking for %s..." "${INFO}" "${pkg}"
            if dpkg-query -W -f='${Status}' "${pkg}" 2>/dev/null | grep -q "ok installed"; then
                printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${pkg}"
            else
                printf "%b %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${pkg}"
                installArray+=("${pkg}")
            fi
        done
        # Anything still missing gets installed in a single transaction
        if [[ "${#installArray[@]}" -gt 0 ]]; then
            test_dpkg_lock
            # Running apt-get install with minimal output can cause some issues with
            # requiring user input (e.g password for phpmyadmin see #218)
            printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
            printf '%*s\n' "${c}" '' | tr " " -;
            "${PKG_INSTALL[@]}" "${installArray[@]}"
            printf '%*s\n' "${c}" '' | tr " " -;
            return
        fi
        printf "\\n"
        return 0
    fi

    # Fedora/CentOS path: same skip-if-installed pattern, querying the
    # package manager directly
    for pkg in "$@"; do
        printf " %b Checking for %s..." "${INFO}" "${pkg}"
        if "${PKG_MANAGER}" -q list installed "${pkg}" &> /dev/null; then
            printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${pkg}"
        else
            printf "%b %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${pkg}"
            installArray+=("${pkg}")
        fi
    done
    # If there's anything to install, install everything in the list.
+ if [[ "${#installArray[@]}" -gt 0 ]]; then + printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}" + printf '%*s\n' "${c}" '' | tr " " -; + "${PKG_INSTALL[@]}" "${installArray[@]}" + printf '%*s\n' "${c}" '' | tr " " -; + return + fi + printf "\\n" + return 0 +} + +# Install the Web interface dashboard +installPiholeWeb() { + # Install Sudoers file + local str="Installing sudoer file" + printf "\\n %b %s..." "${INFO}" "${str}" + # Make the .d directory if it doesn't exist, + install -d -m 755 /etc/sudoers.d/ + # and copy in the pihole sudoers file + install -m 0640 ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.sudo /etc/sudoers.d/pihole + # Add lighttpd user (OS dependent) to sudoers file + echo "${LIGHTTPD_USER} ALL=NOPASSWD: ${PI_HOLE_BIN_DIR}/pihole" >> /etc/sudoers.d/pihole + + # If the Web server user is lighttpd, + if [[ "$LIGHTTPD_USER" == "lighttpd" ]]; then + # Allow executing pihole via sudo with Fedora + # Usually /usr/local/bin ${PI_HOLE_BIN_DIR} is not permitted as directory for sudoable programs + echo "Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:${PI_HOLE_BIN_DIR}" >> /etc/sudoers.d/pihole + fi + # Set the strict permissions on the file + chmod 0440 /etc/sudoers.d/pihole + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" +} + +# Installs a cron file +installCron() { + # Install the cron job + local str="Installing latest Cron script" + printf "\\n %b %s..." 
"${INFO}" "${str}" + # Copy the cron file over from the local repo + # File must not be world or group writeable and must be owned by root + install -D -m 644 -T -o root -g root ${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole.cron /etc/cron.d/pihole + # Randomize gravity update time + sed -i "s/59 1 /$((1 + RANDOM % 58)) $((3 + RANDOM % 2))/" /etc/cron.d/pihole + # Randomize update checker time + sed -i "s/59 17/$((1 + RANDOM % 58)) $((12 + RANDOM % 8))/" /etc/cron.d/pihole + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" +} + +# Gravity is a very important script as it aggregates all of the domains into a single HOSTS formatted list, +# which is what Pi-hole needs to begin blocking ads +runGravity() { + # Run gravity in the current shell + { /opt/pihole/gravity.sh --force; } +} + +# Check if the pihole user exists and create if it does not +create_pihole_user() { + local str="Checking for user 'pihole'" + printf " %b %s..." "${INFO}" "${str}" + # If the pihole user exists, + if id -u pihole &> /dev/null; then + # and if the pihole group exists, + if getent group pihole > /dev/null 2>&1; then + # succeed + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + else + local str="Checking for group 'pihole'" + printf " %b %s..." "${INFO}" "${str}" + local str="Creating group 'pihole'" + # if group can be created + if groupadd pihole; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + local str="Adding user 'pihole' to group 'pihole'" + printf " %b %s..." "${INFO}" "${str}" + # if pihole user can be added to group pihole + if usermod -g pihole pihole; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + fi + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + fi + fi + else + # If the pihole user doesn't exist, + printf "%b %b %s" "${OVER}" "${CROSS}" "${str}" + local str="Checking for group 'pihole'" + printf " %b %s..." 
"${INFO}" "${str}" + if getent group pihole > /dev/null 2>&1; then + # group pihole exists + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # then create and add her to the pihole group + local str="Creating user 'pihole'" + printf "%b %b %s..." "${OVER}" "${INFO}" "${str}" + if useradd -r --no-user-group -g pihole -s /usr/sbin/nologin pihole; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + fi + else + # group pihole does not exist + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + local str="Creating group 'pihole'" + # if group can be created + if groupadd pihole; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + # create and add pihole user to the pihole group + local str="Creating user 'pihole'" + printf "%b %b %s..." "${OVER}" "${INFO}" "${str}" + if useradd -r --no-user-group -g pihole -s /usr/sbin/nologin pihole; then + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + fi + + else + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + fi + fi + fi +} + +# This function saves any changes to the setup variables into the setupvars.conf file for future runs +finalExports() { + # set or update the variables in the file + + addOrEditKeyValPair "${setupVars}" "PIHOLE_INTERFACE" "${PIHOLE_INTERFACE}" + addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_1" "${PIHOLE_DNS_1}" + addOrEditKeyValPair "${setupVars}" "PIHOLE_DNS_2" "${PIHOLE_DNS_2}" + addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "${QUERY_LOGGING}" + addOrEditKeyValPair "${setupVars}" "INSTALL_WEB_SERVER" "${INSTALL_WEB_SERVER}" + addOrEditKeyValPair "${setupVars}" "INSTALL_WEB_INTERFACE" "${INSTALL_WEB_INTERFACE}" + addOrEditKeyValPair "${setupVars}" "LIGHTTPD_ENABLED" "${LIGHTTPD_ENABLED}" + addOrEditKeyValPair "${setupVars}" "CACHE_SIZE" "${CACHE_SIZE}" + addOrEditKeyValPair "${setupVars}" "DNS_FQDN_REQUIRED" "${DNS_FQDN_REQUIRED:-true}" + 
addOrEditKeyValPair "${setupVars}" "DNS_BOGUS_PRIV" "${DNS_BOGUS_PRIV:-true}" + addOrEditKeyValPair "${setupVars}" "DNSMASQ_LISTENING" "${DNSMASQ_LISTENING:-local}" + + chmod 644 "${setupVars}" + + # Set the privacy level + addOrEditKeyValPair "${FTL_CONFIG_FILE}" "PRIVACYLEVEL" "${PRIVACY_LEVEL}" + + # Bring in the current settings and the functions to manipulate them + source "${setupVars}" + # shellcheck source=advanced/Scripts/webpage.sh + source "${PI_HOLE_LOCAL_REPO}/advanced/Scripts/webpage.sh" + + # Look for DNS server settings which would have to be reapplied + ProcessDNSSettings + + # Look for DHCP server settings which would have to be reapplied + ProcessDHCPSettings +} + +# Install the logrotate script +installLogrotate() { + local str="Installing latest logrotate script" + local target=/etc/pihole/logrotate + + printf "\\n %b %s..." "${INFO}" "${str}" + if [[ -f ${target} ]]; then + + # Account for changed logfile paths from /var/log -> /var/log/pihole/ made in core v5.11. + if grep -q "/var/log/pihole.log" ${target} || grep -q "/var/log/pihole-FTL.log" ${target}; then + sed -i 's/\/var\/log\/pihole.log/\/var\/log\/pihole\/pihole.log/g' ${target} + sed -i 's/\/var\/log\/pihole-FTL.log/\/var\/log\/pihole\/FTL.log/g' ${target} + + printf "\\n\\t%b Old log file paths updated in existing logrotate file. \\n" "${INFO}" + return 3 + fi + + printf "\\n\\t%b Existing logrotate file found. No changes made.\\n" "${INFO}" + # Return value isn't that important, using 2 to indicate that it's not a fatal error but + # the function did not complete. + return 2 + fi + # Copy the file over from the local repo + install -D -m 644 -T "${PI_HOLE_LOCAL_REPO}"/advanced/Templates/logrotate ${target} + # Different operating systems have different user / group + # settings for logrotate that makes it impossible to create + # a static logrotate file that will work with e.g. + # Rasbian and Ubuntu at the same time. 
Hence, we have to + # customize the logrotate script here in order to reflect + # the local properties of the /var/log directory + logusergroup="$(stat -c '%U %G' /var/log)" + # If there is a usergroup for log rotation, + if [[ -n "${logusergroup}" ]]; then + # replace the line in the logrotate script with that usergroup. + sed -i "s/# su #/su ${logusergroup}/g;" ${target} + fi + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" +} + +# Install base files and web interface +installPihole() { + # If the user wants to install the Web interface, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + if [[ ! -d "${webroot}" ]]; then + # make the Web directory if necessary + install -d -m 0755 ${webroot} + fi + + if [[ "${INSTALL_WEB_SERVER}" == true ]]; then + # Set the owner and permissions + chown ${LIGHTTPD_USER}:${LIGHTTPD_GROUP} ${webroot} + chmod 0775 ${webroot} + # Repair permissions if webroot is not world readable + chmod a+rx /var/www + chmod a+rx ${webroot} + # Give lighttpd access to the pihole group so the web interface can + # manage the gravity.db database + usermod -a -G pihole ${LIGHTTPD_USER} + fi + fi + # Install base files and web interface + if ! installScripts; then + printf " %b Failure in dependent script copy function.\\n" "${CROSS}" + exit 1 + fi + + # /opt/pihole/utils.sh should be installed by installScripts now, so we can use it + if [ -f "${PI_HOLE_INSTALL_DIR}/utils.sh" ]; then + # shellcheck disable=SC1091 + source "${PI_HOLE_INSTALL_DIR}/utils.sh" + else + printf " %b Failure: /opt/pihole/utils.sh does not exist .\\n" "${CROSS}" + exit 1 + fi + + # Install config files + if ! 
installConfigs; then + printf " %b Failure in dependent config copy function.\\n" "${CROSS}" + exit 1 + fi + # If the user wants to install the dashboard, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # do so + installPiholeWeb + fi + # Install the cron file + installCron + + # Install the logrotate file + installLogrotate || true + + # Check if dnsmasq is present. If so, disable it and back up any possible + # config file + disable_dnsmasq + + # install a man page entry for pihole + install_manpage + + # Update setupvars.conf with any variables that may or may not have been changed during the install + finalExports +} + +# SELinux +checkSelinux() { + local DEFAULT_SELINUX + local CURRENT_SELINUX + local SELINUX_ENFORCING=0 + # Check for SELinux configuration file and getenforce command + if [[ -f /etc/selinux/config ]] && is_command getenforce; then + # Check the default SELinux mode + DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config) + case "${DEFAULT_SELINUX,,}" in + enforcing) + printf " %b %bDefault SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${DEFAULT_SELINUX,,}" "${COL_NC}" + SELINUX_ENFORCING=1 + ;; + *) # 'permissive' and 'disabled' + printf " %b %bDefault SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${DEFAULT_SELINUX,,}" "${COL_NC}" + ;; + esac + # Check the current state of SELinux + CURRENT_SELINUX=$(getenforce) + case "${CURRENT_SELINUX,,}" in + enforcing) + printf " %b %bCurrent SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${CURRENT_SELINUX,,}" "${COL_NC}" + SELINUX_ENFORCING=1 + ;; + *) # 'permissive' and 'disabled' + printf " %b %bCurrent SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${CURRENT_SELINUX,,}" "${COL_NC}" + ;; + esac + else + echo -e " ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}"; + fi + # Exit the installer if any SELinux checks toggled the flag + if [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -z "${PIHOLE_SELINUX}" ]]; then + printf " Pi-hole does not provide an SELinux policy as the required changes modify 
the security of your system.\\n" + printf " Please refer to https://wiki.centos.org/HowTos/SELinux if SELinux is required for your deployment.\\n" + printf " This check can be skipped by setting the environment variable %bPIHOLE_SELINUX%b to %btrue%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" "${COL_LIGHT_RED}" "${COL_NC}" + printf " e.g: export PIHOLE_SELINUX=true\\n" + printf " By setting this variable to true you acknowledge there may be issues with Pi-hole during or after the install\\n" + printf "\\n %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; + exit 1; + elif [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -n "${PIHOLE_SELINUX}" ]]; then + printf " %b %bSELinux Enforcing detected%b. PIHOLE_SELINUX env variable set - installer will continue\\n" "${INFO}" "${COL_LIGHT_RED}" "${COL_NC}" + fi +} + +# Installation complete message with instructions for the user +displayFinalMessage() { + # If the number of arguments is > 0, + if [[ "${#1}" -gt 0 ]] ; then + # set the password to the first argument. + pwstring="$1" + elif [[ $(grep 'WEBPASSWORD' -c "${setupVars}") -gt 0 ]]; then + # Else if the password exists from previous setup, we'll load it later + pwstring="unchanged" + else + # Else, inform the user that there is no set password. + pwstring="NOT SET" + fi + # If the user wants to install the dashboard, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # Store a message in a variable and display it + additional="View the web interface at http://pi.hole/admin or http://${IPV4_ADDRESS%/*}/admin\\n\\nYour Admin Webpage login password is ${pwstring}" + fi + + # Final completion message to user + dialog --no-shadow --keep-tite \ + --title "Installation Complete!" 
\ + --msgbox "Configure your devices to use the Pi-hole as their DNS server using:\ +\\n\\nIPv4: ${IPV4_ADDRESS%/*}\ +\\nIPv6: ${IPV6_ADDRESS:-"Not Configured"}\ +\\nIf you have not done so already, the above IP should be set to static.\ +\\n${additional}" "${r}" "${c}" +} + +update_dialogs() { + # If pihole -r "reconfigure" option was selected, + if [[ "${reconfigure}" = true ]]; then + # set some variables that will be used + opt1a="Repair" + opt1b="This will retain existing settings" + strAdd="You will remain on the same version" + else + # Otherwise, set some variables with different values + opt1a="Update" + opt1b="This will retain existing settings." + strAdd="You will be updated to the latest version." + fi + opt2a="Reconfigure" + opt2b="Resets Pi-hole and allows re-selecting settings." + + # Display the information to the user + UpdateCmd=$(dialog --no-shadow --keep-tite --output-fd 1 \ + --cancel-label Exit \ + --title "Existing Install Detected!" \ + --menu "\\n\\nWe have detected an existing install.\ +\\n\\nPlease choose from the following options:\ +\\n($strAdd)"\ + "${r}" "${c}" 2 \ + "${opt1a}" "${opt1b}" \ + "${opt2a}" "${opt2b}") || result=$? 
+ + case ${result} in + "${DIALOG_CANCEL}" | "${DIALOG_ESC}") + printf " %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" + exit 1 + ;; + esac + + # Set the variable based on if the user chooses + case ${UpdateCmd} in + # repair, or + "${opt1a}") + printf " %b %s option selected\\n" "${INFO}" "${opt1a}" + useUpdateVars=true + ;; + # reconfigure, + "${opt2a}") + printf " %b %s option selected\\n" "${INFO}" "${opt2a}" + useUpdateVars=false + ;; + esac +} + +check_download_exists() { + status=$(curl --head --silent "https://ftl.pi-hole.net/${1}" | head -n 1) + if grep -q "404" <<< "$status"; then + return 1 + else + return 0 + fi +} + +fully_fetch_repo() { + # Add upstream branches to shallow clone + local directory="${1}" + + cd "${directory}" || return 1 + if is_repo "${directory}"; then + git remote set-branches origin '*' || return 1 + git fetch --quiet || return 1 + else + return 1 + fi + return 0 +} + +get_available_branches() { + # Return available branches + local directory + directory="${1}" + local output + + cd "${directory}" || return 1 + # Get reachable remote branches, but store STDERR as STDOUT variable + output=$( { git ls-remote --heads --quiet | cut -d'/' -f3- -; } 2>&1 ) + # echo status for calling function to capture + echo "$output" + return +} + +fetch_checkout_pull_branch() { + # Check out specified branch + local directory + directory="${1}" + local branch + branch="${2}" + + # Set the reference for the requested branch, fetch, check it put and pull it + cd "${directory}" || return 1 + git remote set-branches origin "${branch}" || return 1 + git stash --all --quiet &> /dev/null || true + git clean --quiet --force -d || true + git fetch --quiet || return 1 + checkout_pull_branch "${directory}" "${branch}" || return 1 +} + +checkout_pull_branch() { + # Check out specified branch + local directory + directory="${1}" + local branch + branch="${2}" + local oldbranch + + cd "${directory}" || return 1 + + oldbranch="$(git 
symbolic-ref HEAD)" + + str="Switching to branch: '${branch}' from '${oldbranch}'" + printf " %b %s" "${INFO}" "$str" + git checkout "${branch}" --quiet || return 1 + printf "%b %b %s\\n" "${OVER}" "${TICK}" "$str" + # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git) + chmod -R a+rX "${directory}" + + git_pull=$(git pull --no-rebase || return 1) + + printf " %b %s\\n" "${INFO}" "${git_pull}" + + return 0 +} + +clone_or_update_repos() { + # If the user wants to reconfigure, + if [[ "${reconfigure}" == true ]]; then + printf " %b Performing reconfiguration, skipping download of local repos\\n" "${INFO}" + # Reset the Core repo + resetRepo ${PI_HOLE_LOCAL_REPO} || \ + { printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \ + exit 1; \ + } + # If the Web interface was installed, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # reset it's repo + resetRepo ${webInterfaceDir} || \ + { printf " %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceDir}" "${COL_NC}"; \ + exit 1; \ + } + fi + # Otherwise, a repair is happening + else + # so get git files for Core + getGitFiles ${PI_HOLE_LOCAL_REPO} ${piholeGitUrl} || \ + { printf " %b Unable to clone %s into %s, unable to continue%b\\n" "${COL_LIGHT_RED}" "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \ + exit 1; \ + } + # If the Web interface was installed, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # get the Web git files + getGitFiles ${webInterfaceDir} ${webInterfaceGitUrl} || \ + { printf " %b Unable to clone %s into ${webInterfaceDir}, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceGitUrl}" "${COL_NC}"; \ + exit 1; \ + } + fi + fi +} + +# Download FTL binary to random temp directory and install FTL binary +# Disable directive for SC2120 a value _can_ be passed to this function, but it is passed from an 
external script that sources this one +# shellcheck disable=SC2120 +FTLinstall() { + # Local, named variables + local str="Downloading and Installing FTL" + printf " %b %s..." "${INFO}" "${str}" + + # Move into the temp ftl directory + pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; } + + local ftlBranch + local url + + if [[ -f "/etc/pihole/ftlbranch" ]];then + ftlBranch=$( /dev/null + + # Install the new version with the correct permissions + install -T -m 0755 "${binary}" /usr/bin/pihole-FTL + + # Move back into the original directory the user was in + popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; } + + # Installed the FTL service + printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" + return 0 + else + # Otherwise, the hash download failed, so print and exit. + popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; } + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + printf " %b Error: Download of %s/%s failed (checksum error)%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}" + return 1 + fi + else + # Otherwise, the download failed, so print and exit. 
+ popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; } + printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" + # The URL could not be found + printf " %b Error: URL %s/%s not found%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}" + return 1 + fi +} + +disable_dnsmasq() { + # dnsmasq can now be stopped and disabled if it exists + if is_command dnsmasq; then + if check_service_active "dnsmasq";then + printf " %b FTL can now resolve DNS Queries without dnsmasq running separately\\n" "${INFO}" + stop_service dnsmasq + disable_service dnsmasq + fi + fi + + # Backup existing /etc/dnsmasq.conf if present and ensure that + # /etc/dnsmasq.conf contains only "conf-dir=/etc/dnsmasq.d" + local conffile="/etc/dnsmasq.conf" + if [[ -f "${conffile}" ]]; then + printf " %b Backing up %s to %s.old\\n" "${INFO}" "${conffile}" "${conffile}" + mv "${conffile}" "${conffile}.old" + fi + # Create /etc/dnsmasq.conf + echo "conf-dir=/etc/dnsmasq.d" > "${conffile}" + chmod 644 "${conffile}" +} + +get_binary_name() { + # This gives the machine architecture which may be different from the OS architecture... + local machine + machine=$(uname -m) + + local l_binary + + local str="Detecting processor" + printf " %b %s..." 
"${INFO}" "${str}" + # If the machine is arm or aarch + if [[ "${machine}" == "arm"* || "${machine}" == *"aarch"* ]]; then + # ARM + local rev + rev=$(uname -m | sed "s/[^0-9]//g;") + local lib + lib=$(ldd "$(command -v sh)" | grep -E '^\s*/lib' | awk '{ print $1 }') + if [[ "${lib}" == "/lib/ld-linux-aarch64.so.1" ]]; then + printf "%b %b Detected AArch64 (64 Bit ARM) processor\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-aarch64-linux-gnu" + elif [[ "${lib}" == "/lib/ld-linux-armhf.so.3" ]]; then + # Hard-float available: Use gnueabihf binaries + # If ARMv8 or higher is found (e.g., BCM2837 as found in Raspberry Pi Model 3B) + if [[ "${rev}" -gt 7 ]]; then + printf "%b %b Detected ARMv8 (or newer) processor\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-armv8-linux-gnueabihf" + elif [[ "${rev}" -eq 7 ]]; then + # Otherwise, if ARMv7 is found (e.g., BCM2836 as found in Raspberry Pi Model 2) + printf "%b %b Detected ARMv7 processor (with hard-float support)\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-armv7-linux-gnueabihf" + else + # Otherwise, use the ARMv6 binary (e.g., BCM2835 as found in Raspberry Pi Zero and Model 1) + printf "%b %b Detected ARMv6 processor (with hard-float support)\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-armv6-linux-gnueabihf" + fi + else + # No hard-float support found: Use gnueabi binaries + # Use the ARMv4-compliant binary only if we detected an ARMv4T core + if [[ "${rev}" -eq 4 ]]; then + printf "%b %b Detected ARMv4 processor\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-armv4-linux-gnueabi" + # Otherwise, use the ARMv5 binary. 
To date (end of 2020), all modern ARM processors + # are backwards-compatible to the ARMv5 + else + printf "%b %b Detected ARMv5 (or newer) processor\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-armv5-linux-gnueabi" + fi + fi + elif [[ "${machine}" == "x86_64" ]]; then + # This gives the processor of packages dpkg installs (for example, "i386") + local dpkgarch + dpkgarch=$(dpkg --print-processor 2> /dev/null || dpkg --print-architecture 2> /dev/null) + + # Special case: This is a 32 bit OS, installed on a 64 bit machine + # -> change machine processor to download the 32 bit executable + # We only check this for Debian-based systems as this has been an issue + # in the past (see https://github.com/pi-hole/pi-hole/pull/2004) + if [[ "${dpkgarch}" == "i386" ]]; then + printf "%b %b Detected 32bit (i686) processor\\n" "${OVER}" "${TICK}" + l_binary="pihole-FTL-linux-x86_32" + else + # 64bit + printf "%b %b Detected x86_64 processor\\n" "${OVER}" "${TICK}" + # set the binary to be used + l_binary="pihole-FTL-linux-x86_64" + fi + elif [[ "${machine}" == "riscv64" ]]; then + printf "%b %b Detected riscv64 processor\\n" "${OVER}" "${TICK}" + l_binary="pihole-FTL-riscv64-linux-gnu" + else + # Something else - we try to use 32bit executable and warn the user + if [[ ! "${machine}" == "i686" ]]; then + printf "%b %b %s...\\n" "${OVER}" "${CROSS}" "${str}" + printf " %b %bNot able to detect processor (unknown: %s), trying x86 (32bit) executable%b\\n" "${INFO}" "${COL_LIGHT_RED}" "${machine}" "${COL_NC}" + printf " %b Contact Pi-hole Support if you experience issues (e.g: FTL not running)\\n" "${INFO}" + else + printf "%b %b Detected 32bit (i686) processor\\n" "${OVER}" "${TICK}" + fi + l_binary="pihole-FTL-linux-x86_32" + fi + + # Returning a string value via echo + echo ${l_binary} +} + +FTLcheckUpdate() { + #In the next section we check to see if FTL is already installed (in case of pihole -r). 
+ #If the installed version matches the latest version, then check the installed sha1sum of the binary vs the remote sha1sum. If they do not match, then download + printf " %b Checking for existing FTL binary...\\n" "${INFO}" + + local ftlLoc + ftlLoc=$(command -v pihole-FTL 2>/dev/null) + + local ftlBranch + + if [[ -f "/etc/pihole/ftlbranch" ]];then + ftlBranch=$("$TEMPLOG" + # Delete templog, but allow for addressing via file handle + # This lets us write to the log without having a temporary file on the drive, which + # is meant to be a security measure so there is not a lingering file on the drive during the install process + rm "$TEMPLOG" +} + +copy_to_install_log() { + # Copy the contents of file descriptor 3 into the install log + # Since we use color codes such as '\e[1;33m', they should be removed + sed 's/\[[0-9;]\{1,5\}m//g' < /proc/$$/fd/3 > "${installLogLoc}" + chmod 644 "${installLogLoc}" +} + +main() { + ######## FIRST CHECK ######## + # Must be root to install + local str="Root user check" + printf "\\n" + + # If the user's id is zero, + if [[ "${EUID}" -eq 0 ]]; then + # they are root and all is good + printf " %b %s\\n" "${TICK}" "${str}" + # Show the Pi-hole logo so people know it's genuine since the logo and name are trademarked + show_ascii_berry + make_temporary_log + else + # Otherwise, they do not have enough privileges, so let the user know + printf " %b %s\\n" "${INFO}" "${str}" + printf " %b %bScript called with non-root privileges%b\\n" "${INFO}" "${COL_LIGHT_RED}" "${COL_NC}" + printf " The Pi-hole requires elevated privileges to install and run\\n" + printf " Please check the installer for any concerns regarding this requirement\\n" + printf " Make sure to download this script from a trusted source\\n\\n" + printf " %b Sudo utility check" "${INFO}" + + # If the sudo command exists, try rerunning as admin + if is_command sudo ; then + printf "%b %b Sudo utility check\\n" "${OVER}" "${TICK}" + + # when run via curl piping + if [[ "$0" 
== "bash" ]]; then + # Download the install script and run it with admin rights + exec curl -sSL https://raw.githubusercontent.com/pi-hole/pi-hole/master/automated%20install/basic-install.sh | sudo bash "$@" + else + # when run via calling local bash script + exec sudo bash "$0" "$@" + fi + + exit $? + else + # Otherwise, tell the user they need to run the script as root, and bail + printf "%b %b Sudo utility check\\n" "${OVER}" "${CROSS}" + printf " %b Sudo is needed for the Web Interface to run pihole commands\\n\\n" "${INFO}" + printf " %b %bPlease re-run this installer as root${COL_NC}\\n" "${INFO}" "${COL_LIGHT_RED}" + exit 1 + fi + fi + + # Check if SELinux is Enforcing and exit before doing anything else + checkSelinux + + # Check for supported package managers so that we may install dependencies + package_manager_detect + + # Notify user of package availability + notify_package_updates_available + + # Install packages necessary to perform os_check + printf " %b Checking for / installing Required dependencies for OS Check...\\n" "${INFO}" + install_dependent_packages "${OS_CHECK_DEPS[@]}" + + # Check that the installed OS is officially supported - display warning if not + os_check + + # Install packages used by this installation script + printf " %b Checking for / installing Required dependencies for this install script...\\n" "${INFO}" + install_dependent_packages "${INSTALLER_DEPS[@]}" + + # If the setup variable file exists, + if [[ -f "${setupVars}" ]]; then + # if it's running unattended, + if [[ "${runUnattended}" == true ]]; then + printf " %b Performing unattended setup, no dialogs will be displayed\\n" "${INFO}" + # Use the setup variables + useUpdateVars=true + # also disable debconf-apt-progress dialogs + export DEBIAN_FRONTEND="noninteractive" + else + # If running attended, show the available options (repair/reconfigure) + update_dialogs + fi + fi + + if [[ "${useUpdateVars}" == false ]]; then + # Display welcome dialogs + welcomeDialogs + # 
Create directory for Pi-hole storage + install -d -m 755 /etc/pihole/ + # Determine available interfaces + get_available_interfaces + # Find interfaces and let the user choose one + chooseInterface + # find IPv4 and IPv6 information of the device + collect_v4andv6_information + # Decide what upstream DNS Servers to use + setDNS + # Give the user a choice of blocklists to include in their install. Or not. + chooseBlocklists + # Let the user decide if they want the web interface to be installed automatically + setAdminFlag + # Let the user decide if they want query logging enabled... + setLogging + # Let the user decide the FTL privacy level + setPrivacyLevel + else + # Setup adlist file if not exists + installDefaultBlocklists + + # Source ${setupVars} to use predefined user variables in the functions + source "${setupVars}" + + # Get the privacy level if it exists (default is 0) + if [[ -f "${FTL_CONFIG_FILE}" ]]; then + # get the value from $FTL_CONFIG_FILE (and ignoring all commented lines) + PRIVACY_LEVEL=$(sed -e '/^[[:blank:]]*#/d' "${FTL_CONFIG_FILE}" | grep "PRIVACYLEVEL" | awk -F "=" 'NR==1{printf$2}') + + # If no setting was found, default to 0 + PRIVACY_LEVEL="${PRIVACY_LEVEL:-0}" + fi + fi + # Download or update the scripts by updating the appropriate git repos + clone_or_update_repos + + # Install the Core dependencies + local dep_install_list=("${PIHOLE_DEPS[@]}") + if [[ "${INSTALL_WEB_SERVER}" == true ]]; then + # And, if the setting says so, install the Web admin interface dependencies + dep_install_list+=("${PIHOLE_WEB_DEPS[@]}") + fi + + # Install packages used by the actual software + printf " %b Checking for / installing Required dependencies for Pi-hole software...\\n" "${INFO}" + install_dependent_packages "${dep_install_list[@]}" + unset dep_install_list + + # On some systems, lighttpd is not enabled on first install. 
We need to enable it here if the user + # has chosen to install the web interface, else the LIGHTTPD_ENABLED check will fail + if [[ "${INSTALL_WEB_SERVER}" == true ]]; then + enable_service lighttpd + fi + # Determine if lighttpd is correctly enabled + if check_service_active "lighttpd"; then + LIGHTTPD_ENABLED=true + else + LIGHTTPD_ENABLED=false + fi + # Create the pihole user + create_pihole_user + + # Check if FTL is installed - do this early on as FTL is a hard dependency for Pi-hole + local funcOutput + funcOutput=$(get_binary_name) #Store output of get_binary_name here + local binary + binary="pihole-FTL${funcOutput##*pihole-FTL}" #binary name will be the last line of the output of get_binary_name (it always begins with pihole-FTL) + local theRest + theRest="${funcOutput%pihole-FTL*}" # Print the rest of get_binary_name's output to display (cut out from first instance of "pihole-FTL") + if ! FTLdetect "${binary}" "${theRest}"; then + printf " %b FTL Engine not installed\\n" "${CROSS}" + exit 1 + fi + + # Install and log everything to a file + installPihole | tee -a /proc/$$/fd/3 + + # Copy the temp log file into final log location for storage + copy_to_install_log + + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # Add password to web UI if there is none + pw="" + # If no password is set, + if [[ $(grep 'WEBPASSWORD' -c "${setupVars}") == 0 ]] ; then + # generate a random password + pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8) + # shellcheck disable=SC1091 + . 
/opt/pihole/webpage.sh + echo "WEBPASSWORD=$(HashPassword "${pw}")" >> "${setupVars}" + fi + fi + + # Check for and disable systemd-resolved-DNSStubListener before reloading resolved + # DNSStubListener needs to remain in place for installer to download needed files, + # so this change needs to be made after installation is complete, + # but before starting or restarting the dnsmasq or ftl services + disable_resolved_stublistener + + # If the Web server was installed, + if [[ "${INSTALL_WEB_SERVER}" == true ]]; then + if [[ "${LIGHTTPD_ENABLED}" == true ]]; then + restart_service lighttpd + enable_service lighttpd + else + printf " %b Lighttpd is disabled, skipping service restart\\n" "${INFO}" + fi + fi + + printf " %b Restarting services...\\n" "${INFO}" + # Start services + + # Enable FTL + # Ensure the service is enabled before trying to start it + # Fixes a problem reported on Ubuntu 18.04 where trying to start + # the service before enabling causes installer to exit + enable_service pihole-FTL + + # If this is an update from a previous Pi-hole installation + # we need to move any existing `pihole*` logs from `/var/log` to `/var/log/pihole` + # if /var/log/pihole.log is not a symlink (set during FTL startup) move the files + # can be removed with Pi-hole v6.0 + # To be sure FTL is not running when we move the files we explicitly stop it here + + stop_service pihole-FTL &> /dev/null + + if [ ! -d /var/log/pihole/ ]; then + mkdir -m 0755 /var/log/pihole/ + fi + + # Special handling for pihole-FTL.log -> pihole/FTL.log + if [ -f /var/log/pihole-FTL.log ] && [ ! 
-L /var/log/pihole-FTL.log ]; then + # /var/log/pihole-FTL.log -> /var/log/pihole/FTL.log + # /var/log/pihole-FTL.log.1 -> /var/log/pihole/FTL.log.1 + # /var/log/pihole-FTL.log.2.gz -> /var/log/pihole/FTL.log.2.gz + # /var/log/pihole-FTL.log.3.gz -> /var/log/pihole/FTL.log.3.gz + # /var/log/pihole-FTL.log.4.gz -> /var/log/pihole/FTL.log.4.gz + # /var/log/pihole-FTL.log.5.gz -> /var/log/pihole/FTL.log.5.gz + for f in /var/log/pihole-FTL.log*; do mv "$f" "$( sed "s/pihole-/pihole\//" <<< "$f")"; done + fi + + # Remaining log files + if [ -f /var/log/pihole.log ] && [ ! -L /var/log/pihole.log ]; then + mv /var/log/pihole*.* /var/log/pihole/ 2>/dev/null + fi + + restart_service pihole-FTL + + # Download and compile the aggregated block list + runGravity + + # Update local and remote versions via updatechecker + /opt/pihole/updatecheck.sh + + if [[ "${useUpdateVars}" == false ]]; then + displayFinalMessage "${pw}" + fi + + # If the Web interface was installed, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + # If there is a password, + if (( ${#pw} > 0 )) ; then + # display the password + printf " %b Web Interface password: %b%s%b\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${pw}" "${COL_NC}" + printf " %b This can be changed using 'pihole -a -p'\\n\\n" "${INFO}" + fi + fi + + if [[ "${useUpdateVars}" == false ]]; then + # If the Web interface was installed, + if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then + printf " %b View the web interface at http://pi.hole/admin or http://%s/admin\\n\\n" "${INFO}" "${IPV4_ADDRESS%/*}" + fi + # Explain to the user how to use Pi-hole as their DNS server + printf " %b You may now configure your devices to use the Pi-hole as their DNS server\\n" "${INFO}" + [[ -n "${IPV4_ADDRESS%/*}" ]] && printf " %b Pi-hole DNS (IPv4): %s\\n" "${INFO}" "${IPV4_ADDRESS%/*}" + [[ -n "${IPV6_ADDRESS}" ]] && printf " %b Pi-hole DNS (IPv6): %s\\n" "${INFO}" "${IPV6_ADDRESS}" + printf " %b If you have not done so already, the above IP should be set to 
static.\\n" "${INFO}" + INSTALL_TYPE="Installation" + else + INSTALL_TYPE="Update" + fi + + # Display where the log file is + printf "\\n %b The install log is located at: %s\\n" "${INFO}" "${installLogLoc}" + printf " %b %b%s complete! %b\\n" "${TICK}" "${COL_LIGHT_GREEN}" "${INSTALL_TYPE}" "${COL_NC}" + + if [[ "${INSTALL_TYPE}" == "Update" ]]; then + printf "\\n" + "${PI_HOLE_BIN_DIR}"/pihole version --current + fi +} + +# allow to source this script without running it +if [[ "${SKIP_INSTALL}" != true ]] ; then + main "$@" +fi diff --git a/pihole/deployment.yaml b/pihole/deployment.yaml new file mode 100644 index 0000000..3fb4637 --- /dev/null +++ b/pihole/deployment.yaml @@ -0,0 +1,157 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n pihole + kompose.service.expose: pihole.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: pihole + name: pihole + namespace: pihole +spec: + ports: + - name: "80" + port: 80 + targetPort: 80 + selector: + io.kompose.service: pihole +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: pihole + namespace: pihole +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n pihole + kompose.service.expose: pihole.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: pihole + name: pihole + namespace: pihole +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: pihole + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n pihole + kompose.service.expose: pihole.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/pihole-default: "true" + io.kompose.service: pihole + spec: + containers: + - 
env: + - name: TZ + value: America/Chicago + - name: WEBPASSWORD + value: password + image: pihole/pihole:latest + name: pihole + ports: + - containerPort: 80 + protocol: TCP + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + volumeMounts: + - mountPath: /etc/pihole + name: pihole + - mountPath: /etc/dnsmasq.d + name: dnsmasq + restartPolicy: Always + volumes: + - name: pihole + persistentVolumeClaim: + claimName: pihole + - name: dnsmasq + persistentVolumeClaim: + claimName: dnsmasq +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n pihole + kompose.service.expose: pihole.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: pihole + name: pihole + namespace: pihole +spec: + rules: + - host: pihole.wayl.one + http: + paths: + - backend: + service: + name: pihole + port: + number: 80 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: pihole + name: pihole + namespace: pihole +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: dnsmasq + name: dnsmasq + namespace: pihole +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + diff --git a/pihole/docker-compose.yml b/pihole/docker-compose.yml index 15c4e93..07859cc 100644 --- a/pihole/docker-compose.yml +++ b/pihole/docker-compose.yml @@ -4,67 +4,23 @@ services: pihole: image: pihole/pihole:latest container_name: pihole - restart: unless-stopped - security_opt: - - no-new-privileges:true - networks: - - proxy # Volumes store your data between container upgrades volumes: - - /etc/localtime:/etc/localtime:ro - - 
${PWD}/pihole/data/etc-pihole/:/etc/pihole/ - - ${PWD}/pihole/data/etc-dnsmasq.d/:/etc/dnsmasq.d/ - # ports: - # - "8080:80/tcp" - # - "53:53/tcp" - # - "53:53/udp" - # - "67:67/udp" + - pihole:/etc/pihole/ + - dnsmasq:/etc/dnsmasq.d/ + ports: + - 80 environment: - # TZ: "America/Chicago" - WEBPASSWORD: "${PIHOLE_PASSWORD}" + TZ: "America/Chicago" + WEBPASSWORD: password # Recommended but not required (DHCP needs NET_ADMIN) # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities cap_add: - NET_ADMIN labels: - - "traefik.enable=true" - - "traefik.http.routers.pihole.entrypoints=http" - - "traefik.http.routers.pihole.rule=Host(`pihole.${URL}`)" - - "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https" - - "traefik.http.routers.pihole.middlewares=pihole-https-redirect" - - "traefik.http.routers.pihole-secure.entrypoints=https" - - "traefik.http.routers.pihole-secure.rule=Host(`pihole.${URL}`)" - - "traefik.http.routers.pihole-secure.tls=true" - - "traefik.http.routers.pihole-secure.service=pihole" - - "traefik.http.services.pihole.loadbalancer.server.port=80" - - "traefik.docker.network=proxy" + kompose.service.expose: pihole.wayl.one -networks: - proxy: - external: true -# -# version: "3" - -# # More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/ -# services: -# pihole: -# container_name: pihole -# image: pihole/pihole:latest -# # For DHCP it is recommended to remove these ports and instead add: network_mode: "host" -# ports: -# # - "53:53/tcp" -# # - "53:53/udp" -# # - "67:67/udp" # Only required if you are using Pi-hole as your DHCP server -# - "8080:80/tcp" -# environment: -# TZ: "America/Chicago" -# # WEBPASSWORD: 'set a secure password here or it will be random' -# # Volumes store your data between container upgrades -# volumes: -# - "./etc-pihole:/etc/pihole" -# - "./etc-dnsmasq.d:/etc/dnsmasq.d" -# # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities -# cap_add: -# - NET_ADMIN # 
Required if you are using Pi-hole as your DHCP server, else not needed -# restart: unless-stopped +volumes: + pihole: + dnsmasq: diff --git a/pihole/justfile b/pihole/justfile new file mode 100644 index 0000000..b66ae30 --- /dev/null +++ b/pihole/justfile @@ -0,0 +1,10 @@ +default: convert deploy viz + +convert: + kompose convert -o deployment.yaml -n pihole +deploy: + kubectl apply -f deployment.yaml +viz: + k8sviz -n pihole --kubeconfig $KUBECONFIG -t png -o pihole-k8s.png +restart: + kubectl rollout restart -n pihole deployment/pihole diff --git a/registry-ui/pvc-inspector.yaml b/registry-ui/pvc-inspector.yaml index 93be5ea..f2affc9 100644 --- a/registry-ui/pvc-inspector.yaml +++ b/registry-ui/pvc-inspector.yaml @@ -5,7 +5,7 @@ metadata: namespace: registry spec: containers: - - image: registry.wayl.one/devtainer:slim + - image: registry.fokais.com/devtainer:slim name: pvc-inspector command: ["sleep", "300"] volumeMounts: @@ -15,6 +15,8 @@ spec: name: pvc-registry-auth - mountPath: /pvc-registry-config name: pvc-registry-config + imagePullSecrets: + - name: fokais-regcred volumes: - name: pvc-registry persistentVolumeClaim: diff --git a/registry-ui/registry.password b/registry-ui/registry.password new file mode 100644 index 0000000..5a7a30c --- /dev/null +++ b/registry-ui/registry.password @@ -0,0 +1,2 @@ +waylon:$2y$05$wj/1a88SL14RJc28rO5aHO7pFDnLNdKLO8t13WO58ZceiBagD0Swm + diff --git a/registry/auth/htpasswd b/registry/auth/htpasswd new file mode 100644 index 0000000..5a7a30c --- /dev/null +++ b/registry/auth/htpasswd @@ -0,0 +1,2 @@ +waylon:$2y$05$wj/1a88SL14RJc28rO5aHO7pFDnLNdKLO8t13WO58ZceiBagD0Swm + diff --git a/registry/certs/tls.crt b/registry/certs/tls.crt new file mode 100644 index 0000000..21c029a --- /dev/null +++ b/registry/certs/tls.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFMTCCAxmgAwIBAgIUNuf5H7FDNdMjTIL/gOSJxToTH04wDQYJKoZIhvcNAQEL +BQAwGjEYMBYGA1UEAwwPZG9ja2VyLXJlZ2lzdHJ5MB4XDTIzMTEwNTAwMzk1MFoX 
+DTI0MTEwNDAwMzk1MFowGjEYMBYGA1UEAwwPZG9ja2VyLXJlZ2lzdHJ5MIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEApY8EqeK74ryv8YSa+xYWLuzDCn3t +geuKpHhHv+BkL9XmPe/TaHFuUMV2KbnVbYhfBHe5TP2YkMz2a0KcGt8Fxdqxvj6b +GvtAhTRrWuPEJgCNDB8Ei9pW0aw+AWltXBDXnHLzzTouMC+WonCEfT8RQYK1rKTV +S9gnv8TFkjVBHRekbGogOUsXCt8H1mNyaeyWRCn4Qdiz8T6vew4TYZlxeJrxfRdZ +Oj75BbLfQcddYPfPFRwE+r1+mJV7i9mE+aWlqLK0X2vqf/qgMtLP6NqAWaRDzw7v +BdGQDnua3CwA+RL6yRQUtwE9kZmnohJFrXAyV12TKS0JqjN1KTSUpoAXcE6szkkS +eu4JTtwfMsC+xD61tKeTW/gxk1BXTJY/gI07DLDGSv4ZTrXmw17u9DHmWhOoVs/F +XxTjr+9RM0TXMj3JVwHgw7l60vsxjhcIS30NQ6T5yB555sGZ0qMO4rfA/zq0ulos +uiglj9ivSf3Y9PYdKAM07fPSyUtAupsLOcchX62CL2UfZA4kOfMTJKxbxZPZauTD +HPNLFLCnodY0+iN1UtliumAKVVcGZfXwQQjBIxMUbGue5QIVyFCbD95MoCVUOXsa +IskLSqB1r1BBK5+qxnYgZwsoezoQYnjO9V7rIcEXkHSpFinxXiS7JSGHED9r97A3 +zGm0SMpUNWYrjlkCAwEAAaNvMG0wHQYDVR0OBBYEFAeSIweAbq4nDyEaCZroFpnx +rHSzMB8GA1UdIwQYMBaAFAeSIweAbq4nDyEaCZroFpnxrHSzMA8GA1UdEwEB/wQF +MAMBAf8wGgYDVR0RBBMwEYIPZG9ja2VyLXJlZ2lzdHJ5MA0GCSqGSIb3DQEBCwUA +A4ICAQCg9gwgTSkp0UdDMTS5iHdtEFsw60LEXR4E3EQaJLn3CizJ2UCIX/uNGe7q +E99+BjIhXWj+a3/79ZPqwyui7J9Vd7dhLio+daTcrM7gbbeVxIulhJatfGuLfpxW +tDC3MAw8vDigNNG7yGgt3b1uTw2QxBxtXFFP+ki5YA8nUq2Mw1/Fzyq0hOwS5Wnh +IpmYa626G9A6mbQmH/28HDYJd/12ARAhZJVY7wTSRkwZjXmzj+wo65ez7Mq1uS8K +VK3NiM5gxCMrr8jcwP2u3gehljajGRK0gzUWWe3i0pmwwh3SMrfBeiVqJ6p2Dg0G +mWeas1fDWYWA19QaNEKdj3KNzfa3QHHHWD4AWCxfLWMYg7cJn3hb2wv4Oso2J3Wh +MM6Ddnjj4bus0wqZ5tWr0YL0TZnmiS5Cv6ibdXhgMXmpgPWCKMEOXnh6OlDw9H13 +IVdowxURR/khuVCUtr9Cx9D1o+nz5Fpp6FotmsRjRWyDt/Fo0ijLoxKXMPrmpHF1 +Wt/Jgr46p+0SaIV2ANnacSFzaj53pM+h7kmv3FyODhThumBYkIToNV0XFmpiHrab +oAN3/TEjEhxOaXSdeb8H7Yw9xgdb02h8kf7oYD8m1joKlDOnE7q2MQNkXKdQPz66 +96PIWjLLrgzjfwjbfAOJ0kbB2jgzJfCaCv5MiNnPn7RyXYIRQQ== +-----END CERTIFICATE----- diff --git a/registry/registry-volume.yaml b/registry/registry-volume.yaml new file mode 100644 index 0000000..76d64bf --- /dev/null +++ b/registry/registry-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: docker-repo-pvc + 
spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + limits: + storage: 500Gi diff --git a/sshx-server/deployment.yaml b/sshx-server/deployment.yaml new file mode 100644 index 0000000..87f37aa --- /dev/null +++ b/sshx-server/deployment.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n sshx-server + kompose.image-pull-secret: regcred + kompose.service.expose: sshx.wayl.one + kompose.service.type: loadbalancer + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: sshx-server-tcp + name: sshx-server-tcp + namespace: sshx-server +spec: + ports: + - name: "8051" + port: 8051 + targetPort: 8051 + selector: + io.kompose.service: sshx-server + type: LoadBalancer +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: sshx-server + namespace: sshx-server +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n sshx-server + kompose.image-pull-secret: regcred + kompose.service.expose: sshx.wayl.one + kompose.service.type: loadbalancer + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: sshx-server + name: sshx-server + namespace: sshx-server +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: sshx-server + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert -o deployment.yaml -n sshx-server + kompose.image-pull-secret: regcred + kompose.service.expose: sshx.wayl.one + kompose.service.type: loadbalancer + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/sshx-server-default: "true" + io.kompose.service: sshx-server + spec: + containers: + - args: + - sh + - -c + - './sshx-server --listen :: --host "https://sshx.wayl.one" --secret=hello' + image: 
registry.wayl.one/sshx-server + name: sshx-server + ports: + - containerPort: 8051 + protocol: TCP + resources: {} + imagePullSecrets: + - name: regcred + restartPolicy: Always +status: {} + +--- +apiVersion: traefik.io/v1alpha1 +kind: TLSOption +metadata: + name: default + namespace: default + +spec: + alpnProtocols: + - http/1.1 + - h2 diff --git a/sshx-server/docker-compose.yml b/sshx-server/docker-compose.yml new file mode 100644 index 0000000..f5aa4c8 --- /dev/null +++ b/sshx-server/docker-compose.yml @@ -0,0 +1,11 @@ +version: "3" +services: + sshx-server: + image: registry.wayl.one/sshx-server + command: ["sh", "-c", './sshx-server --listen :: --host "https://sshx.wayl.one" --secret=hello'] + ports: + - 8051 + labels: + kompose.service.expose: sshx.wayl.one + kompose.image-pull-secret: regcred + kompose.service.type: loadbalancer diff --git a/status/85om9u.gif b/status/85om9u.gif new file mode 100644 index 0000000..65b71c3 Binary files /dev/null and b/status/85om9u.gif differ diff --git a/vault/deployment.yaml b/vault/deployment.yaml index 0234f9a..09c13e2 100644 --- a/vault/deployment.yaml +++ b/vault/deployment.yaml @@ -1,4 +1,3 @@ ---- apiVersion: v1 kind: Service metadata: @@ -62,13 +61,21 @@ spec: io.kompose.service: vault-server spec: containers: + # run vault server as the command + - env: + # - name: VAULT_LOCAL_CONFIG + # value: '{"storage": {"file": {"path": "/vault/file"}}, "listener": [{"tcp": {"address": "0.0.0.0:8200", "tls_disable": true}}], "default_lease_ttl": "168h", "max_lease_ttl": "720h", "ui": true}' - name: VAULT_ADDR value: http://0.0.0.0:8200 - - name: VAULT_DEV_ROOT_TOKEN_ID - value: vault-plaintext-root-token + # - name: VAULT_DEV_ROOT_TOKEN_ID + # valueFrom: + # secretKeyRef: + # key: VAULT_DEV_ROOT_TOKEN_ID + # name: vault-dev-root-token-id image: hashicorp/vault name: vault-server + command: ["vault", "server", "-config=/vault/config/vault.hcl"] ports: - containerPort: 8200 protocol: TCP @@ -77,6 +84,19 @@ spec: capabilities: 
add: - IPC_LOCK + volumeMounts: + - mountPath: /vault/data + name: vault-data + - name: vault-config + mountPath: /vault/config + volumes: + - name: vault-data + persistentVolumeClaim: + claimName: vault-data + - name: vault-config + configMap: + name: vault-config + restartPolicy: Always status: {} @@ -108,3 +128,39 @@ spec: status: loadBalancer: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: vault + name: vault-data + namespace: vault +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vault-config + namespace: vault +data: + vault.hcl: |- + disable_mlock = true + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "file" { + path = "/vault/data" + } diff --git a/wyze-bridge/docker-compose.yml b/wyze-bridge/docker-compose.yml index a20147c..853a617 100644 --- a/wyze-bridge/docker-compose.yml +++ b/wyze-bridge/docker-compose.yml @@ -4,11 +4,12 @@ services: wyze-bridge: image: mrlt8/wyze-bridge:latest container_name: wyze-bridge - restart: unless-stopped security_opt: - no-new-privileges:true - networks: - - proxy + ports: + - 5000:5000 + - 8554:8554 + - 8888:8888 environment: - QUALITY=SD30 - WYZE_EMAIL=${WYZE_EMAIL} @@ -18,23 +19,23 @@ services: - TZ=America/Chicago - RECORD_ALL=True volumes: - - /etc/localtime:/etc/localtime:ro - - ${PWD}/wyze-bridge/data/cams/img:/img - - ${PWD}/wyze-bridge/data/record:/record - + - img:/img + - record:/record labels: - - "traefik.enable=true" - - "traefik.http.routers.cams.entrypoints=http" - - "traefik.http.routers.cams.rule=Host(`cams.${URL}`)" - - "traefik.http.middlewares.cams-https-redirect.redirectscheme.scheme=https" - - "traefik.http.routers.cams.middlewares=cams-https-redirect" - - "traefik.http.routers.cams-secure.entrypoints=https" - - 
"traefik.http.routers.cams-secure.rule=Host(`cams.${URL}`)" - - "traefik.http.routers.cams-secure.tls=true" - - "traefik.http.routers.cams-secure.service=cams" - - "traefik.http.services.cams.loadbalancer.server.port=5000" - - "traefik.docker.network=proxy" + kompose.service.expose: cams.wayl.one + # labels: + # - "traefik.enable=true" + # - "traefik.http.routers.cams.entrypoints=http" + # - "traefik.http.routers.cams.rule=Host(`cams.${URL}`)" + # - "traefik.http.middlewares.cams-https-redirect.redirectscheme.scheme=https" + # - "traefik.http.routers.cams.middlewares=cams-https-redirect" + # - "traefik.http.routers.cams-secure.entrypoints=https" + # - "traefik.http.routers.cams-secure.rule=Host(`cams.${URL}`)" + # - "traefik.http.routers.cams-secure.tls=true" + # - "traefik.http.routers.cams-secure.service=cams" + # - "traefik.http.services.cams.loadbalancer.server.port=5000" + # - "traefik.docker.network=proxy" -networks: - proxy: - external: true +volumes: + img: + record: diff --git a/wyze-bridge/wyze-bridge.yaml b/wyze-bridge/wyze-bridge.yaml new file mode 100644 index 0000000..da7d95a --- /dev/null +++ b/wyze-bridge/wyze-bridge.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert --namespace wyze-bridge -o wyze-bridge.yaml + kompose.service.expose: cams.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: wyze-bridge + name: wyze-bridge + namespace: wyze-bridge +spec: + ports: + - name: "5000" + port: 5000 + targetPort: 5000 + - name: "8554" + port: 8554 + targetPort: 8554 + - name: "8888" + port: 8888 + targetPort: 8888 + selector: + io.kompose.service: wyze-bridge +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: Namespace +metadata: + creationTimestamp: null + name: wyze-bridge + namespace: wyze-bridge +spec: {} +status: {} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert --namespace 
wyze-bridge -o wyze-bridge.yaml + kompose.service.expose: cams.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: wyze-bridge + name: wyze-bridge + namespace: wyze-bridge +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: wyze-bridge + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert --namespace wyze-bridge -o wyze-bridge.yaml + kompose.service.expose: cams.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.network/wyze-bridge-default: "true" + io.kompose.service: wyze-bridge + spec: + containers: + - env: + - name: IMG_DIR + value: /img/ + - name: QUALITY + value: SD30 + - name: RECORD_ALL + value: "True" + - name: SNAPSHOT + value: RTSP1 + - name: TZ + value: America/Chicago + - name: WYZE_EMAIL + - name: WYZE_PASSWORD + image: mrlt8/wyze-bridge:latest + name: wyze-bridge + ports: + - containerPort: 5000 + hostPort: 5000 + protocol: TCP + - containerPort: 8554 + hostPort: 8554 + protocol: TCP + - containerPort: 8888 + hostPort: 8888 + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /img + name: img + - mountPath: /record + name: record + restartPolicy: Always + volumes: + - name: img + persistentVolumeClaim: + claimName: img + - name: record + persistentVolumeClaim: + claimName: record +status: {} + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kompose.cmd: kompose convert --namespace wyze-bridge -o wyze-bridge.yaml + kompose.service.expose: cams.wayl.one + kompose.version: 1.31.2 (a92241f79) + creationTimestamp: null + labels: + io.kompose.service: wyze-bridge + name: wyze-bridge + namespace: wyze-bridge +spec: + rules: + - host: cams.wayl.one + http: + paths: + - backend: + service: + name: wyze-bridge + port: + number: 5000 + path: / + pathType: Prefix +status: + loadBalancer: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + 
creationTimestamp: null + labels: + io.kompose.service: img + name: img + namespace: wyze-bridge +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: record + name: record + namespace: wyze-bridge +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} +