  AMQ Streams / ENTMQST-754

Storage type changes not handled properly when accompanied by other changes


Details

    • Type: Bug
    • Resolution: Done
    • Priority: Major
    • Fix Version/s: 1.2.0.GA
    • Sprint: 2019.3, 2019.4

    Description

      It seems that the reconciliation of storage changes does not always work when the storage change is accompanied by other changes that also affect the volumes, for example changing from persistent storage to JBOD while also switching the external listener from node ports to routes. I ran into issues with the following example custom resources; the stanzas that differ between the two are excerpted right below, followed by the full resources.
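
      For quick comparison, these are the parts of the spec that change between the original and the new resource (excerpted from the full custom resources below):

      # Kafka external listener: node ports (original) -> routes (new)
      # original (per-broker advertisedHost/nodePort overrides omitted here, see the full resource)
      external:
        type: nodeport
        authentication:
          type: tls
        tls: true
      # new
      external:
        type: route
        authentication:
          type: tls

      # Kafka storage: single persistent claim (original) -> JBOD with two volumes (new)
      # original
      storage:
        type: persistent-claim
        size: 100Gi
        deleteClaim: true
      # new
      storage:
        type: jbod
        volumes:
        - id: 0
          type: persistent-claim
          size: 100Gi
          deleteClaim: true
        - id: 1
          type: persistent-claim
          size: 100Gi
          deleteClaim: true

      # CA validity: commented out (original) -> set (new)
      clusterCa:
        validityDays: 1000
      clientsCa:
        validityDays: 1000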

      The original custom resource was:

      apiVersion: kafka.strimzi.io/v1alpha1
      kind: Kafka
      metadata:
        name: my-cluster
        labels:
          app: my-cluster
      spec:
        kafka:
          replicas: 3
          resources:
            requests:
              memory: 2Gi
              cpu: 500m
            limits:
              memory: 2Gi
              cpu: "1"
          jvmOptions:
            -Xms: 1024m
            -Xmx: 1024m
          listeners:
            plain:
              authentication:
                type: scram-sha-512
              networkPolicyPeers:
                - podSelector:
                    matchLabels:
                      app: kafka-consumer
                - podSelector:
                    matchLabels:
                      app: kafka-producer
            tls:
              authentication:
                type: tls
              networkPolicyPeers:
                - podSelector:
                    matchLabels:
                      app: kafka-consumer
                - podSelector:
                    matchLabels:
                      app: kafka-producer
            external:
              type: nodeport
              authentication:
                type: tls
              tls: true
              overrides:
                bootstrap:
                  nodePort: 31313
                brokers:
                - broker: 0
                  advertisedHost: minishift-0
                  #advertisedPort: 32300
                  nodePort: 31300
                - broker: 1
                  advertisedHost: minishift-1
                  #advertisedPort: 32301
                  nodePort: 31301
                - broker: 2
                  advertisedHost: minishift-2
                  #advertisedPort: 32302
                  nodePort: 31302
          authorization:
            type: simple
            superUsers:
              - CN=my-connect
          readinessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          livenessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          config:
            auto.create.topics.enable: "false"
            offsets.topic.replication.factor: 3
            transaction.state.log.replication.factor: 3
            transaction.state.log.min.isr: 2
          storage:
            type: persistent-claim
            size: 100Gi
            #class: hdd
            deleteClaim: true
          #rack:
          #  topologyKey: failure-domain.beta.kubernetes.io/zone
          template:
            statefulset:
              metadata:
                labels:
                  key1: label1x
                  key2: label2x
                annotations:
                  akey1: label1
                  akey2: label2
            pod:
              metadata:
                labels:
                  key1: label1y
                  key2: label2y
                annotations:
                  akey1: label1
                  akey2: label2
      #        securityContext:
      #          runAsUser: 1000001
      #          #runAsGroup: 1000001
      #          fsGroup: 1000001
            bootstrapService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            brokersService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            externalBootstrapService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            perPodService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            externalBootstrapRoute:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            perPodRoute:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
        zookeeper:
          replicas: 3
          resources:
            requests:
              memory: 1Gi
              cpu: "0.3"
            limits:
              memory: 1Gi
              cpu: "0.5"
          jvmOptions:
            -Xms: 512m
            -Xmx: 512m
          readinessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          livenessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          storage:
            type: persistent-claim
            size: 100Gi
            #class: ssd
            deleteClaim: true
          template:
            statefulset:
              metadata:
                labels:
                  key1: label1z
                  key2: label2z
                annotations:
                  akey1: label1
                  akey2: label2
            pod:
              metadata:
                labels:
                  key1: label1v
                  key2: label2v
                annotations:
                  akey1: label1
                  akey2: label2
      #        securityContext:
      #          runAsUser: 1000001
      #          #runAsGroup: 1000001
      #          fsGroup: 1000001
            clientService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            nodesService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
        entityOperator:
          topicOperator:
            resources:
              requests:
                memory: 256Mi
                cpu: "0.2"
              limits:
                memory: 256Mi
                cpu: "0.5"
            logging:
              loggers:
                rootLogger.level: DEBUG
              type: inline
          userOperator:
            resources:
              requests:
                memory: 512Mi
                cpu: "0.2"
              limits:
                memory: 512Mi
                cpu: "0.5"
            logging:
              loggers:
                rootLogger.level: INFO
              type: inline
            template:
              deployment:
                metadata:
                  labels:
                    key1: label1
                    key2: label2
                  annotations:
                    akey1: label1
                    akey2: label2
              pod:
                metadata:
                  labels:
                    key1: label1
                    key2: label2
                  annotations:
                    akey1: label1
                    akey2: label2
      #  clusterCa:
      #    validityDays: 1000
      #  clientsCa:
      #    validityDays: 1000
      

      The new custom resource was:

      apiVersion: kafka.strimzi.io/v1alpha1
      kind: Kafka
      metadata:
        name: my-cluster
        labels:
          app: my-cluster
      spec:
        kafka:
          replicas: 3
          resources:
            requests:
              memory: 2Gi
              cpu: 500m
            limits:
              memory: 2Gi
              cpu: "1"
          jvmOptions:
            -Xms: 1024m
            -Xmx: 1024m
          listeners:
            plain:
              authentication:
                type: scram-sha-512
              networkPolicyPeers:
                - podSelector:
                    matchLabels:
                      app: kafka-consumer
                - podSelector:
                    matchLabels:
                      app: kafka-producer
            tls:
              authentication:
                type: tls
              networkPolicyPeers:
                - podSelector:
                    matchLabels:
                      app: kafka-consumer
                - podSelector:
                    matchLabels:
                      app: kafka-producer
            external:
              type: route
              authentication:
                type: tls
          authorization:
            type: simple
            superUsers:
              - CN=my-connect
          readinessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          livenessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          config:
            auto.create.topics.enable: "false"
            offsets.topic.replication.factor: 3
            transaction.state.log.replication.factor: 3
            transaction.state.log.min.isr: 2
          storage:
            type: jbod
            volumes:
            - id: 0
              type: persistent-claim
              size: 100Gi
              deleteClaim: true
            - id: 1
              type: persistent-claim
              size: 100Gi
              deleteClaim: true
          #rack:
          #  topologyKey: failure-domain.beta.kubernetes.io/zone
          template:
            statefulset:
              metadata:
                labels:
                  key1: label1x
                  key2: label2x
                annotations:
                  akey1: label1
                  akey2: label2
            pod:
              metadata:
                labels:
                  key1: label1y
                  key2: label2y
                annotations:
                  akey1: label1
                  akey2: label2
      #        securityContext:
      #          runAsUser: 1000001
      #          #runAsGroup: 1000001
      #          fsGroup: 1000001
            bootstrapService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            brokersService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            externalBootstrapService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            perPodService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            externalBootstrapRoute:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            perPodRoute:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
        zookeeper:
          replicas: 3
          resources:
            requests:
              memory: 1Gi
              cpu: "0.3"
            limits:
              memory: 1Gi
              cpu: "0.5"
          jvmOptions:
            -Xms: 512m
            -Xmx: 512m
          readinessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          livenessProbe:
            initialDelaySeconds: 15
            timeoutSeconds: 5
          storage:
            type: persistent-claim
            size: 100Gi
            #class: ssd
            deleteClaim: true
          template:
            statefulset:
              metadata:
                labels:
                  key1: label1z
                  key2: label2z
                annotations:
                  akey1: label1
                  akey2: label2
            pod:
              metadata:
                labels:
                  key1: label1v
                  key2: label2v
                annotations:
                  akey1: label1
                  akey2: label2
      #        securityContext:
      #          runAsUser: 1000001
      #          #runAsGroup: 1000001
      #          fsGroup: 1000001
            clientService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
            nodesService:
              metadata:
                labels:
                  key1: label1
                  key2: label2
                annotations:
                  akey1: label1
                  akey2: label2
        entityOperator:
          topicOperator:
            resources:
              requests:
                memory: 256Mi
                cpu: "0.2"
              limits:
                memory: 256Mi
                cpu: "0.5"
            logging:
              loggers:
                rootLogger.level: DEBUG
              type: inline
          userOperator:
            resources:
              requests:
                memory: 512Mi
                cpu: "0.2"
              limits:
                memory: 512Mi
                cpu: "0.5"
            logging:
              loggers:
                rootLogger.level: INFO
              type: inline
            template:
              deployment:
                metadata:
                  labels:
                    key1: label1
                    key2: label2
                  annotations:
                    akey1: label1
                    akey2: label2
              pod:
                metadata:
                  labels:
                    key1: label1
                    key2: label2
                  annotations:
                    akey1: label1
                    akey2: label2
        clusterCa:
          validityDays: 1000
        clientsCa:
          validityDays: 1000
      

          People

            Assignee: Andriy Sorokhtey (asorokht) (Inactive)
            Reporter: Jakub Scholz (scholzj)
            Votes: 0
            Watchers: 3
