bin/connect-standalone.sh config/connect-standalone.properties config/connector1.properties
[2016-10-17 20:57:03,615] INFO StandaloneConfig values:
    cluster = connect
    rest.advertised.host.name = null
    task.shutdown.graceful.timeout.ms = 5000
    rest.host.name = null
    rest.advertised.port = null
    bootstrap.servers = [192.168.10.40:9092]
    offset.flush.timeout.ms = 5000
    offset.flush.interval.ms = 10000
    rest.port = 8083
    internal.key.converter = class org.apache.kafka.connect.json.JsonConverter
    access.control.allow.methods =
    access.control.allow.origin =
    offset.storage.file.filename = /tmp/connect.offsets
    internal.value.converter = class org.apache.kafka.connect.json.JsonConverter
    value.converter = class org.apache.kafka.connect.json.JsonConverter
    key.converter = class org.apache.kafka.connect.json.JsonConverter (org.apache.kafka.connect.runtime.standalone.StandaloneConfig:178)
[2016-10-17 20:57:03,726] INFO Logging initialized @336ms (org.eclipse.jetty.util.log:186)
[2016-10-17 20:57:03,873] INFO Kafka Connect starting (org.apache.kafka.connect.runtime.Connect:52)
[2016-10-17 20:57:03,874] INFO Herder starting (org.apache.kafka.connect.runtime.standalone.StandaloneHerder:71)
[2016-10-17 20:57:03,874] INFO Worker starting (org.apache.kafka.connect.runtime.Worker:102)
[2016-10-17 20:57:03,881] INFO ProducerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    sasl.mechanism = GSSAPI
    max.block.ms = 9223372036854775807
    interceptor.classes = null
    ssl.truststore.password = null
    client.id =
    ssl.endpoint.identification.algorithm = null
    request.timeout.ms = 2147483647
    acks = all
    receive.buffer.bytes = 32768
    ssl.truststore.type = JKS
    retries = 2147483647
    ssl.truststore.location = null
    ssl.keystore.password = null
    send.buffer.bytes = 131072
    compression.type = none
    metadata.fetch.timeout.ms = 60000
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    buffer.memory = 33554432
    timeout.ms = 30000
    key.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    block.on.buffer.full = false
    ssl.key.password = null
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    max.in.flight.requests.per.connection = 1
    metrics.num.samples = 2
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    batch.size = 16384
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    max.request.size = 1048576
    value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
    linger.ms = 0 (org.apache.kafka.clients.producer.ProducerConfig:178)
[2016-10-17 20:57:03,903] INFO ProducerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    sasl.mechanism = GSSAPI
    max.block.ms = 9223372036854775807
    interceptor.classes = null
    ssl.truststore.password = null
    client.id = producer-1
    ssl.endpoint.identification.algorithm = null
    request.timeout.ms = 2147483647
    acks = all
    receive.buffer.bytes = 32768
    ssl.truststore.type = JKS
    retries = 2147483647
    ssl.truststore.location = null
    ssl.keystore.password = null
    send.buffer.bytes = 131072
    compression.type = none
    metadata.fetch.timeout.ms = 60000
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    buffer.memory = 33554432
    timeout.ms = 30000
    key.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    block.on.buffer.full = false
    ssl.key.password = null
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    max.in.flight.requests.per.connection = 1
    metrics.num.samples = 2
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    batch.size = 16384
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    max.request.size = 1048576
    value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
    linger.ms = 0 (org.apache.kafka.clients.producer.ProducerConfig:178)
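The StandaloneConfig block above is just the worker properties file (the first argument to connect-standalone.sh) echoed back with defaults filled in. A minimal config/connect-standalone.properties consistent with these values would look roughly like this; it is a reconstruction from the dump, not necessarily the exact file used, and several entries (rest.port, offset.flush.interval.ms) are the defaults anyway:

    bootstrap.servers=192.168.10.40:9092
    key.converter=org.apache.kafka.connect.json.JsonConverter
    value.converter=org.apache.kafka.connect.json.JsonConverter
    internal.key.converter=org.apache.kafka.connect.json.JsonConverter
    internal.value.converter=org.apache.kafka.connect.json.JsonConverter
    offset.storage.file.filename=/tmp/connect.offsets
    offset.flush.interval.ms=10000
    rest.port=8083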
[2016-10-17 20:57:03,904] INFO Kafka version : 0.10.0.1 (org.apache.kafka.common.utils.AppInfoParser:83)
[2016-10-17 20:57:03,904] INFO Kafka commitId : a7a17cdec9eaa6c5 (org.apache.kafka.common.utils.AppInfoParser:84)
[2016-10-17 20:57:03,905] INFO Starting FileOffsetBackingStore with file /tmp/connect.offsets (org.apache.kafka.connect.storage.FileOffsetBackingStore:60)
[2016-10-17 20:57:03,951] INFO Worker started (org.apache.kafka.connect.runtime.Worker:124)
[2016-10-17 20:57:03,951] INFO Herder started (org.apache.kafka.connect.runtime.standalone.StandaloneHerder:73)
[2016-10-17 20:57:03,951] INFO Starting REST server (org.apache.kafka.connect.runtime.rest.RestServer:98)
[2016-10-17 20:57:04,026] INFO jetty-9.2.15.v20160210 (org.eclipse.jetty.server.Server:327)
Oct 17, 2016 8:57:04 PM org.glassfish.jersey.internal.Errors logErrors
WARNING: The following warnings have been detected:
WARNING: The (sub)resource method listConnectors in org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource contains empty path annotation.
WARNING: The (sub)resource method createConnector in org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource contains empty path annotation.
WARNING: The (sub)resource method listConnectorPlugins in org.apache.kafka.connect.runtime.rest.resources.ConnectorPluginsResource contains empty path annotation.
WARNING: The (sub)resource method serverInfo in org.apache.kafka.connect.runtime.rest.resources.RootResource contains empty path annotation.
[2016-10-17 20:57:04,513] INFO Started o.e.j.s.ServletContextHandler@784b990c{/,null,AVAILABLE} (org.eclipse.jetty.server.handler.ContextHandler:744)
[2016-10-17 20:57:04,520] INFO Started ServerConnector@312ab28e{HTTP/1.1}{0.0.0.0:8083} (org.eclipse.jetty.server.ServerConnector:266)
[2016-10-17 20:57:04,521] INFO Started @1133ms (org.eclipse.jetty.server.Server:379)
[2016-10-17 20:57:04,521] INFO REST server listening at http://127.0.1.1:8083/, advertising URL http://127.0.1.1:8083/ (org.apache.kafka.connect.runtime.rest.RestServer:150)
[2016-10-17 20:57:04,521] INFO Kafka Connect started (org.apache.kafka.connect.runtime.Connect:58)
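The Jersey "empty path annotation" warnings are harmless; they name the same REST resources (listConnectors, serverInfo, ...) that become usable once "REST server listening" appears. At that point the worker can be checked over HTTP with the standard Connect endpoints, for example:

    curl http://127.0.1.1:8083/            # worker version and commit id
    curl http://127.0.1.1:8083/connectors  # names of deployed connectors

The advertised 127.0.1.1 address typically comes from a Debian-style /etc/hosts entry; it generally only matters for distributed mode, not for a standalone worker.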
[2016-10-17 20:57:04,522] INFO ConnectorConfig values:
    connector.class = io.debezium.connector.mysql.MySqlConnector
    tasks.max = 1
    name = TestingAgain-connector (org.apache.kafka.connect.runtime.ConnectorConfig:178)
[2016-10-17 20:57:04,523] INFO Creating connector TestingAgain-connector of type io.debezium.connector.mysql.MySqlConnector (org.apache.kafka.connect.runtime.Worker:168)
[2016-10-17 20:57:04,550] INFO Instantiated connector TestingAgain-connector with version 0.3.2 of type io.debezium.connector.mysql.MySqlConnector (org.apache.kafka.connect.runtime.Worker:176)
[2016-10-17 20:57:04,551] INFO Finished creating connector TestingAgain-connector (org.apache.kafka.connect.runtime.Worker:181)
[2016-10-17 20:57:04,552] INFO SourceConnectorConfig values:
    connector.class = io.debezium.connector.mysql.MySqlConnector
    tasks.max = 1
    name = TestingAgain-connector (org.apache.kafka.connect.runtime.SourceConnectorConfig:178)
[2016-10-17 20:57:04,556] INFO TaskConfig values:
    task.class = class io.debezium.connector.mysql.MySqlConnectorTask (org.apache.kafka.connect.runtime.TaskConfig:178)
[2016-10-17 20:57:04,557] INFO Creating task TestingAgain-connector-0 (org.apache.kafka.connect.runtime.Worker:315)
[2016-10-17 20:57:04,557] INFO Instantiated task TestingAgain-connector-0 with version 0.3.2 of type io.debezium.connector.mysql.MySqlConnectorTask (org.apache.kafka.connect.runtime.Worker:326)
[2016-10-17 20:57:04,579] INFO Created connector TestingAgain-connector (org.apache.kafka.connect.cli.ConnectStandalone:91)
[2016-10-17 20:57:04,751] INFO KafkaDatabaseHistory Consumer config: {enable.auto.commit=false, value.deserializer=org.apache.kafka.common.serialization.StringDeserializer, group.id=TestingAgain-connector-dbhistory, auto.offset.reset=earliest, session.timeout.ms=30000, bootstrap.servers=192.168.10.40:9092, client.id=TestingAgain-connector-dbhistory, key.deserializer=org.apache.kafka.common.serialization.StringDeserializer, fetch.min.bytes=1} (io.debezium.relational.history.KafkaDatabaseHistory:140)
[2016-10-17 20:57:04,752] INFO KafkaDatabaseHistory Producer config: {bootstrap.servers=192.168.10.40:9092, value.serializer=org.apache.kafka.common.serialization.StringSerializer, buffer.memory=1048576, retries=1, key.serializer=org.apache.kafka.common.serialization.StringSerializer, client.id=TestingAgain-connector-dbhistory, linger.ms=0, batch.size=32768, acks=1} (io.debezium.relational.history.KafkaDatabaseHistory:141)
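The ConnectorConfig dump only echoes the three generic Connect keys; the Debezium-specific settings also live in config/connector1.properties. A plausible reconstruction from what this log reveals (the JDBC URL and user further down, "sid:306" in the binlog-client line, the Cooldb topic prefix, and the dbhistory.Cooldb history topic) would be the following; the password and any database/table whitelist settings are not recoverable from the log:

    name=TestingAgain-connector
    connector.class=io.debezium.connector.mysql.MySqlConnector
    tasks.max=1
    database.hostname=192.168.10.9
    database.port=3306
    database.user=debezium
    database.password=...
    database.server.id=306
    database.server.name=Cooldb
    database.history.kafka.bootstrap.servers=192.168.10.40:9092
    database.history.kafka.topic=dbhistory.Cooldb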
[2016-10-17 20:57:04,755] INFO ProducerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    sasl.mechanism = GSSAPI
    max.block.ms = 60000
    interceptor.classes = null
    ssl.truststore.password = null
    client.id = TestingAgain-connector-dbhistory
    ssl.endpoint.identification.algorithm = null
    request.timeout.ms = 30000
    acks = 1
    receive.buffer.bytes = 32768
    ssl.truststore.type = JKS
    retries = 1
    ssl.truststore.location = null
    ssl.keystore.password = null
    send.buffer.bytes = 131072
    compression.type = none
    metadata.fetch.timeout.ms = 60000
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    buffer.memory = 1048576
    timeout.ms = 30000
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    block.on.buffer.full = false
    ssl.key.password = null
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    max.in.flight.requests.per.connection = 5
    metrics.num.samples = 2
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    batch.size = 32768
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    max.request.size = 1048576
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
    linger.ms = 0 (org.apache.kafka.clients.producer.ProducerConfig:178)
[2016-10-17 20:57:04,767] INFO ProducerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    sasl.mechanism = GSSAPI
    max.block.ms = 60000
    interceptor.classes = null
    ssl.truststore.password = null
    client.id = TestingAgain-connector-dbhistory
    ssl.endpoint.identification.algorithm = null
    request.timeout.ms = 30000
    acks = 1
    receive.buffer.bytes = 32768
    ssl.truststore.type = JKS
    retries = 1
    ssl.truststore.location = null
    ssl.keystore.password = null
    send.buffer.bytes = 131072
    compression.type = none
    metadata.fetch.timeout.ms = 60000
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    buffer.memory = 1048576
    timeout.ms = 30000
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    block.on.buffer.full = false
    ssl.key.password = null
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    max.in.flight.requests.per.connection = 5
    metrics.num.samples = 2
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    batch.size = 32768
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    max.request.size = 1048576
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
    linger.ms = 0 (org.apache.kafka.clients.producer.ProducerConfig:178)
[2016-10-17 20:57:04,770] INFO Kafka version : 0.10.0.1 (org.apache.kafka.common.utils.AppInfoParser:83)
[2016-10-17 20:57:04,771] INFO Kafka commitId : a7a17cdec9eaa6c5 (org.apache.kafka.common.utils.AppInfoParser:84)
[2016-10-17 20:57:04,802] INFO Found existing offset: {file=mysql-bin.000024, pos=276, row=0, snapshot=true} (io.debezium.connector.mysql.MySqlConnectorTask:79)
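"Found existing offset" is the task reading back what the worker persisted in /tmp/connect.offsets (the FileOffsetBackingStore started earlier); snapshot=true in that offset means the previous run stopped before its snapshot finished, which is why the task announces a few lines below that it is starting a new snapshot instead of resuming from the binlog. To force a completely clean run, stop the worker and delete the offset file; note this discards the offsets of every connector hosted by this standalone worker:

    rm /tmp/connect.offsets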
[2016-10-17 20:57:05,059] INFO ConsumerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    partition.assignment.strategy = [org.apache.kafka.clients.consumer.RangeAssignor]
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    max.partition.fetch.bytes = 1048576
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    enable.auto.commit = false
    sasl.mechanism = GSSAPI
    interceptor.classes = null
    exclude.internal.topics = true
    ssl.truststore.password = null
    client.id = TestingAgain-connector-dbhistory
    ssl.endpoint.identification.algorithm = null
    max.poll.records = 2147483647
    check.crcs = true
    request.timeout.ms = 40000
    heartbeat.interval.ms = 3000
    auto.commit.interval.ms = 5000
    receive.buffer.bytes = 65536
    ssl.truststore.type = JKS
    ssl.truststore.location = null
    ssl.keystore.password = null
    fetch.min.bytes = 1
    send.buffer.bytes = 131072
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    group.id = TestingAgain-connector-dbhistory
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    ssl.key.password = null
    fetch.max.wait.ms = 500
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    session.timeout.ms = 30000
    metrics.num.samples = 2
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    auto.offset.reset = earliest (org.apache.kafka.clients.consumer.ConsumerConfig:178)
[2016-10-17 20:57:05,063] INFO ConsumerConfig values:
    metric.reporters = []
    metadata.max.age.ms = 300000
    partition.assignment.strategy = [org.apache.kafka.clients.consumer.RangeAssignor]
    reconnect.backoff.ms = 50
    sasl.kerberos.ticket.renew.window.factor = 0.8
    max.partition.fetch.bytes = 1048576
    bootstrap.servers = [192.168.10.40:9092]
    ssl.keystore.type = JKS
    enable.auto.commit = false
    sasl.mechanism = GSSAPI
    interceptor.classes = null
    exclude.internal.topics = true
    ssl.truststore.password = null
    client.id = TestingAgain-connector-dbhistory
    ssl.endpoint.identification.algorithm = null
    max.poll.records = 2147483647
    check.crcs = true
    request.timeout.ms = 40000
    heartbeat.interval.ms = 3000
    auto.commit.interval.ms = 5000
    receive.buffer.bytes = 65536
    ssl.truststore.type = JKS
    ssl.truststore.location = null
    ssl.keystore.password = null
    fetch.min.bytes = 1
    send.buffer.bytes = 131072
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    group.id = TestingAgain-connector-dbhistory
    retry.backoff.ms = 100
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    ssl.trustmanager.algorithm = PKIX
    ssl.key.password = null
    fetch.max.wait.ms = 500
    sasl.kerberos.min.time.before.relogin = 60000
    connections.max.idle.ms = 540000
    session.timeout.ms = 30000
    metrics.num.samples = 2
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    ssl.protocol = TLS
    ssl.provider = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    ssl.keystore.location = null
    ssl.cipher.suites = null
    security.protocol = PLAINTEXT
    ssl.keymanager.algorithm = SunX509
    metrics.sample.window.ms = 30000
    auto.offset.reset = earliest (org.apache.kafka.clients.consumer.ConsumerConfig:178)
[2016-10-17 20:57:05,079] INFO Kafka version : 0.10.0.1 (org.apache.kafka.common.utils.AppInfoParser:83)
[2016-10-17 20:57:05,080] INFO Kafka commitId : a7a17cdec9eaa6c5 (org.apache.kafka.common.utils.AppInfoParser:84)
[2016-10-17 20:57:05,245] WARN Error while fetching metadata with correlation id 1 : {dbhistory.Cooldb=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient:600)
[2016-10-17 20:57:05,246] INFO Discovered coordinator 192.168.10.40:9092 (id: 2147483647 rack: null) for group TestingAgain-connector-dbhistory. (org.apache.kafka.clients.consumer.internals.AbstractCoordinator:505)
[2016-10-17 20:57:05,246] INFO Revoking previously assigned partitions [] for group TestingAgain-connector-dbhistory (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:292)
[2016-10-17 20:57:05,246] INFO (Re-)joining group TestingAgain-connector-dbhistory (org.apache.kafka.clients.consumer.internals.AbstractCoordinator:326)
[2016-10-17 20:57:05,281] INFO Successfully joined group TestingAgain-connector-dbhistory with generation 1 (org.apache.kafka.clients.consumer.internals.AbstractCoordinator:434)
[2016-10-17 20:57:05,282] INFO Setting newly assigned partitions [] for group TestingAgain-connector-dbhistory (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:231)
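The LEADER_NOT_AVAILABLE warning above is normally transient: with auto.create.topics.enable=true the broker creates dbhistory.Cooldb on first contact and the client retries until a leader is elected. To avoid depending on broker-side auto-creation (and its default partition and replication settings), the database-history topic can be created up front; Debezium expects it to have a single partition. A sketch, assuming ZooKeeper runs alongside the broker on its default port:

    bin/kafka-topics.sh --zookeeper 192.168.10.40:2181 --create \
        --topic dbhistory.Cooldb --partitions 1 --replication-factor 1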
[2016-10-17 20:57:05,388] INFO Reflections took 1405 ms to scan 64 urls, producing 3107 keys and 22371 values (org.reflections.Reflections:229)
[2016-10-17 20:57:05,691] INFO Prior execution was an incomplete snapshot, so starting new snapshot (io.debezium.connector.mysql.MySqlConnectorTask:94)
[2016-10-17 20:57:05,702] INFO binlog_format={}ROW (io.debezium.connector.mysql.MySqlConnectorTask:369)
[2016-10-17 20:57:05,727] INFO Source task WorkerSourceTask{id=TestingAgain-connector-0} finished initialization and start (org.apache.kafka.connect.runtime.WorkerSourceTask:138)
[2016-10-17 20:57:05,727] INFO Starting snapshot for jdbc:mysql://192.168.10.9:3306/?useInformationSchema=true&nullCatalogMeansCurrent=false&useSSL=false&useUnicode=true&characterEncoding=UTF-8&characterSetResults=UTF-8&zeroDateTimeBehavior=convertToNull with user 'debezium' (io.debezium.connector.mysql.SnapshotReader:153)
[2016-10-17 20:57:05,730] INFO Snapshot is using user 'debezium' with these MySQL grants: (io.debezium.connector.mysql.SnapshotReader:514)
[2016-10-17 20:57:05,731] INFO GRANT ALL PRIVILEGES ON *.* TO 'debezium'@'%' IDENTIFIED BY PASSWORD '*A33874BB36A7B89FE97C225853E425948AECD613' WITH GRANT OPTION (io.debezium.connector.mysql.SnapshotReader:515)
[2016-10-17 20:57:05,731] INFO MySQL server variables related to change data capture: (io.debezium.connector.mysql.SnapshotReader:487)
[2016-10-17 20:57:05,739] INFO binlog_cache_size = 32768 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_checksum = CRC32 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_direct_non_transactional_updates = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_error_action = IGNORE_ERROR (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_format = ROW (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_gtid_simple_recovery = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_max_flush_queue_time = 0 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_order_commits = ON (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,740] INFO binlog_row_image = FULL (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO binlog_rows_query_log_events = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO binlog_stmt_cache_size = 32768 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO binlogging_impossible_mode = IGNORE_ERROR (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO character_set_client = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO character_set_connection = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO character_set_database = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,741] INFO character_set_filesystem = binary (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO character_set_results = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO character_set_server = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO character_set_system = utf8 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO character_sets_dir = /mnt/data/mysql/share/charsets/ (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO collation_connection = utf8_general_ci (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO collation_database = utf8_general_ci (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO collation_server = utf8_general_ci (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,742] INFO enforce_gtid_consistency = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO gtid_executed = (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO gtid_mode = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO gtid_next = AUTOMATIC (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO gtid_owned = (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO gtid_purged = (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO innodb_api_enable_binlog = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO innodb_locks_unsafe_for_binlog = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,743] INFO innodb_version = 5.6.33 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO max_binlog_cache_size = 18446744073709547520 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO max_binlog_size = 10485760 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO max_binlog_stmt_cache_size = 18446744073709547520 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO protocol_version = 10 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO simplified_binlog_gtid_recovery = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO slave_type_conversions = (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO sync_binlog = 0 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO tx_isolation = REPEATABLE-READ (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO tx_read_only = OFF (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,744] INFO version = 5.6.33-log (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,745] INFO version_comment = MySQL Community Server (GPL) (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,745] INFO version_compile_machine = x86_64 (io.debezium.connector.mysql.SnapshotReader:490)
[2016-10-17 20:57:05,745] INFO version_compile_os = linux-glibc2.5 (io.debezium.connector.mysql.SnapshotReader:490)
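The variable dump confirms the prerequisites Debezium cares about on this server: binlog_format = ROW and binlog_row_image = FULL, with GTID mode off. For reference, the usual my.cnf fragment that produces this state looks like the following (the server-id value is a placeholder; it only needs to be unique and non-zero):

    [mysqld]
    server-id        = 223344
    log_bin          = mysql-bin
    binlog_format    = ROW
    binlog_row_image = FULL

Separately, the GRANT ALL ... WITH GRANT OPTION shown above works but is far broader than needed. Debezium's documented minimum for the connector user is:

    GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT
        ON *.* TO 'debezium'@'%';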
[2016-10-17 20:57:05,745] INFO Step 0: disabling autocommit and enabling repeatable read transactions (io.debezium.connector.mysql.SnapshotReader:170)
[2016-10-17 20:57:05,750] INFO Step 1: start transaction with consistent snapshot (io.debezium.connector.mysql.SnapshotReader:184)
[2016-10-17 20:57:05,758] INFO Step 2: flush and obtain global read lock (preventing writes to database) (io.debezium.connector.mysql.SnapshotReader:195)
[2016-10-17 20:57:05,759] INFO Step 3: read binlog position of MySQL master (io.debezium.connector.mysql.SnapshotReader:204)
[2016-10-17 20:57:05,761] INFO using binlog 'mysql-bin.000024' at position '276' and gtid '' (io.debezium.connector.mysql.SnapshotReader:216)
[2016-10-17 20:57:05,762] INFO Step 4: read list of available databases (io.debezium.connector.mysql.SnapshotReader:235)
[2016-10-17 20:57:05,767] INFO list of available databases is: [information_schema, NextErpQa, NextErptest, bootstrap, mysql, or_test, performance_schema] (io.debezium.connector.mysql.SnapshotReader:243)
[2016-10-17 20:57:05,767] INFO Step 5: read list of available tables in each database (io.debezium.connector.mysql.SnapshotReader:251)
[2016-10-17 20:57:05,771] INFO 'information_schema.CHARACTER_SETS' is filtered out, discarding (io.debezium.connector.mysql.SnapshotReader:264)
[2016-10-17 20:57:05,877] INFO Step 6: generating DROP and CREATE statements to reflect current database schemas: (io.debezium.connector.mysql.SnapshotReader:275)
[2016-10-17 20:57:05,882] INFO SET character_set_server=utf8, collation_server=utf8_general_ci; (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,032] INFO DROP TABLE IF EXISTS NextErptest.test (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,037] INFO DROP TABLE IF EXISTS NextErptest.TEST (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,043] INFO DROP DATABASE IF EXISTS NextErptest (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,046] INFO CREATE DATABASE NextErptest (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,050] INFO USE NextErptest (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,139] INFO CREATE TABLE `TEST` ( `ID` binary(16) DEFAULT NULL, `NAME` varchar(255) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8 (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,199] INFO CREATE TABLE `test` ( `id` int(11) NOT NULL, `name` varchar(20) NOT NULL DEFAULT '0', `city` varchar(20) NOT NULL DEFAULT '0', `mobile` int(11) NOT NULL DEFAULT '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 (io.debezium.connector.mysql.SnapshotReader:547)
[2016-10-17 20:57:06,210] INFO Step 7: releasing global read lock to enable MySQL writes (io.debezium.connector.mysql.SnapshotReader:315)
[2016-10-17 20:57:06,215] INFO Step 7: blocked writes to MySQL for a total of 00:00:00.455 (io.debezium.connector.mysql.SnapshotReader:320)
[2016-10-17 20:57:06,217] INFO Step 8: scanning contents of 2 tables (io.debezium.connector.mysql.SnapshotReader:331)
[2016-10-17 20:57:06,640] INFO Step 8: - scanning table 'NextErptest.TEST' (1 of 2 tables) (io.debezium.connector.mysql.SnapshotReader:359)
[2016-10-17 20:57:06,646] INFO Step 8: - 1 of 1 rows scanned from table 'NextErptest.TEST' after 00:00:00.006 (io.debezium.connector.mysql.SnapshotReader:377)
[2016-10-17 20:57:06,648] INFO Step 8: - scanning table 'NextErptest.test' (2 of 2 tables) (io.debezium.connector.mysql.SnapshotReader:359)
[2016-10-17 20:57:06,650] INFO Step 8: - 6 of 6 rows scanned from table 'NextErptest.test' after 00:00:00.002 (io.debezium.connector.mysql.SnapshotReader:377)
[2016-10-17 20:57:06,652] INFO Step 8: scanned 7 rows in 2 tables in 00:00:00.434 (io.debezium.connector.mysql.SnapshotReader:402)
[2016-10-17 20:57:06,652] INFO Step 9: committing transaction (io.debezium.connector.mysql.SnapshotReader:437)
[2016-10-17 20:57:06,654] INFO Completed snapshot in 00:00:00.926 (io.debezium.connector.mysql.SnapshotReader:449)
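Steps 0 through 9 are the classic consistent-snapshot recipe. Expressed as the approximate SQL sequence (a sketch of what the SnapshotReader does, not its literal statements):

    SET autocommit = 0;
    SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;      -- step 0
    START TRANSACTION WITH CONSISTENT SNAPSHOT;           -- step 1
    FLUSH TABLES WITH READ LOCK;                          -- step 2: blocks writes
    SHOW MASTER STATUS;                                   -- step 3: mysql-bin.000024 / 276
    SHOW DATABASES; SHOW TABLES; SHOW CREATE TABLE ...;   -- steps 4-6
    UNLOCK TABLES;                                        -- step 7: writes resume
    SELECT * FROM <each monitored table>;                 -- step 8, inside the open transaction
    COMMIT;                                               -- step 9

This is why writes were only blocked for the 455 ms between steps 2 and 7, while the table scan itself ran against the transaction's consistent view.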
[2016-10-17 20:57:06,964] WARN Error while fetching metadata with correlation id 4 : {Cooldb.NextErptest.TEST=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient:600)
[2016-10-17 20:57:07,280] WARN Error while fetching metadata with correlation id 7 : {Cooldb.NextErptest.test=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient:600)
Oct 17, 2016 8:57:07 PM com.github.shyiko.mysql.binlog.BinaryLogClient connect
INFO: Connected to 192.168.10.9:3306 at mysql-bin.000024/276 (sid:306, cid:6473)
[2016-10-17 20:57:07,612] INFO Connected to MySQL binlog at 192.168.10.9:3306, starting at binlog file 'mysql-bin.000024', pos=276, row=0 (io.debezium.connector.mysql.BinlogReader:453)
[2016-10-17 20:57:14,568] INFO Finished WorkerSourceTask{id=TestingAgain-connector-0} commitOffsets successfully in 5 ms (org.apache.kafka.connect.runtime.WorkerSourceTask:358)
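With the connector now tailing the binlog from mysql-bin.000024/276, both the snapshot rows and any subsequent changes land in Kafka under <server.name>.<database>.<table> topics; that is where Cooldb.NextErptest.TEST and Cooldb.NextErptest.test in the two warnings above come from (again just first-use auto-creation). The change events can be inspected with the stock console consumer:

    bin/kafka-console-consumer.sh --bootstrap-server 192.168.10.40:9092 \
        --topic Cooldb.NextErptest.test --from-beginning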