[mysqld]
datadir=/var/lib/mysql/data/
socket=/var/lib/mysql/mysql.sock
transaction-isolation = READ-COMMITTED
# Disabling symbolic-links is recommended to prevent assorted security risks;
# to do so, uncomment this line:
#lower_case_table_names=1
symbolic-links = 0
key_buffer_size = 32M
max_allowed_packet = 32M
thread_stack = 256K
thread_cache_size = 64
#query_cache_limit = 8M
#query_cache_size = 64M
#query_cache_type = 1
max_connections = 550
#expire_logs_days = 10
#max_binlog_size = 100M
#log_bin should be on a disk with enough free space.
#Replace '/var/lib/mysql/mysql_binary_log' with an appropriate path for your
#system and chown the specified folder to the mysql user.
log_bin=/var/lib/mysql/binlog/mysql-bin
#In later versions of MySQL, if you enable the binary log and do not set
#a server_id, MySQL will not start. The server_id must be unique within
#the replicating group.
server_id=11
binlog_format = mixed
read_buffer_size = 2M
read_rnd_buffer_size = 16M
sort_buffer_size = 8M
join_buffer_size = 8M
# InnoDB settings
innodb_file_per_table = 1
innodb_flush_log_at_trx_commit = 2
innodb_log_buffer_size = 64M
innodb_buffer_pool_size = 4G
innodb_thread_concurrency = 8
innodb_flush_method = O_DIRECT
innodb_log_file_size = 512M

[mysqld_safe]
# NOTE(review): `prompt` is a mysql *client* option (normally under [mysql]) and
# `sql_mode` is a server option (normally under [mysqld]); mysqld_safe will not
# apply them here — verify the intended sections before deploying this file.
prompt=mysql5729_db01 [\\d]>
sql_mode=STRICT_ALL_TABLES
socket=/var/lib/mysql/mysql.sock
Failed to add storage directory [DISK]file:/data1/dfs/dn java.io.IOException: Incompatible clusterIDs in /data1/dfs/dn: namenodeclusterID= cluster8; datanodeclusterID= cluster7 at org.apache.hadoop.hdfs.server.datanode.DataStorage.doTransition(DataStorage.java:722) at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadStorageDirectory(DataStorage.java:286) at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadDataStorage(DataStorage.java:399) at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:379) at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:544) at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1740) at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1676) at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:390) at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:282) at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:822) at java.lang.Thread.run(Thread.java:748)
已启用透明大页面压缩,可能会导致重大性能问题。请运行"echo never > /sys/kernel/mm/transparent_hugepage/defrag"和"echo never > /sys/kernel/mm/transparent_hugepage/enabled"以禁用此设置,然后将同一命令添加到 /etc/rc.local 等初始化脚本中,以便在系统重启时予以设置。以下主机将受到影响:
解决方式
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo never > /sys/kernel/mm/transparent_hugepage/enabled
Fatal error during KafkaServer startup. Prepare to shutdown kafka.common.InconsistentBrokerIdException: Configured broker.id 56 doesn't match stored broker.id 102 in meta.properties. If you moved your data, make sure your configured broker.id matches. If you intend to create a new broker, you should remove all data in your data directories (log.dirs). at kafka.server.KafkaServer.getBrokerIdAndOfflineDirs(KafkaServer.scala:707) at kafka.server.KafkaServer.startup(KafkaServer.scala:212) at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:42) at kafka.Kafka$.main(Kafka.scala:75) at com.cloudera.kafka.wrap.Kafka$$anonfun$1.apply(Kafka.scala:92) at com.cloudera.kafka.wrap.Kafka$$anonfun$1.apply(Kafka.scala:92) at com.cloudera.kafka.wrap.Kafka$.runMain(Kafka.scala:103) at com.cloudera.kafka.wrap.Kafka$.main(Kafka.scala:95) at com.cloudera.kafka.wrap.Kafka.main(Kafka.scala)
2021-01-22 14:42:14,126 ERROR ParcelUpdateService:com.cloudera.parcel.components.ParcelDownloaderImpl: (11 skipped) Unable to retrieve remote parcel repository manifest java.util.concurrent.ExecutionException: java.net.UnknownHostException: archive.cloudera.com: 未知的名称或服务 at com.ning.http.client.providers.netty.future.NettyResponseFuture.abort(NettyResponseFuture.java:231) at com.ning.http.client.providers.netty.request.NettyRequestSender.abort(NettyRequestSender.java:422) at com.ning.http.client.providers.netty.request.NettyRequestSender.sendRequestWithNewChannel(NettyRequestSender.java:290) at com.ning.http.client.providers.netty.request.NettyRequestSender.sendRequestWithCertainForceConnect(NettyRequestSender.java:142) at com.ning.http.client.providers.netty.request.NettyRequestSender.sendRequest(NettyRequestSender.java:117) at com.ning.http.client.providers.netty.NettyAsyncHttpProvider.execute(NettyAsyncHttpProvider.java:87) at com.ning.http.client.AsyncHttpClient.executeRequest(AsyncHttpClient.java:506) at com.ning.http.client.AsyncHttpClient$BoundRequestBuilder.execute(AsyncHttpClient.java:229) at com.cloudera.parcel.components.ParcelDownloaderImpl.getRepositoryInfoFuture(ParcelDownloaderImpl.java:592) at com.cloudera.parcel.components.ParcelDownloaderImpl.getRepositoryInfo(ParcelDownloaderImpl.java:544) at com.cloudera.parcel.components.ParcelDownloaderImpl.syncRemoteRepos(ParcelDownloaderImpl.java:357) at com.cloudera.parcel.components.ParcelDownloaderImpl$1.run(ParcelDownloaderImpl.java:464) at com.cloudera.parcel.components.ParcelDownloaderImpl$1.run(ParcelDownloaderImpl.java:459) at com.cloudera.cmf.persist.ReadWriteDatabaseTaskCallable.call(ReadWriteDatabaseTaskCallable.java:36) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: 
java.net.UnknownHostException: archive.cloudera.com: 未知的名称或服务 at java.net.Inet4AddressImpl.lookupAllHostAddr(Native Method) at java.net.InetAddress$2.lookupAllHostAddr(InetAddress.java:928) at java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1323) at java.net.InetAddress.getAllByName0(InetAddress.java:1276) at java.net.InetAddress.getAllByName(InetAddress.java:1192) at java.net.InetAddress.getAllByName(InetAddress.java:1126) at java.net.InetAddress.getByName(InetAddress.java:1076) at com.ning.http.client.NameResolver$JdkNameResolver.resolve(NameResolver.java:28) at com.ning.http.client.providers.netty.request.NettyRequestSender.remoteAddress(NettyRequestSender.java:358) at com.ning.http.client.providers.netty.request.NettyRequestSender.connect(NettyRequestSender.java:369) at com.ning.http.client.providers.netty.request.NettyRequestSender.sendRequestWithNewChannel(NettyRequestSender.java:283) ... 15 more
Failed to add storage directory [DISK]file:/data1/dfs/dn java.io.IOException: Incompatible clusterIDs in /data1/dfs/dn: namenodeclusterID= cluster8; datanodeclusterID= cluster7 at org.apache.hadoop.hdfs.server.datanode.DataStorage.doTransition(DataStorage.java:722) at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadStorageDirectory(DataStorage.java:286) at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadDataStorage(DataStorage.java:399) at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:379) at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:544) at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1740) at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1676) at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:390) at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:282) at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:822) at java.lang.Thread.run(Thread.java:748)