This article, based on MongoDB 4.0, describes how to set up a sharded cluster service. The environment is shown in the table below:

Host               | Config server (configRS) | mongos router | Shard instances
hdp06.thinkjoy.tt  | 27017                    | 27018         | 27019 (myShard_1), 27020 (myShard_2), 27021 (myShard_3)
hdp07.thinkjoy.tt  | 27017                    | 27018         | 27019 (myShard_1), 27020 (myShard_2), 27021 (myShard_3)
hdp08.thinkjoy.tt  | 27017                    | 27018         | 27019 (myShard_1), 27020 (myShard_2), 27021 (myShard_3)
Create the following directories on each of the three nodes (the per-shard log directories are needed for the log paths configured later):
[root@hdp06 ~]# mkdir -p /data/mongo/{config,router,shard}
[root@hdp06 ~]# mkdir -p /data/mongo/config/{data,logs}
[root@hdp06 ~]# mkdir -p /data/mongo/router/logs
[root@hdp06 ~]# mkdir -p /data/mongo/shard/{data,logs}
[root@hdp06 ~]# mkdir -p /data/mongo/shard/data/{shard1,shard2,shard3}
[root@hdp06 ~]# mkdir -p /data/mongo/shard/logs/{shard1,shard2,shard3}
[root@hdp06 ~]# chown -R mongod:mongod /data
First configure the config servers. Create the configuration file on hdp06:

[root@hdp06 ~]# vi /data/mongo/config/mongodb.config
net:
  bindIp: 0.0.0.0
  port: 27017
processManagement:
  fork: "true"
replication:
  replSetName: configRS
sharding:
  clusterRole: configsvr
storage:
  dbPath: /data/mongo/config/data
systemLog:
  destination: file
  path: /data/mongo/config/logs/mongodb.log
Copy the configuration file to the other two nodes:

[root@hdp06 ~]# scp /data/mongo/config/mongodb.config hdp07:/data/mongo/config
[root@hdp06 ~]# scp /data/mongo/config/mongodb.config hdp08:/data/mongo/config
Start the config server on all three nodes:

[root@hdp06 ~]# mongod -f /data/mongo/config/mongodb.config
[root@hdp07 ~]# mongod -f /data/mongo/config/mongodb.config
[root@hdp08 ~]# mongod -f /data/mongo/config/mongodb.config
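Before initializing the replica set, it can be worth confirming that each config server is actually up and answering on port 27017. A minimal sanity check, assuming the mongo shell is installed on every node (this check is not part of the original walkthrough); run it on hdp06, hdp07 and hdp08:

# Ping the local config server; an "ok" : 1 response means the mongod is reachable
mongo --port 27017 --eval 'printjson(db.runCommand({ ping: 1 }))'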
On one of the nodes, initialize the config server replica set:
[root@hdp06 ~]# mongo
> rs.initiate(
    {
      _id: "configRS",
      configsvr: true,
      members: [
        { _id : 0, host : "hdp06.thinkjoy.tt:27017" },
        { _id : 1, host : "hdp07.thinkjoy.tt:27017" },
        { _id : 2, host : "hdp08.thinkjoy.tt:27017" }
      ]
    }
  )
-- The output is as follows:
{
    "ok" : 1,
    "operationTime" : Timestamp(1534816254, 1),
    "$gleStats" : {
        "lastOpTime" : Timestamp(1534816254, 1),
        "electionId" : ObjectId("000000000000000000000000")
    },
    "lastCommittedOpTime" : Timestamp(0, 0),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534816254, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
-- Verify from another node:
[root@hdp07 ~]# mongo
......
configRS:SECONDARY> rs.status()
{
    "set" : "configRS",
    "date" : ISODate("2018-08-21T01:52:59.734Z"),
    "myState" : 2,
    "term" : NumberLong(1),
    "syncingTo" : "hdp06.thinkjoy.tt:27017",
    "syncSourceHost" : "hdp06.thinkjoy.tt:27017",
    "syncSourceId" : 0,
    "configsvr" : true,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "optimes" : {
        "lastCommittedOpTime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
        "readConcernMajorityOpTime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
        "appliedOpTime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
        "durableOpTime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) }
    },
    "lastStableCheckpointTimestamp" : Timestamp(1534816327, 1),
    "members" : [
        {
            "_id" : 0,
            "name" : "hdp06.thinkjoy.tt:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 115,
            "optime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
            "optimeDurable" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2018-08-21T01:52:44Z"),
            "optimeDurableDate" : ISODate("2018-08-21T01:52:44Z"),
            "lastHeartbeat" : ISODate("2018-08-21T01:52:57.945Z"),
            "lastHeartbeatRecv" : ISODate("2018-08-21T01:52:59.093Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "electionTime" : Timestamp(1534816265, 1),
            "electionDate" : ISODate("2018-08-21T01:51:05Z"),
            "configVersion" : 1
        },
        {
            "_id" : 1,
            "name" : "hdp07.thinkjoy.tt:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 320,
            "optime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2018-08-21T01:52:44Z"),
            "syncingTo" : "hdp06.thinkjoy.tt:27017",
            "syncSourceHost" : "hdp06.thinkjoy.tt:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 2,
            "name" : "hdp08.thinkjoy.tt:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 115,
            "optime" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
            "optimeDurable" : { "ts" : Timestamp(1534816364, 1), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2018-08-21T01:52:44Z"),
            "optimeDurableDate" : ISODate("2018-08-21T01:52:44Z"),
            "lastHeartbeat" : ISODate("2018-08-21T01:52:57.945Z"),
            "lastHeartbeatRecv" : ISODate("2018-08-21T01:52:57.944Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "hdp06.thinkjoy.tt:27017",
            "syncSourceHost" : "hdp06.thinkjoy.tt:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1
        }
    ],
    "ok" : 1,
    "operationTime" : Timestamp(1534816364, 1),
    "$gleStats" : {
        "lastOpTime" : Timestamp(0, 0),
        "electionId" : ObjectId("000000000000000000000000")
    },
    "lastCommittedOpTime" : Timestamp(1534816364, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534816364, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
Next configure the shard servers. Three shard replica sets (myShard_1, myShard_2, myShard_3) run on every node; create one configuration file per shard on hdp06:

[root@hdp06 ~]# vi /data/mongo/shard/data/shard1/mongod.shard1
net:
  bindIp: 0.0.0.0
  port: 27019
processManagement:
  fork: "true"
replication:
  replSetName: myShard_1
sharding:
  clusterRole: shardsvr
storage:
  dbPath: /data/mongo/shard/data/shard1
systemLog:
  destination: file
  path: /data/mongo/shard/logs/shard1/mongodbs1.log

[root@hdp06 ~]# vi /data/mongo/shard/data/shard2/mongod.shard2
net:
  bindIp: 0.0.0.0
  port: 27020
processManagement:
  fork: "true"
replication:
  replSetName: myShard_2
sharding:
  clusterRole: shardsvr
storage:
  dbPath: /data/mongo/shard/data/shard2
systemLog:
  destination: file
  path: /data/mongo/shard/logs/shard2/mongodbs2.log

[root@hdp06 ~]# vi /data/mongo/shard/data/shard3/mongod.shard3
net:
  bindIp: 0.0.0.0
  port: 27021
processManagement:
  fork: "true"
replication:
  replSetName: myShard_3
sharding:
  clusterRole: shardsvr
storage:
  dbPath: /data/mongo/shard/data/shard3
systemLog:
  destination: file
  path: /data/mongo/shard/logs/shard3/mongodbs3.log
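The three files differ only in port, replica set name and paths. If you prefer not to edit them by hand, a small shell loop can generate them; this is only a convenience sketch that mirrors the settings shown above (port = 27018 + shard number), not a step from the original post:

# Generate the three shard config files on hdp06 (sketch; values mirror the files above)
for i in 1 2 3; do
  port=$((27018 + i))   # shard1 -> 27019, shard2 -> 27020, shard3 -> 27021
  cat > /data/mongo/shard/data/shard${i}/mongod.shard${i} <<EOF
net:
  bindIp: 0.0.0.0
  port: ${port}
processManagement:
  fork: "true"
replication:
  replSetName: myShard_${i}
sharding:
  clusterRole: shardsvr
storage:
  dbPath: /data/mongo/shard/data/shard${i}
systemLog:
  destination: file
  path: /data/mongo/shard/logs/shard${i}/mongodbs${i}.log
EOF
done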
Copy the shard configuration files to the other two nodes:

[root@hdp06 ~]# scp -r /data/mongo/shard/data/* hdp07:/data/mongo/shard/data/
[root@hdp06 ~]# scp -r /data/mongo/shard/data/* hdp08:/data/mongo/shard/data/
Start the three shard instances on each node:

[root@hdp06 ~]# mongod -f /data/mongo/shard/data/shard1/mongod.shard1
[root@hdp06 ~]# mongod -f /data/mongo/shard/data/shard2/mongod.shard2
[root@hdp06 ~]# mongod -f /data/mongo/shard/data/shard3/mongod.shard3
[root@hdp07 ~]# mongod -f /data/mongo/shard/data/shard1/mongod.shard1
[root@hdp07 ~]# mongod -f /data/mongo/shard/data/shard2/mongod.shard2
[root@hdp07 ~]# mongod -f /data/mongo/shard/data/shard3/mongod.shard3
[root@hdp08 ~]# mongod -f /data/mongo/shard/data/shard1/mongod.shard1
[root@hdp08 ~]# mongod -f /data/mongo/shard/data/shard2/mongod.shard2
[root@hdp08 ~]# mongod -f /data/mongo/shard/data/shard3/mongod.shard3
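With nine mongod processes now running (three per node), a quick loop can confirm that each local shard instance answers on its port. This is a hedged sketch, not part of the original post, and assumes the mongo shell is available on the node:

# Sanity check on each node: ping the three local shard instances
for port in 27019 27020 27021; do
  mongo --port ${port} --eval 'printjson(db.runCommand({ ping: 1 }))'
done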
Initialize each shard replica set; this can be done from any one node.
[root@hdp06 ~]# mongo --port 27019
> rs.initiate({_id:"myShard_1",members:[{_id:1,host:"hdp06.thinkjoy.tt:27019",priority:2},{_id:2,host:"hdp07.thinkjoy.tt:27019"},{_id:3,host:"hdp08.thinkjoy.tt:27019"}]})
{
    "ok" : 1,
    "operationTime" : Timestamp(1534818896, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534818896, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
[root@hdp06 ~]# mongo --port 27020
> rs.initiate({_id:"myShard_2",members:[{_id:1,host:"hdp06.thinkjoy.tt:27020",priority:2},{_id:2,host:"hdp07.thinkjoy.tt:27020"},{_id:3,host:"hdp08.thinkjoy.tt:27020"}]})
{
    "operationTime" : Timestamp(1534818908, 2),
    "ok" : 0,
    "errmsg" : "already initialized",
    "code" : 23,
    "codeName" : "AlreadyInitialized",
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534818908, 2),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
[root@hdp06 ~]# mongo --port 27021
> rs.initiate({_id:"myShard_3",members:[{_id:1,host:"hdp06.thinkjoy.tt:27021",priority:2},{_id:2,host:"hdp07.thinkjoy.tt:27021"},{_id:3,host:"hdp08.thinkjoy.tt:27021"}]})
{
    "operationTime" : Timestamp(1534818928, 1),
    "ok" : 0,
    "errmsg" : "already initialized",
    "code" : 23,
    "codeName" : "AlreadyInitialized",
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534818928, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
The AlreadyInitialized (code 23) responses for myShard_2 and myShard_3 simply mean those replica sets had already been initiated in an earlier attempt; on a first run they return "ok" : 1, just like myShard_1.
Finally, configure and start the mongos routers. Create the router configuration file, copy it to the other two nodes, and start mongos on all three:

[root@hdp06 ~]# vi /data/mongo/router/mongod.router
net:
  bindIp: 0.0.0.0
  port: 27018
processManagement:
  fork: "true"
sharding:
  configDB: configRS/hdp06.thinkjoy.tt:27017,hdp07.thinkjoy.tt:27017,hdp08.thinkjoy.tt:27017
systemLog:
  destination: file
  path: /data/mongo/router/logs/mongo_router.log

[root@hdp06 ~]# scp /data/mongo/router/mongod.router hdp07:/data/mongo/router
[root@hdp06 ~]# scp /data/mongo/router/mongod.router hdp08:/data/mongo/router
[root@hdp06 ~]# mongos -f /data/mongo/router/mongod.router
[root@hdp07 ~]# mongos -f /data/mongo/router/mongod.router
[root@hdp08 ~]# mongos -f /data/mongo/router/mongod.router
Now configure sharding: connect to mongos and add each shard (identified by its replica set and primary host) to the cluster, as follows:
[root@hdp06 ~]# mongo --port 27018
mongos> show dbs
admin   0.000GB
config  0.000GB
mongos> use admin
switched to db admin
mongos> db.runCommand({"addShard":"myShard_1/hdp06.thinkjoy.tt:27019", "maxsize":1024})
{
    "shardAdded" : "myShard_1",
    "ok" : 1,
    "operationTime" : Timestamp(1534819816, 6),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534819816, 6),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
mongos> db.runCommand({"addShard":"myShard_2/hdp06.thinkjoy.tt:27020", "maxsize":1024})
{
    "shardAdded" : "myShard_2",
    "ok" : 1,
    "operationTime" : Timestamp(1534819823, 5),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534819823, 5),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
mongos> db.runCommand({"addShard":"myShard_3/hdp06.thinkjoy.tt:27021", "maxsize":1024})
{
    "shardAdded" : "myShard_3",
    "ok" : 1,
    "operationTime" : Timestamp(1534819830, 5),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534819830, 5),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
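To double-check the registrations, the shards can also be listed directly through mongos. A small sketch using the standard listShards admin command (not part of the original walkthrough):

# List the registered shards via mongos on port 27018
mongo --port 27018 --eval 'printjson(db.adminCommand({ listShards: 1 }))'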
Check the sharded cluster status:
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
      "_id" : 1,
      "minCompatibleVersion" : 5,
      "currentVersion" : 6,
      "clusterId" : ObjectId("5b7b700b1cfaf6f26d2d0284")
  }
  shards:
        {  "_id" : "myShard_1",  "host" : "myShard_1/hdp06.thinkjoy.tt:27019,hdp07.thinkjoy.tt:27019,hdp08.thinkjoy.tt:27019",  "state" : 1 }
        {  "_id" : "myShard_2",  "host" : "myShard_2/hdp06.thinkjoy.tt:27020,hdp07.thinkjoy.tt:27020,hdp08.thinkjoy.tt:27020",  "state" : 1 }
        {  "_id" : "myShard_3",  "host" : "myShard_3/hdp06.thinkjoy.tt:27021,hdp07.thinkjoy.tt:27021,hdp08.thinkjoy.tt:27021",  "state" : 1 }
  active mongoses:
        "4.0.1" : 3
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
        {  "_id" : "MyDB",  "primary" : "myShard_2",  "partitioned" : true,  "version" : {  "uuid" : UUID("202a1d72-aa92-403d-a2a6-c7c3aa273323"),  "lastMod" : 1 } }
                MyDB.chapter
                        shard key: { "id" : 1, "subjectId" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                myShard_2  1
                        { "id" : { "$minKey" : 1 }, "subjectId" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 }, "subjectId" : { "$maxKey" : 1 } } on : myShard_2 Timestamp(1, 0)
                MyDB.question_knowledge_basic_id
                        shard key: { "knowledge_basic_id" : 1, "question_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                myShard_2  1
                        { "knowledge_basic_id" : { "$minKey" : 1 }, "question_id" : { "$minKey" : 1 } } -->> { "knowledge_basic_id" : { "$maxKey" : 1 }, "question_id" : { "$maxKey" : 1 } } on : myShard_2 Timestamp(1, 0)
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                myShard_1  1
                        { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : myShard_1 Timestamp(1, 0)
A database named MyDB has been created here; enable sharding for it with the following command:
mongos> sh.enableSharding("MyDB")
{
    "ok" : 1,
    "operationTime" : Timestamp(1534837173, 2214),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1534837173, 2408),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
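Whether sharding is now enabled for MyDB can be confirmed from the config database, where its entry should show "partitioned" : true. A quick sketch (not from the original post):

# Confirm that MyDB is marked as partitioned in the cluster metadata
mongo --port 27018 --eval 'db.getSiblingDB("config").databases.find({ _id: "MyDB" }).forEach(printjson)'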
Next, shard the chapter collection in MyDB:
-- Create an index matching the intended shard key
mongos> use MyDB
switched to db MyDB
mongos> db.chapter.createIndex({"id" : 1, "subjectId" : 1}, {"name" : "new_idx"})
-- Shard the collection
mongos> sh.shardCollection('MyDB.chapter', {id: 1, subjectId: 1})
{
    "collectionsharded" : "MyDB.chapter",
    "collectionUUID" : UUID("a5f3b95c-ced6-4ae5-82d8-06d36176cbef"),
    "ok" : 1,
    "operationTime" : Timestamp(1535077873, 13),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1535077873, 13),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
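Once data is written to the sharded collection, its chunk and data distribution across the shards can be inspected with getShardDistribution(). A minimal sketch run against mongos (not part of the original post):

# Show how the chapter collection's data and chunks are spread across the shards
mongo --port 27018 --eval 'db.getSiblingDB("MyDB").chapter.getShardDistribution()'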
For monitoring MongoDB, besides third-party tools, the officially provided Ops Manager is strongly recommended.
Reposted from: https://blog.51cto.com/candon123/2163736