[mongo@vq12stmsg01 /app/mongo]
$top
top - 13:00:41 up 2 days, 21:35, 3 users, load average: 0.31, 0.23, 0.23
Tasks: 250 total, 1 running, 249 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.7 us, 1.7 sy, 0.0 ni, 97.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 5.0 us, 7.3 sy, 0.0 ni, 87.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 1.0 us, 1.7 sy, 0.0 ni, 97.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 0.7 us, 1.0 sy, 0.0 ni, 98.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu4 : 5.6 us, 7.3 sy, 0.0 ni, 87.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu5 : 0.0 us, 0.7 sy, 0.0 ni, 99.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu6 : 0.3 us, 1.0 sy, 0.0 ni, 98.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu7 : 5.3 us, 7.9 sy, 0.0 ni, 86.8 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu8 : 0.3 us, 1.0 sy, 0.0 ni, 98.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu9 : 1.0 us, 2.0 sy, 0.0 ni, 97.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu10 : 5.3 us, 7.0 sy, 0.0 ni, 87.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu11 : 1.3 us, 2.0 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem: 41038580 total, 1805648 used, 39232932 free, 319880 buffers
KiB Swap: 0 total, 0 used, 0 free. 469688 cached Mem
[mongo@vq12stmsg01 /app/mongo]
$free -m
total used free shared buffers cached
Mem: 40076 1762 38314 8 312 458
-/+ buffers/cache: 991 39085
Swap: 0 0 0
[mongo@vq12stmsg01 /app/mongo]
$df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg00-lv_root 19G 3.5G 14G 21% /
devtmpfs 20G 0 20G 0% /dev
tmpfs 20G 80K 20G 1% /dev/shm
tmpfs 20G 8.9M 20G 1% /run
tmpfs 20G 0 20G 0% /sys/fs/cgroup
/dev/sda1 969M 95M 809M 11% /boot
/dev/mapper/vg00-lv_data 40G 1.3G 80G 4% /data
/dev/mapper/vg00-lv_app 21G 328M 19G 2% /app
echo -ne "
10.78.200.105 vq12stmsg01
10.78.200.106 vq12stmsg02
10.78.200.107 vq12stmsg03
" >>/etc/hosts
mkdir -p /app/logs/
On host 105:
mkdir -p /data/mdb/{mdb1_1,mdb1_1/repair,mdb2_2,mdb2_2/repair,mdb3_3,mdb3_3/repair}
mkdir -p /data/configdb/configdb1_1/repair
On host 106:
mkdir -p /data/mdb/{mdb1_3,mdb1_3/repair,mdb2_1,mdb2_1/repair,mdb3_2,mdb3_2/repair}
mkdir -p /data/configdb/configdb2_1/repair
On host 107:
mkdir -p /data/mdb/{mdb1_2,mdb1_2/repair,mdb2_3,mdb2_3/repair,mdb3_1,mdb3_1/repair}
mkdir -p /data/configdb/configdb3_1/repair
systemctl stop firewalld.service
systemctl disable firewalld.service
openssl rand -base64 741 > /app/conf/keyfile
chmod 600 /app/conf/keyfile
Copy the keyfile to every node, keeping the same path and permissions.
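One way to push the keyfile out is sketched below; it assumes root SSH access between the three hosts and the mongo:dba ownership used elsewhere in these notes.
for h in 10.78.200.106 10.78.200.107; do
  scp -p /app/conf/keyfile root@$h:/app/conf/keyfile
  ssh root@$h 'chown mongo:dba /app/conf/keyfile && chmod 600 /app/conf/keyfile'
done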
Host 105 environment variables and parameters, for reference:
$cat /etc/rc.local
.....
#####MongoDB#####
sleep 2
blockdev --setra 32 /dev/mapper/vg00-lv_data
sleep 2
echo '512' > /sys/block/sdb/queue/nr_requests
sleep 2
echo 0 > /proc/sys/vm/zone_reclaim_mode
#####blockdev --report /dev/mapper/vg00-lv_data
#chown mongo:dba -R /app/
#chown mongo:dba -R /data/
#####MongoDB#####
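To confirm these settings survive a reboot, they can be checked with the commands below (the sdb device name is taken from the rc.local entry above):
blockdev --getra /dev/mapper/vg00-lv_data    # expect 32
cat /sys/block/sdb/queue/nr_requests         # expect 512
cat /proc/sys/vm/zone_reclaim_mode           # expect 0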
$cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Wed Mar 16 06:50:02 2016
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/vg00-lv_root / ext3 defaults 1 1
/dev/mapper/vg00-lv_app /app ext3 defaults 1 2
UUID=4f86d0d2-a559-4f07-8a91-a605e0005f5c /boot ext3 defaults 1 2
/dev/mapper/vg00-lv_data /data ext4 noatime 0 0
$cat /etc/security/limits.conf
mongo soft nofile 65535
mongo hard nofile 65535
mongo soft nproc 65535
mongo hard nproc 65535
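A quick check (run as root) that the limits are applied to the mongo account; note that on RHEL/CentOS 7 the nproc value can also be capped by /etc/security/limits.d/20-nproc.conf:
su - mongo -c 'ulimit -n; ulimit -u'    # expect 65535 for both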
Disable transparent huge pages (THP); see:
https://docs.mongodb.org/manual/tutorial/transparent-huge-pages/
https://docs.mongodb.org/manual/administration/production-checklist/
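One way to disable THP at boot on this setup is to append the two lines below to the /etc/rc.local block shown above (the linked MongoDB tutorial also describes an init-script approach):
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag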
[mongo@vq12stmsg01 /app/mongo]
$cat .bash_profile
...
#####add by mongoDB#####
export LANG=en_US
export PATH=$PATH:/app/mongodb_3_2_4/bin
set -o vi
stty erase ^H
umask 022
export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
alias 'l=ls -altr'
alias 'cdm=cd /app/mongodb_3_2_4'
alias 'cdl=cd /app/logs'
alias 'cdc=cd /app/conf'
alias 'cddb=cd /data/'
alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos1.conf'
alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_1.conf'
alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_1.conf'
alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_2.conf'
alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_3.conf'
alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
#####add by mongoDB#####
[mongo@vq12stmsg01 /app/conf]
$cat configsvr1_1.conf
systemLog:
  destination: file
  path: "/app/logs/configsvr1_1.log"
  logAppend: true
storage:
  dbPath: "/data/configdb/configdb1_1"
  repairPath: "/data/configdb/configdb1_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 20000
sharding:
  clusterRole: configsvr
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
$cat mongos1.conf
systemLog:
  destination: file
  path: /app/logs/mongos1.log
  logAppend: true
net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 10000
processManagement:
  fork: true
replication:
  localPingThresholdMs: 15
sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
$cat shardsvr1_1.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_1.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb1_1/"
  repairPath: "/data/mdb/mdb1_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30001
replication:
  oplogSizeMB: 10240
  replSetName: pns1
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
$cat shardsvr2_2.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_2.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb2_2/"
  repairPath: "/data/mdb/mdb2_2/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30002
replication:
  oplogSizeMB: 10240
  replSetName: pns2
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
$cat shardsvr3_3.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_3.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb3_3/"
  repairPath: "/data/mdb/mdb3_3/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30003
replication:
  oplogSizeMB: 10240
  replSetName: pns3
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
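With the configuration files above in place, one possible startup sequence on 105 uses the aliases defined in .bash_profile (config server first, mongos last once all three config servers are reachable; the same pattern applies to 106 and 107 with their own aliases):
su - mongo
configstart     # config server, port 20000
mongodstart1    # shard members on this host, ports 30001-30003
mongodstart2
mongodstart3
mongosstart     # mongos router, port 10000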
Host 106 environment variables and parameters:
#####add by mongoDB#####
export LANG=en_US
export PATH=$PATH:/app/mongodb_3_2_4/bin
set -o vi
stty erase ^H
umask 022
export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
alias 'l=ls -altr'
alias 'cdm=cd /app/mongodb_3_2_4'
alias 'cdl=cd /app/logs'
alias 'cdc=cd /app/conf'
alias 'cddb=cd /data/'
alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos2.conf'
alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_2.conf'
alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_3.conf'
alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_1.conf'
alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_2.conf'
alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
#####add by mongoDB#####
[root@vq12stmsg02 conf]# ll
total 24
-rw-r--r-- 1 mongo dba 859 Mar 24 15:47 configsvr1_2.conf
-rw------- 1 mongo dba 1004 Mar 24 11:02 keyfile
-rw-r--r-- 1 mongo dba 527 Mar 24 15:40 mongos2.conf
-rw-r--r-- 1 mongo dba 930 Mar 24 15:47 shardsvr1_3.conf
-rw-r--r-- 1 mongo dba 928 Mar 24 15:48 shardsvr2_1.conf
-rw-r--r-- 1 mongo dba 930 Mar 24 15:48 shardsvr3_2.conf
[root@vq12stmsg02 conf]# cat configsvr1_2.conf
systemLog:
  destination: file
  path: "/app/logs/configsvr2_1.log"
  logAppend: true
storage:
  dbPath: "/data/configdb/configdb2_1"
  repairPath: "/data/configdb/configdb2_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 20000
sharding:
  clusterRole: configsvr
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
[root@vq12stmsg02 conf]# cat mongos2.conf
systemLog:
  destination: file
  path: /app/logs/mongos2.log
  logAppend: true
net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 10000
processManagement:
  fork: true
replication:
  localPingThresholdMs: 15
sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
[root@vq12stmsg02 conf]# cat shardsvr1_3.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_3.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb1_3/"
  repairPath: "/data/mdb/mdb1_3/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30003
replication:
  oplogSizeMB: 10240
  replSetName: pns1
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
[root@vq12stmsg02 conf]# cat shardsvr2_1.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_1.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb2_1/"
  repairPath: "/data/mdb/mdb2_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30001
replication:
  oplogSizeMB: 10240
  replSetName: pns2
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
[root@vq12stmsg02 conf]# cat shardsvr3_2.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_2.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb3_2/"
  repairPath: "/data/mdb/mdb3_2/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30002
replication:
  oplogSizeMB: 10240
  replSetName: pns3
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
[root@vq12stmsg02 conf]#
Host 107 environment variables and parameters:
#####add by mongoDB#####
export LANG=en_US
export PATH=$PATH:/app/mongodb_3_2_4/bin
set -o vi
stty erase ^H
umask 022
export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
alias 'l=ls -altr'
alias 'cdm=cd /app/mongodb_3_2_4'
alias 'cdl=cd /app/logs'
alias 'cdc=cd /app/conf'
alias 'cddb=cd /data/'
alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos3.conf'
alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_3.conf'
alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_2.conf'
alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_3.conf'
alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_1.conf'
alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
#####add by mongoDB#####
[root@vq12stmsg03 conf]# ll
total 24
-rw-r--r-- 1 mongo dba 861 Mar 24 15:45 configsvr1_3.conf
-rw------- 1 mongo dba 1004 Mar 24 11:02 keyfile
-rw-r--r-- 1 mongo dba 526 Mar 24 15:46 mongos3.conf
-rw-r--r-- 1 mongo dba 930 Mar 24 15:46 shardsvr1_2.conf
-rw-r--r-- 1 mongo dba 930 Mar 24 15:47 shardsvr2_3.conf
-rw-r--r-- 1 mongo dba 930 Mar 24 15:47 shardsvr3_1.conf
[root@vq12stmsg03 conf]# cat configsvr1_3.conf
systemLog:
  destination: file
  path: "/app/logs/configsvr3_1.log"
  logAppend: true
storage:
  dbPath: "/data/configdb/configdb3_1"
  repairPath: "/data/configdb/configdb3_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 20000
sharding:
  clusterRole: configsvr
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
[root@vq12stmsg03 conf]# cat mongos3.conf
systemLog:
  destination: file
  path: /app/logs/mongos3.log
  logAppend: true
net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 10000
processManagement:
  fork: true
replication:
  localPingThresholdMs: 15
sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
[root@vq12stmsg03 conf]# cat shardsvr1_2.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_2.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb1_2/"
  repairPath: "/data/mdb/mdb1_2/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30002
replication:
  oplogSizeMB: 10240
  replSetName: pns1
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
[root@vq12stmsg03 conf]# cat shardsvr2_3.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_3.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb2_3/"
  repairPath: "/data/mdb/mdb2_3/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30003
replication:
  oplogSizeMB: 10240
  replSetName: pns2
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
[root@vq12stmsg03 conf]# cat shardsvr3_1.conf
systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_1.log"
  logAppend: true
storage:
  dbPath: "/data/mdb/mdb3_1/"
  repairPath: "/data/mdb/mdb3_1/repair"
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 10
      journalCompressor: snappy
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true
processManagement:
  fork: true
net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30001
replication:
  oplogSizeMB: 10240
  replSetName: pns3
sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
Shard / replica set configuration:
On 105 (replica set pns1):
mongo 127.0.0.1:30001
rs.initiate()
rs.add("10.78.200.107:30002")
rs.add("10.78.200.106:30003")
rs.conf()
On 106 (replica set pns2):
mongo 127.0.0.1:30001
rs.initiate()
rs.add("10.78.200.105:30002")
rs.add("10.78.200.107:30003")
rs.conf()
On 107 (replica set pns3):
mongo 127.0.0.1:30001
rs.initiate()
rs.add("10.78.200.106:30002")
rs.add("10.78.200.105:30003")
rs.conf()
Then, on any mongos:
mongo 127.0.0.1:10000
use admin
sh.addShard( "pns1/10.78.200.105:30001,10.78.200.107:30002,10.78.200.106:30003" )
sh.addShard( "pns2/10.78.200.106:30001,10.78.200.105:30002,10.78.200.107:30003" )
sh.addShard( "pns3/10.78.200.107:30001,10.78.200.106:30002,10.78.200.105:30003" )
db.runCommand({listshards:1})
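To double-check the cluster state after adding the shards, the standard shell helpers can be used, for example:
mongo 127.0.0.1:10000/admin
sh.status()      // lists pns1/pns2/pns3 and the balancer state
mongo 127.0.0.1:30001
rs.status()      // health of the local replica set member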
Users and permissions:
use admin
db.createUser(
  {
    user: "admin",
    pwd: "xxxxxx",
    roles: [ { role: "root", db: "admin" }, { role: "clusterManager", db: "admin" }, { role: "clusterMonitor", db: "admin" }, { role: "hostManager", db: "admin" } ]
  }
)
db.updateUser( "admin",
  {
    roles: [ { role: "root", db: "admin" }, { role: "dbAdminAnyDatabase", db: "admin" }, { role: "readWriteAnyDatabase", db: "admin" }, { role: "userAdminAnyDatabase", db: "admin" }, { role: "clusterManager", db: "admin" }, { role: "clusterMonitor", db: "admin" }, { role: "hostManager", db: "admin" } ]
  }
);
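The connection examples below use a pns account against the mdb database; a minimal sketch of creating it through a mongos follows (the role assignment here is an assumption for illustration, not taken from the original notes):
mongo 127.0.0.1:10000/admin -u admin -p xxxxxx
use mdb
db.createUser(
  {
    user: "pns",
    pwd: "xxxxxx",
    roles: [ { role: "readWrite", db: "mdb" }, { role: "dbAdmin", db: "mdb" } ]   // assumed roles
  }
)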
mongo 10.78.200.107:10000/admin -u admin -p xxxxxx
// or, inside an already-connected shell: db.auth("admin","xxxxxx")
mongo 10.78.200.107:10000/mdb -u pns -p xxxxxx
mongostat -h 10.78.200.107:30001 -u admin -p xxxxxx --discover --authenticationDatabase admin 1
mongotop -h 10.78.200.107:30001 -u admin -p xxxxxx --authenticationDatabase admin
mongo 10.78.200.107:30001 -u pns -p xxxxxx --eval "printjson(db.printSlaveReplicationInfo())"
mongo 127.0.0.1:10000
use admin
db.runCommand({"enablesharding":"mdb"})
db.runCommand({"shardcollection":"mdb.mcUser293","key":{"_id":"hashed"}})
db.runCommand({shardcollection:"mdb.mcUser293", key:{_id:1}})
use mdb
for(var i=1;i<=20000;i++) db.test.insert({id:i,addr_1:"Beijing",addr_2:"Shanghai"});
db.test.stats()
db.test.drop()
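To see how the sharded collection's chunks end up spread across pns1/pns2/pns3, the distribution can be inspected from a mongos, for example:
mongo 127.0.0.1:10000/mdb
db.mcUser293.getShardDistribution()
sh.status()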