MongoDB分片集群搭建

环境

  • CentOS 7.5

  • MongoDB 4.0.1

  • shard分片主机:

    • shard1:IP:192.168.50.211
    • shard2:IP:192.168.50.212
    • shard3:IP:192.168.50.213
      三台主机分别启动三个mongod实例:
      • mongod1:端口:27017
      • mongod2:端口:27018
      • mongod3:端口:27019
  • configsrv主机:

    • IP:192.168.50.214
      • mongod1:端口:27019
      • mongod2:端口:37019
      • mongod3:端口:47019
  • route主机:192.168.50.215

    • mongods:端口:27017

准备工作

  1. 在所有节点安装mongodb-4 并创建相关文件夹
# On every node: configure a MongoDB 4.0 yum repository (Aliyun mirror),
# install MongoDB, and create the data / pidfile / config / socket
# directories used by the three mongod instances per host.
cat << EOF > /etc/yum.repos.d/mongodb.repo
[mongodb-org-4.0]
name=MongoDB 4.0 Repository
baseurl=https://mirrors.aliyun.com/mongodb/yum/redhat/\$releasever/mongodb-org/4.0/\$basearch/
gpgcheck=0
enabled=1
EOF

yum install -y mongodb-org

mkdir -p /var/run/mongodb
mkdir -p /data/mongod{1..3}
mkdir -p /etc/mongo
mkdir -p /tmp/mongod{1..3}

# Give ownership to the mongod service account.
# Use ':' as the owner/group separator ('.' is a non-standard legacy form).
chown -R mongod:mongod /data
chown -R mongod:mongod /var/run/mongodb
chown -R mongod:mongod /tmp/mongod{1..3}
  1. 生成key并复制至所有主机
# Run on host 192.168.50.211: generate the shared cluster keyfile and
# copy it to every other host.
openssl rand -base64 756 > /etc/mongo/mongo.key
chown -R mongod:mongod /etc/mongo
# The directory must keep its execute (search) bit or mongod cannot open
# the keyfile inside it; only the keyfile itself should be mode 600.
# (The original `chmod -R 600 /etc/mongo` made the directory untraversable.)
chmod 700 /etc/mongo
chmod 600 /etc/mongo/mongo.key

scp -r /etc/mongo 192.168.50.212:/etc/
scp -r /etc/mongo 192.168.50.213:/etc/
scp -r /etc/mongo 192.168.50.214:/etc/
scp -r /etc/mongo 192.168.50.215:/etc/

配置configsvr

  1. 在configsvr主机(IP:192.168.50.214)操作
  2. 生成三个configsvr的配置文件:
# Generate the three config-server config files (configsvc1..3) on
# 192.168.50.214. Instance i listens on 27019/37019/47019 and gets its
# own dbPath, log file, pidfile and unix-socket directory.
# NOTE: mongod config files are YAML, so the nested keys below MUST be
# indented under their section headers.
for i in 1 2 3; do
  port=$(( 17019 + 10000 * i ))   # 27019, 37019, 47019
  cat << EOF > /etc/mongo/configsvc${i}.conf
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod${i}.log

storage:
  dbPath: /data/mongod${i}
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
      directoryForIndexes: true

processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod${i}.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

net:
  port: ${port}
  # bindIp: 0.0.0.0  # bind specific addresses instead of bindIpAll if preferred
  bindIpAll: true
  maxIncomingConnections: 500
  unixDomainSocket:
    enabled: true
    pathPrefix: /tmp/mongod${i}
    filePermissions: 0700

security:
  keyFile: /etc/mongo/mongo.key
  authorization: enabled

replication:
  replSetName: BigBoss

sharding:
  clusterRole: configsvr
EOF
done
  1. 启动mongod:
# Start the three config-server mongod instances.
mongod -f /etc/mongo/configsvc1.conf
mongod -f /etc/mongo/configsvc2.conf
mongod -f /etc/mongo/configsvc3.conf
  1. 初始化configsrv副本集群
# Connect to the first config-server instance and initialize the
# config-server replica set.
mongo --port 27019

rs.initiate(
  {
    _id: "BigBoss",            // must match replSetName in the config files
    version: 1,
    protocolVersion: 1,
    writeConcernMajorityJournalDefault: true,
    configsvr: true,           // this replica set stores the cluster metadata
    members: [
      {
        _id: 0,
        host: "192.168.50.214:27019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 66,          // highest priority: preferred primary
        tags: { BigBoss: "YES" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 1,
        host: "192.168.50.214:37019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 55,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 2,
        host: "192.168.50.214:47019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 33,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      }
    ],
    settings: {
      chainingAllowed: true
    }
  }
)

# Check the replica set status
rs.status()

配置shard1副本集

  1. 在shard1主机(IP:192.168.50.211)操作
  2. 生成三个mongod的配置文件
# Generate the three shard-member config files (mongod1..3) on shard1
# (192.168.50.211). Instance i listens on 27017/27018/27019.
# NOTE: mongod config files are YAML, so the nested keys below MUST be
# indented under their section headers.
for i in 1 2 3; do
  cat << EOF > /etc/mongo/mongod${i}.conf
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod${i}.log

storage:
  dbPath: /data/mongod${i}
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
      directoryForIndexes: true

processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod${i}.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

net:
  port: $(( 27016 + i ))
  # bindIp: 0.0.0.0  # bind specific addresses instead of bindIpAll if preferred
  bindIpAll: true
  maxIncomingConnections: 500
  unixDomainSocket:
    enabled: true
    pathPrefix: /tmp/mongod${i}
    filePermissions: 0700

security:
  keyFile: /etc/mongo/mongo.key
  authorization: enabled

replication:
  replSetName: shard1

sharding:
  clusterRole: shardsvr
EOF
done
  1. 启动mongod
# Start the three shard1 mongod instances.
mongod -f /etc/mongo/mongod1.conf
mongod -f /etc/mongo/mongod2.conf
mongod -f /etc/mongo/mongod3.conf
  1. 初始化shard1副本集
# Connect to the default-port (27017) instance and initialize the shard1
# replica set across the three local mongod instances.
mongo

rs.initiate(
  {
    _id: "shard1",             // must match replSetName in the config files
    version: 1,
    protocolVersion: 1,
    writeConcernMajorityJournalDefault: true,
    members: [
      {
        _id: 0,
        host: "192.168.50.211:27017",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 66,          // highest priority: preferred primary
        tags: { BigBoss: "YES" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 1,
        host: "192.168.50.211:27018",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 55,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 2,
        host: "192.168.50.211:27019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 33,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      }
    ],
    settings: {
      chainingAllowed: true
    }
  }
)

# Check the replica set status
rs.status()

配置shard2副本集

  1. 在shard2主机(IP:192.168.50.212)操作
  2. 生成三个mongod的配置文件
# Generate the three shard-member config files (mongod1..3) on shard2
# (192.168.50.212). Instance i listens on 27017/27018/27019.
# NOTE: mongod config files are YAML, so the nested keys below MUST be
# indented under their section headers.
for i in 1 2 3; do
  cat << EOF > /etc/mongo/mongod${i}.conf
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod${i}.log

storage:
  dbPath: /data/mongod${i}
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
      directoryForIndexes: true

processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod${i}.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

net:
  port: $(( 27016 + i ))
  # bindIp: 0.0.0.0  # bind specific addresses instead of bindIpAll if preferred
  bindIpAll: true
  maxIncomingConnections: 500
  unixDomainSocket:
    enabled: true
    pathPrefix: /tmp/mongod${i}
    filePermissions: 0700

security:
  keyFile: /etc/mongo/mongo.key
  authorization: enabled

replication:
  replSetName: shard2

sharding:
  clusterRole: shardsvr
EOF
done
  1. 启动mongod
# Start the three shard2 mongod instances.
mongod -f /etc/mongo/mongod1.conf
mongod -f /etc/mongo/mongod2.conf
mongod -f /etc/mongo/mongod3.conf
  1. 初始化shard2副本集
# Connect to the default-port (27017) instance and initialize the shard2
# replica set across the three local mongod instances.
mongo

rs.initiate(
  {
    _id: "shard2",             // must match replSetName in the config files
    version: 1,
    protocolVersion: 1,
    writeConcernMajorityJournalDefault: true,
    members: [
      {
        _id: 0,
        host: "192.168.50.212:27017",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 66,          // highest priority: preferred primary
        tags: { BigBoss: "YES" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 1,
        host: "192.168.50.212:27018",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 55,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 2,
        host: "192.168.50.212:27019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 33,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      }
    ],
    settings: {
      chainingAllowed: true
    }
  }
)

# Check the shard2 replica set status
rs.status()

配置shard3副本集

  1. 在shard3主机(IP:192.168.50.213)操作
  2. 生成三个mongod的配置文件
# Generate the three shard-member config files (mongod1..3) on shard3
# (192.168.50.213). Instance i listens on 27017/27018/27019.
# NOTE: mongod config files are YAML, so the nested keys below MUST be
# indented under their section headers.
for i in 1 2 3; do
  cat << EOF > /etc/mongo/mongod${i}.conf
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod${i}.log

storage:
  dbPath: /data/mongod${i}
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
      directoryForIndexes: true

processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod${i}.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

net:
  port: $(( 27016 + i ))
  # bindIp: 0.0.0.0  # bind specific addresses instead of bindIpAll if preferred
  bindIpAll: true
  maxIncomingConnections: 500
  unixDomainSocket:
    enabled: true
    pathPrefix: /tmp/mongod${i}
    filePermissions: 0700

security:
  keyFile: /etc/mongo/mongo.key
  authorization: enabled

replication:
  replSetName: shard3

sharding:
  clusterRole: shardsvr
EOF
done
  1. 启动mongod
# Start the three shard3 mongod instances.
mongod -f /etc/mongo/mongod1.conf
mongod -f /etc/mongo/mongod2.conf
mongod -f /etc/mongo/mongod3.conf
  1. 初始化shard3副本集
# Connect to the default-port (27017) instance and initialize the shard3
# replica set across the three local mongod instances.
mongo

rs.initiate(
  {
    _id: "shard3",             // must match replSetName in the config files
    version: 1,
    protocolVersion: 1,
    writeConcernMajorityJournalDefault: true,
    members: [
      {
        _id: 0,
        host: "192.168.50.213:27017",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 66,          // highest priority: preferred primary
        tags: { BigBoss: "YES" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 1,
        host: "192.168.50.213:27018",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 55,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      },
      {
        _id: 2,
        host: "192.168.50.213:27019",
        arbiterOnly: false,
        buildIndexes: true,
        hidden: false,
        priority: 33,
        tags: { BigBoss: "NO" },
        slaveDelay: 0,
        votes: 1
      }
    ],
    settings: {
      chainingAllowed: true
    }
  }
)

# Check the shard3 replica set status
rs.status()

配置route

  1. 创建mongos配置文件
# mongos (the router) is stateless; it can run on any host that can reach
# the config servers. Here it runs on 192.168.50.215 on the default port.
# NOTE: the config file is YAML, so the nested keys MUST be indented.
cat << EOF > /etc/mongo/route.conf
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

net:
  port: 27017  # explicit, matching the port documented above
  bindIpAll: true
  maxIncomingConnections: 500
  unixDomainSocket:
    enabled: true
    pathPrefix: /tmp
    filePermissions: 0700

security:
  keyFile: /etc/mongo/mongo.key
  # authorization: enabled

sharding:
  configDB: BigBoss/192.168.50.214:27019,192.168.50.214:37019,192.168.50.214:47019
EOF
  1. 启动mongos并设置一个连接的账号密码
# Start mongos
mongos -f /etc/mongo/route.conf

# Connect (no users exist yet, so the localhost exception applies)
mongo

# Create the administrator account on the admin database.
use admin

db.createUser(
  {
    user: "root",
    pwd: "123456",  // change this in production
    // Use the built-in "root" superuser role. The internal "__system"
    // role is reserved for cluster members (keyfile auth) and should
    // never be assigned to human users.
    roles: [ { role: "root", db: "admin" } ]
  }
)

exit
  1. 重连至mongodb
# Reconnect through mongos as the administrator.
mongo -uroot -p123456 --authenticationDatabase admin

# Register the three shard replica sets with the cluster.
sh.addShard("shard1/192.168.50.211:27017,192.168.50.211:27018,192.168.50.211:27019")
sh.addShard("shard2/192.168.50.212:27017,192.168.50.212:27018,192.168.50.212:27019")
sh.addShard("shard3/192.168.50.213:27017,192.168.50.213:27018,192.168.50.213:27019")

# Check status
sh.status()

#### To see chunk splitting quickly in this demo, lower the chunk size to 1MB.
# The default chunk size is 64MB; the general form is:
#   use config
#   db.settings.save( { _id: "chunksize", value: <sizeInMB> } )

use config
db.settings.save( { _id: "chunksize", value: 1 } )

# Enable sharding on the "test" database and shard the "mycoll"
# collection on the "age" field.
sh.enableSharding("test")
sh.shardCollection("test.mycoll", { "age": 1 })

# Write test data. Note: course must concatenate the computed value
# (i % 12), not the literal string "(i%12)" as in the original.
use test
for (i = 1; i <= 10000; i++) db.mycoll.insert({ age: (i % 100), name: "bigboss_user" + i, address: i + ", Some Road, Zhengzhou, Henan", country: "China", course: "course" + (i % 12) })

# Inspect how the chunks were distributed across the shards.
sh.status()