Skip to content

Commit fd181c6

Browse files
committed
Merge remote-tracking branch 'origin/HDDS-5713' into master-new
2 parents ce54200 + 4232d21 commit fd181c6

File tree

95 files changed

+10534
-177
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

95 files changed

+10534
-177
lines changed

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,9 @@ public final class HddsConfigKeys {
332332
public static final String OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL =
333333
"ozone.security.reconfigure.protocol.acl";
334334

335+
public static final String HDDS_SECURITY_CLIENT_DATANODE_DISK_BALANCER_PROTOCOL_ACL =
336+
"hdds.security.client.datanode.disk.balancer.protocol.acl";
337+
335338
// Determines if the Container Chunk Manager will write user data to disk
336339
// Set to false only for specific performance tests
337340
public static final String HDDS_CONTAINER_PERSISTDATA =
@@ -398,6 +401,10 @@ public final class HddsConfigKeys {
398401
public static final String OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY =
399402
"ozone.volume.io.percentiles.intervals.seconds";
400403

404+
public static final String HDDS_DATANODE_DISK_BALANCER_ENABLED_KEY =
405+
"hdds.datanode.disk.balancer.enabled";
406+
public static final boolean HDDS_DATANODE_DISK_BALANCER_ENABLED_DEFAULT = false;
407+
401408
public static final String HDDS_DATANODE_DNS_INTERFACE_KEY =
402409
"hdds.datanode.dns.interface";
403410
public static final String HDDS_DATANODE_DNS_NAMESERVER_KEY =

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ItemsFromStdin.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ public abstract class ItemsFromStdin implements Iterable<String> {
3232
protected static final String FORMAT_DESCRIPTION =
3333
": one or more, separated by spaces. To read from stdin, specify '-' and supply one item per line.";
3434

35-
private List<String> items;
35+
private List<String> items = new ArrayList<>();
3636

3737
protected void setItems(List<String> arguments) {
3838
items = readItemsFromStdinIfNeeded(arguments);

hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,9 @@ public final class OzoneConsts {
199199
*/
200200
public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id";
201201

202+
public static final String
203+
OZONE_SCM_DATANODE_DISK_BALANCER_INFO_FILE_DEFAULT = "diskBalancer.info";
204+
202205
/**
203206
* The ServiceListJSONServlet context attribute where OzoneManager
204207
* instance gets stored.

hadoop-hdds/common/src/main/resources/ozone-default.xml

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,15 @@
208208
The value should be between 0-1. Such as 0.1 which means 10% of volume space will be reserved.
209209
</description>
210210
</property>
211+
<property>
212+
<name>hdds.datanode.disk.balancer.enabled</name>
213+
<value>false</value>
214+
<tag>OZONE, DATANODE, DISKBALANCER</tag>
215+
<description>If this property is set to true, then the Disk Balancer
216+
feature is enabled on Datanodes, and users can use
217+
this service. By default, this is disabled.
218+
</description>
219+
</property>
211220
<property>
212221
<name>hdds.datanode.volume.choosing.policy</name>
213222
<value>org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy</value>
@@ -371,7 +380,6 @@
371380
defined with postfix (ns,ms,s,m,h,d)</description>
372381
</property>
373382

374-
375383
<property>
376384
<name>hdds.prometheus.endpoint.enabled</name>
377385
<value>true</value>
@@ -784,6 +792,16 @@
784792
Then maximum 500000/(100/8) = 40000 blocks will be sent to each DN in every interval.
785793
</description>
786794
</property>
795+
<property>
796+
<name>ozone.scm.block.deletion.per.dn.distribution.factor</name>
797+
<value>8</value>
798+
<tag>OZONE, SCM</tag>
799+
<description>
800+
Factor that scales the number of delete blocks sent to each datanode in every interval.
801+
If the total number of DNs is 100 and hdds.scm.block.deletion.per-interval.max is 500000
802+
Then maximum 500000/(100/8) = 40000 blocks will be sent to each DN in every interval.
803+
</description>
804+
</property>
787805
<property>
788806
<name>ozone.scm.block.size</name>
789807
<value>256MB</value>
@@ -2974,7 +2992,14 @@
29742992
Comma separated list of users and groups allowed to access reconfigure protocol.
29752993
</description>
29762994
</property>
2977-
2995+
<property>
2996+
<name>hdds.security.client.datanode.disk.balancer.protocol.acl</name>
2997+
<value>*</value>
2998+
<tag>SECURITY</tag>
2999+
<description>
3000+
Comma separated list of users and groups allowed to access disk balancer protocol.
3001+
</description>
3002+
</property>
29783003
<property>
29793004
<name>hdds.datanode.http.auth.kerberos.principal</name>
29803005
<value>HTTP/_HOST@REALM</value>

hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,5 +53,6 @@ public enum ConfigTag {
5353
TOKEN,
5454
UPGRADE,
5555
X509,
56+
DISKBALANCER,
5657
CRYPTO_COMPLIANCE
5758
}

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -33,11 +33,16 @@
3333
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
3434
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
3535
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
36+
import org.apache.hadoop.hdds.protocol.DiskBalancerProtocol;
37+
import org.apache.hadoop.hdds.protocol.proto.DiskBalancerProtocolProtos;
3638
import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos;
39+
import org.apache.hadoop.hdds.protocolPB.DiskBalancerProtocolPB;
40+
import org.apache.hadoop.hdds.protocolPB.DiskBalancerProtocolServerSideTranslatorPB;
3741
import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolDatanodePB;
3842
import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB;
3943
import org.apache.hadoop.hdds.server.ServerUtils;
4044
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
45+
import org.apache.hadoop.hdds.utils.HddsServerUtil;
4146
import org.apache.hadoop.hdds.utils.VersionInfo;
4247
import org.apache.hadoop.ipc.ProtobufRpcEngine;
4348
import org.apache.hadoop.ipc.RPC;
@@ -56,12 +61,13 @@ public class HddsDatanodeClientProtocolServer extends ServiceRuntimeInfoImpl {
5661

5762
protected HddsDatanodeClientProtocolServer(
5863
DatanodeDetails datanodeDetails, OzoneConfiguration conf,
59-
VersionInfo versionInfo, ReconfigurationHandler reconfigurationHandler
64+
VersionInfo versionInfo, ReconfigurationHandler reconfigurationHandler,
65+
DiskBalancerProtocol diskBalancerProtocol
6066
) throws IOException {
6167
super(versionInfo);
6268
this.conf = conf;
6369

64-
rpcServer = getRpcServer(conf, reconfigurationHandler);
70+
rpcServer = getRpcServer(conf, reconfigurationHandler, diskBalancerProtocol);
6571
clientRpcAddress = ServerUtils.updateRPCListenAddress(this.conf,
6672
HDDS_DATANODE_CLIENT_ADDRESS_KEY,
6773
HddsUtils.getDatanodeRpcAddress(conf), rpcServer);
@@ -96,12 +102,15 @@ public void join() throws InterruptedException {
96102
* running then returns the same.
97103
*/
98104
private RPC.Server getRpcServer(OzoneConfiguration configuration,
99-
ReconfigurationHandler reconfigurationHandler)
105+
ReconfigurationHandler reconfigurationHandler,
106+
DiskBalancerProtocol diskBalancerProtocol)
100107
throws IOException {
101108
InetSocketAddress rpcAddress = HddsUtils.getDatanodeRpcAddress(conf);
102-
// Add reconfigureProtocolService.
109+
// Set protocol engines for all protocols before creating the server.
103110
RPC.setProtocolEngine(
104111
configuration, ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class);
112+
RPC.setProtocolEngine(
113+
configuration, DiskBalancerProtocolPB.class, ProtobufRpcEngine.class);
105114

106115
final int handlerCount = conf.getInt(HDDS_DATANODE_HANDLER_COUNT_KEY,
107116
HDDS_DATANODE_HANDLER_COUNT_DEFAULT);
@@ -113,8 +122,17 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration,
113122
.ReconfigureProtocolService.newReflectiveBlockingService(
114123
reconfigureServerProtocol);
115124

116-
return startRpcServer(configuration, rpcAddress,
125+
RPC.Server server = startRpcServer(configuration, rpcAddress,
117126
ReconfigureProtocolDatanodePB.class, reconfigureService, handlerCount, readThreads);
127+
if (diskBalancerProtocol != null) {
128+
DiskBalancerProtocolServerSideTranslatorPB diskBalancerTranslator =
129+
new DiskBalancerProtocolServerSideTranslatorPB(diskBalancerProtocol);
130+
BlockingService diskBalancerService = DiskBalancerProtocolProtos
131+
.DiskBalancerProtocolService.newReflectiveBlockingService(diskBalancerTranslator);
132+
HddsServerUtil.addPBProtocol(configuration, DiskBalancerProtocolPB.class,
133+
diskBalancerService, server);
134+
}
135+
return server;
118136
}
119137

120138
/**

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@
6666
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
6767
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
6868
import org.apache.hadoop.hdds.protocol.DatanodeID;
69+
import org.apache.hadoop.hdds.protocol.DiskBalancerProtocol;
6970
import org.apache.hadoop.hdds.protocol.SecretKeyProtocol;
7071
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
7172
import org.apache.hadoop.hdds.security.SecurityConfig;
@@ -91,6 +92,7 @@
9192
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
9293
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
9394
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
95+
import org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerProtocolServer;
9496
import org.apache.hadoop.ozone.util.OzoneNetUtils;
9597
import org.apache.hadoop.ozone.util.ShutdownHookManager;
9698
import org.apache.hadoop.security.SecurityUtil;
@@ -342,9 +344,12 @@ public String getNamespace() {
342344
LOG.error("HttpServer failed to start.", ex);
343345
}
344346

347+
DiskBalancerProtocol diskBalancerProtocol =
348+
new DiskBalancerProtocolServer(datanodeStateMachine,
349+
this::checkAdminPrivilege);
345350
clientProtocolServer = new HddsDatanodeClientProtocolServer(
346351
datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO,
347-
reconfigurationHandler);
352+
reconfigurationHandler, diskBalancerProtocol);
348353

349354
int clientRpcport = clientProtocolServer.getClientRpcAddress().getPort();
350355
serviceRuntimeInfo.setClientRpcPort(String.valueOf(clientRpcport));

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,15 @@
1717

1818
package org.apache.hadoop.ozone;
1919

20+
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_DATANODE_DISK_BALANCER_PROTOCOL_ACL;
2021
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL;
2122

22-
import java.util.Collections;
23+
import java.util.Arrays;
2324
import java.util.List;
2425
import java.util.function.Supplier;
2526
import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private;
2627
import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable;
28+
import org.apache.hadoop.hdds.protocol.DiskBalancerProtocol;
2729
import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
2830
import org.apache.hadoop.security.authorize.PolicyProvider;
2931
import org.apache.hadoop.security.authorize.Service;
@@ -40,10 +42,13 @@ public final class HddsPolicyProvider extends PolicyProvider {
4042
MemoizedSupplier.valueOf(HddsPolicyProvider::new);
4143

4244
private static final List<Service> DN_SERVICES =
43-
Collections.singletonList(
45+
Arrays.asList(
4446
new Service(
4547
OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
46-
ReconfigureProtocol.class)
48+
ReconfigureProtocol.class),
49+
new Service(
50+
HDDS_SECURITY_CLIENT_DATANODE_DISK_BALANCER_PROTOCOL_ACL,
51+
DiskBalancerProtocol.class)
4752
);
4853

4954
private HddsPolicyProvider() {

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,29 @@ private void updateContainerIdTable(long containerId, ContainerData containerDat
235235
}
236236
}
237237

238+
/**
239+
* Update an existing Container in the container map.
240+
* @param container container to be added
241+
* @return the previous Container instance that was mapped to this
242+
* container ID
243+
*/
244+
public Container updateContainer(Container<?> container) throws
245+
StorageContainerException {
246+
Objects.requireNonNull(container, "container cannot be null");
247+
248+
long containerId = container.getContainerData().getContainerID();
249+
if (!containerMap.containsKey(containerId)) {
250+
LOG.error("Container doesn't exists with container Id {}", containerId);
251+
throw new StorageContainerException("Container doesn't exist with " +
252+
"container Id " + containerId,
253+
ContainerProtos.Result.CONTAINER_NOT_FOUND);
254+
} else {
255+
LOG.debug("Container with container Id {} is updated to containerMap",
256+
containerId);
257+
return containerMap.put(containerId, container);
258+
}
259+
}
260+
238261
/**
239262
* Returns the Container with specified containerId.
240263
* @param containerId ID of the container to get

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
import java.io.IOException;
2222
import java.io.InputStream;
2323
import java.io.OutputStream;
24+
import java.nio.file.Path;
2425
import java.time.Instant;
2526
import java.util.Map;
2627
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -195,6 +196,11 @@ ContainerReplicaProto getContainerReport()
195196
DataScanResult scanData(DataTransferThrottler throttler, Canceler canceler)
196197
throws InterruptedException;
197198

199+
/**
200+
* Copy all the data of the container to the destination path.
201+
*/
202+
void copyContainerDirectory(Path destPath) throws IOException;
203+
198204
/** Acquire read lock. */
199205
void readLock();
200206

0 commit comments

Comments
 (0)