HDFS Replica Placement and Reads - 谭志宇 - 博客园 http://www.cnblogs.com/chengxin1982/p/3978767.html
How does HDFS decide whether a data node is a suitable target? That decision is made by the isGoodTarget method:
private boolean isGoodTarget(DatanodeDescriptor node, long blockSize,
                             int maxTargetPerLoc, boolean considerLoad,
                             List<DatanodeDescriptor> results) {
  Log logr = FSNamesystem.LOG;
  // The node is being decommissioned or has already been decommissioned
  if (node.isDecommissionInProgress() || node.isDecommissioned()) {
    logr.debug("Node " + NodeBase.getPath(node)
        + " is not chosen because the node is (being) decommissioned");
    return false;
  }
  long remaining = node.getRemaining()
      - (node.getBlocksScheduled() * blockSize);
  // Does the node have enough remaining space?
  if (blockSize * FSConstants.MIN_BLOCKS_FOR_WRITE > remaining) {
    logr.debug("Node " + NodeBase.getPath(node)
        + " is not chosen because the node does not have enough space");
    return false;
  }
  // Is the node currently too heavily loaded?
  if (considerLoad) {
    double avgLoad = 0;
    int size = clusterMap.getNumOfLeaves();
    if (size != 0) {
      avgLoad = (double) fs.getTotalLoad() / size;
    }
    if (node.getXceiverCount() > (2.0 * avgLoad)) {
      logr.debug("Node " + NodeBase.getPath(node)
          + " is not chosen because the node is too busy");
      return false;
    }
  }
  // Too many of the nodes already chosen to hold replicas of the
  // current block sit on the same rack as this node
  String rackname = node.getNetworkLocation();
  int counter = 1;
  for (Iterator<DatanodeDescriptor> iter = results.iterator(); iter.hasNext();) {
    Node result = iter.next();
    if (rackname.equals(result.getNetworkLocation())) {
      counter++;
    }
  }
  if (counter > maxTargetPerLoc) {
    logr.debug("Node " + NodeBase.getPath(node)
        + " is not chosen because the rack has too many chosen nodes");
    return false;
  }
  return true;
}
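
To see the four checks (decommission state, remaining space, load, per-rack quota) in isolation, here is a minimal, self-contained Java sketch that mirrors the same decision logic outside of Hadoop. CandidateNode is a hypothetical stand-in for DatanodeDescriptor, the constant MIN_BLOCKS_FOR_WRITE = 5 is assumed to match FSConstants, and the cluster-average load is passed in directly instead of being computed from clusterMap and fs.getTotalLoad().

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for DatanodeDescriptor; NOT a Hadoop class.
class CandidateNode {
    String name;
    String rack;             // e.g. "/rack1"
    boolean decommissioning; // decommission in progress or finished
    long remainingBytes;     // free space reported by the node
    long blocksScheduled;    // blocks already scheduled to be written here
    int xceiverCount;        // active transfer threads (load indicator)

    CandidateNode(String name, String rack, boolean decommissioning,
                  long remainingBytes, long blocksScheduled, int xceiverCount) {
        this.name = name;
        this.rack = rack;
        this.decommissioning = decommissioning;
        this.remainingBytes = remainingBytes;
        this.blocksScheduled = blocksScheduled;
        this.xceiverCount = xceiverCount;
    }
}

public class ReplicaTargetSketch {
    // Assumed to mirror FSConstants.MIN_BLOCKS_FOR_WRITE.
    static final int MIN_BLOCKS_FOR_WRITE = 5;

    static boolean isGoodTarget(CandidateNode node, long blockSize,
                                int maxTargetPerLoc, double avgLoad,
                                List<CandidateNode> alreadyChosen) {
        // 1. Skip nodes that are (being) decommissioned.
        if (node.decommissioning) {
            return false;
        }
        // 2. Require room for MIN_BLOCKS_FOR_WRITE more blocks after
        //    subtracting space already promised to scheduled blocks.
        long remaining = node.remainingBytes - node.blocksScheduled * blockSize;
        if (blockSize * MIN_BLOCKS_FOR_WRITE > remaining) {
            return false;
        }
        // 3. Reject nodes whose transfer load exceeds twice the cluster average.
        if (node.xceiverCount > 2.0 * avgLoad) {
            return false;
        }
        // 4. Reject the node if its rack already holds too many chosen targets
        //    (the candidate itself counts as one, hence counter starts at 1).
        int counter = 1;
        for (CandidateNode chosen : alreadyChosen) {
            if (node.rack.equals(chosen.rack)) {
                counter++;
            }
        }
        return counter <= maxTargetPerLoc;
    }

    public static void main(String[] args) {
        long blockSize = 64L * 1024 * 1024; // 64 MB, the classic HDFS default
        List<CandidateNode> chosen = new ArrayList<>();
        chosen.add(new CandidateNode("dn1", "/rack1", false, 10_000_000_000L, 0, 2));

        // dn2 sits on a different rack and has plenty of space: accepted.
        CandidateNode dn2 = new CandidateNode("dn2", "/rack2", false,
                10_000_000_000L, 0, 2);
        // dn3 shares /rack1 with dn1; with maxTargetPerLoc = 1 it is rejected.
        CandidateNode dn3 = new CandidateNode("dn3", "/rack1", false,
                10_000_000_000L, 0, 2);

        double avgLoad = 2.0; // pretend cluster-wide average xceiver count
        System.out.println(isGoodTarget(dn2, blockSize, 1, avgLoad, chosen)); // true
        System.out.println(isGoodTarget(dn3, blockSize, 1, avgLoad, chosen)); // false
    }
}

Note that the rack counter starts at 1 because the candidate node itself is counted toward its rack, so with maxTargetPerLoc = 1 any node that shares a rack with an already chosen target is rejected, which is what spreads replicas across racks.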