Commit

[#156] Convert /dump output to JSON

pjeli authored Nov 2, 2018
1 parent 2a3e68c commit b79d0c5
Showing 8 changed files with 267 additions and 266 deletions.
43 changes: 42 additions & 1 deletion docs/REST_Endpoints/Dump.md
@@ -3,6 +3,47 @@
 `/dump` is a GET only call that only READER users can access.
 It takes a `?path=<path>` argument that represents a single INode in the file system.
 
-Response code is 200 and a plaintext representation of all information about that INode.
+Response code is 200 and a JSON representation of all information about that INode.
+Example response:
+```json
+{
+  "accessTime": "Dec 31, 1969 4:00:00 PM",
+  "aclsCount": "NONE",
+  "children": [
+    "dir0",
+    "dir1",
+    "dir2",
+    "dir4",
+    "dir5",
+    "dir6",
+    "dir7",
+    "dir8",
+    "dir9"
+  ],
+  "dsQuota": -1,
+  "isWithSnapshot": true,
+  "modTime": "Dec 31, 1969 4:00:00 PM",
+  "nodeId": 16385,
+  "nsQuota": 9223372036854775807,
+  "path": "/",
+  "permisssions": "root:supergroup:rwxr-xr-x",
+  "snapshottable": true,
+  "storagePolicy": {
+    "id": 7,
+    "name": "HOT",
+    "storageTypes": [
+      "DISK"
+    ],
+    "creationFallbacks": [],
+    "replicationFallbacks": [
+      "ARCHIVE"
+    ],
+    "copyOnCreateFile": false
+  },
+  "type": "directory",
+  "xAttrs": "NONE"
+}
+```
+
 
 Response code of 403 means you are not authorized to view this endpoint.
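
For illustration only, a minimal Java client for this endpoint might look like the sketch below. The host and port (localhost:8080) are placeholders for a real NNAnalytics instance, and the lack of any authentication handling is an assumption; a deployment with a login in front would need to pass credentials.

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;

/** Hypothetical client: issues GET /dump?path=... and prints the JSON body. */
public class DumpClient {
  public static void main(String[] args) throws IOException {
    // Placeholder host and port; point this at a real NNAnalytics instance.
    URL url = new URL("http://localhost:8080/dump?path=/");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    if (conn.getResponseCode() == 403) {
      throw new IOException("403: READER access is required for /dump.");
    }
    try (BufferedReader reader =
        new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      // A 200 response carries the INode details as a single JSON document.
      String json = reader.lines().collect(Collectors.joining("\n"));
      System.out.println(json);
    }
  }
}
```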
@@ -25,10 +25,12 @@
 import java.util.Collection;
 import java.util.Date;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.hdfs.server.namenode.queries.Histograms;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.io.IOUtils;

@@ -50,52 +52,45 @@ public void dumpINodeInDetail(String path, HttpServletResponse resp) throws IOEx
       writer.flush();
       return;
     }
+    Map<String, Object> nodeDetails = new TreeMap<>();
     INode node = namesystem.getFSDirectory().getINode(path);
-    writer.write("Full Path: " + node.getFullPathName() + "\n");
-    writer.write("Permissions: " + node.getPermissionStatus().toString() + "\n");
-    writer.write("Access Time: " + new Date(node.getAccessTime()) + "\n");
-    writer.write("Mod Time: " + new Date(node.getModificationTime()) + "\n");
-    writer.write("ID: " + node.getId() + "\n");
-    writer.write("Parent: " + node.getParentString() + "\n");
-    writer.write("Namespace Quota: " + node.getQuotaCounts().get(Quota.NAMESPACE) + "\n");
-    writer.write("Diskspace Quota: " + node.getQuotaCounts().get(Quota.DISKSPACE) + "\n");
+    nodeDetails.put("path", node.getFullPathName());
+    nodeDetails.put("permisssions", node.getPermissionStatus().toString());
+    nodeDetails.put("accessTime", new Date(node.getAccessTime()));
+    nodeDetails.put("modTime", new Date(node.getModificationTime()));
+    nodeDetails.put("nodeId", node.getId());
+    nodeDetails.put("nsQuota", node.getQuotaCounts().get(Quota.NAMESPACE));
+    nodeDetails.put("dsQuota", node.getQuotaCounts().get(Quota.DISKSPACE));
     AclFeature aclFeature = node.getAclFeature();
-    writer.write("ACLs: " + ((aclFeature == null) ? "NONE" : aclFeature.getEntries()) + "\n");
+    nodeDetails.put("acls", ((aclFeature == null) ? "NONE" : aclFeature.getEntries()));
     if (node.isFile()) {
+      nodeDetails.put("type", "file");
       INodeFile file = node.asFile();
-      writer.write("Under Construction?: " + file.isUnderConstruction() + "\n");
-      writer.write("Under Snapshot?: " + file.isWithSnapshot() + "\n");
-      writer.write("File Size: " + file.computeFileSize() + "\n");
-      writer.write(
-          "File Size w/o UC Block: " + file.computeFileSizeNotIncludingLastUcBlock() + "\n");
-      writer.write("Replication Factor: " + file.getFileReplication() + "\n");
-      writer.write("Number of Blocks: " + file.getBlocks().length + "\n");
-      writer.write(
-          "Blocks:\n"
-              + Arrays.stream(file.getBlocks())
-                  .map(
-                      k ->
-                          k.getBlockName()
-                              + "_"
-                              + k.getGenerationStamp()
-                              + " "
-                              + k.getNumBytes()
-                              + "\n")
-                  .collect(Collectors.toList()));
+      nodeDetails.put("underConstruction", file.isUnderConstruction());
+      nodeDetails.put("isWithSnapshot", file.isWithSnapshot());
+      nodeDetails.put("fileSize", file.computeFileSize());
+      nodeDetails.put("replicationFactor", file.getFileReplication());
+      nodeDetails.put("numBlocks", file.getBlocks().length);
+      nodeDetails.put(
+          "blocks",
+          Arrays.stream(file.getBlocks())
+              .map(k -> k.getBlockName() + "_" + k.getGenerationStamp() + " " + k.getNumBytes())
+              .collect(Collectors.toList()));
     } else {
+      nodeDetails.put("type", "directory");
       INodeDirectory dir = node.asDirectory();
-      writer.write("Has Quotas?: " + dir.isWithQuota() + "\n");
-      writer.write("Is Snapshottable?: " + dir.isSnapshottable() + "\n");
-      writer.write("Under Snapshot?: " + dir.isWithSnapshot() + "\n");
-      writer.write("Number of Children: " + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID) + "\n");
-      writer.write(
-          "Children:\n"
-              + StreamSupport.stream(
-                      dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
-                  .map(child -> child.getFullPathName() + "\n")
-                  .collect(Collectors.toList()));
-      writer.flush();
+      nodeDetails.put("snapshottable", dir.isSnapshottable());
+      nodeDetails.put("isWithSnapshot", dir.isWithSnapshot());
+      nodeDetails.put(
+          "children",
+          StreamSupport.stream(
+                  dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
+              .map(INode::getLocalName)
+              .collect(Collectors.toList()));
     }
+    String json = Histograms.toJson(nodeDetails);
+    writer.write(json);
+    writer.flush();
   } finally {
     IOUtils.closeStream(writer);
   }
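
The shape of the change is the same in every affected loader: the ad-hoc writer.write(...) lines become entries in a TreeMap<String, Object> that is serialized once at the end, which is also why the keys in the documentation example above come out in alphabetical order. Histograms.toJson itself is not part of this diff; a minimal sketch of such a helper, assuming a Gson-backed serializer (an assumption, not NNA's confirmed implementation), could look like this:

```java
import com.google.gson.Gson;
import java.util.Map;
import java.util.TreeMap;

/** Hypothetical stand-in for Histograms.toJson; the real helper lives in NNA's queries package. */
public final class JsonSketch {
  private static final Gson GSON = new Gson();

  /** One-pass serialization; TreeMap iteration order yields alphabetically sorted keys. */
  public static String toJson(Map<String, Object> details) {
    return GSON.toJson(details);
  }

  public static void main(String[] args) {
    Map<String, Object> nodeDetails = new TreeMap<>();
    nodeDetails.put("type", "directory");
    nodeDetails.put("path", "/");
    System.out.println(toJson(nodeDetails)); // {"path":"/","type":"directory"}
  }
}
```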
@@ -26,11 +26,13 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.queries.Histograms;
 import org.apache.hadoop.hdfs.server.namenode.queries.StorageTypeHistogram;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Canceler;
@@ -54,59 +56,50 @@ public void dumpINodeInDetail(String path, HttpServletResponse resp) throws IOEx
       writer.flush();
       return;
     }
+    Map<String, Object> nodeDetails = new TreeMap<>();
     INode node = namesystem.getFSDirectory().getINode(path);
-    writer.write("Full Path: " + node.getFullPathName() + "\n");
-    writer.write("Permissions: " + node.getPermissionStatus().toString() + "\n");
-    writer.write("Access Time: " + new Date(node.getAccessTime()) + "\n");
-    writer.write("Mod Time: " + new Date(node.getModificationTime()) + "\n");
-    writer.write("ID: " + node.getId() + "\n");
-    writer.write(
-        "Storage Policy: "
-            + BlockStoragePolicySuite.createDefaultSuite().getPolicy(node.getStoragePolicyID())
-            + "\n");
-    writer.write("Parent: " + node.getParentString() + "\n");
-    writer.write("Namespace Quota: " + node.getQuotaCounts().get(Quota.NAMESPACE) + "\n");
-    writer.write("Diskspace Quota: " + node.getQuotaCounts().get(Quota.DISKSPACE) + "\n");
+    nodeDetails.put("path", node.getFullPathName());
+    nodeDetails.put("permisssions", node.getPermissionStatus().toString());
+    nodeDetails.put("accessTime", new Date(node.getAccessTime()));
+    nodeDetails.put("modTime", new Date(node.getModificationTime()));
+    nodeDetails.put("nodeId", node.getId());
+    nodeDetails.put(
+        "storagePolicy",
+        BlockStoragePolicySuite.createDefaultSuite().getPolicy(node.getStoragePolicyID()));
+    nodeDetails.put("nsQuota", node.getQuotaCounts().get(Quota.NAMESPACE));
+    nodeDetails.put("dsQuota", node.getQuotaCounts().get(Quota.DISKSPACE));
     XAttrFeature xattrs = node.getXAttrFeature();
-    writer.write("XAttrs: " + ((xattrs == null) ? "NONE" : xattrs.getXAttrs()) + "\n");
+    nodeDetails.put("xAttrs", ((xattrs == null) ? "NONE" : xattrs.getXAttrs()));
     AclFeature aclFeature = node.getAclFeature();
-    writer.write(
-        "ACLs (size): " + ((aclFeature == null) ? "NONE" : aclFeature.getEntries()) + "\n");
+    nodeDetails.put("acls", ((aclFeature == null) ? "NONE" : aclFeature.getEntries()));
     if (node.isFile()) {
+      nodeDetails.put("type", "file");
       INodeFile file = node.asFile();
-      writer.write("Under Construction?: " + file.isUnderConstruction() + "\n");
-      writer.write("Under Snapshot?: " + file.isWithSnapshot() + "\n");
-      writer.write("File Size: " + file.computeFileSize() + "\n");
-      writer.write(
-          "File Size w/o UC Block: " + file.computeFileSizeNotIncludingLastUcBlock() + "\n");
-      writer.write("Replication Factor: " + file.getFileReplication() + "\n");
-      writer.write("Number of Blocks: " + file.getBlocks().length + "\n");
-      writer.write(
-          "Blocks:\n"
-              + Arrays.stream(file.getBlocks())
-                  .map(
-                      k ->
-                          k.getBlockName()
-                              + "_"
-                              + k.getGenerationStamp()
-                              + " "
-                              + k.getNumBytes()
-                              + "\n")
-                  .collect(Collectors.toList()));
+      nodeDetails.put("underConstruction", file.isUnderConstruction());
+      nodeDetails.put("isWithSnapshot", file.isWithSnapshot());
+      nodeDetails.put("fileSize", file.computeFileSize());
+      nodeDetails.put("replicationFactor", file.getFileReplication());
+      nodeDetails.put("numBlocks", file.getBlocks().length);
+      nodeDetails.put(
+          "blocks",
+          Arrays.stream(file.getBlocks())
+              .map(k -> k.getBlockName() + "_" + k.getGenerationStamp() + " " + k.getNumBytes())
+              .collect(Collectors.toList()));
     } else {
+      nodeDetails.put("type", "directory");
       INodeDirectory dir = node.asDirectory();
-      writer.write("Has Quotas?: " + dir.isWithQuota() + "\n");
-      writer.write("Is Snapshottable?: " + dir.isSnapshottable() + "\n");
-      writer.write("Under Snapshot?: " + dir.isWithSnapshot() + "\n");
-      writer.write("Number of Children: " + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID) + "\n");
-      writer.write(
-          "Children:\n"
-              + StreamSupport.stream(
-                      dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
-                  .map(child -> child.getFullPathName() + "\n")
-                  .collect(Collectors.toList()));
-      writer.flush();
+      nodeDetails.put("snapshottable", dir.isSnapshottable());
+      nodeDetails.put("isWithSnapshot", dir.isWithSnapshot());
+      nodeDetails.put(
+          "children",
+          StreamSupport.stream(
+                  dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
+              .map(INode::getLocalName)
+              .collect(Collectors.toList()));
     }
+    String json = Histograms.toJson(nodeDetails);
+    writer.write(json);
+    writer.flush();
   } finally {
     IOUtils.closeStream(writer);
   }
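
This variant also records the INode's storage policy, which is where the nested storagePolicy object in the documentation example comes from: Hadoop's default BlockStoragePolicySuite resolves the policy id to a full BlockStoragePolicy. A small illustration of that lookup (id 7 maps to HOT in Hadoop's default suite):

```java
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

/** Demo only: resolve a storage policy id the same way the dump code does. */
public class PolicyDemo {
  public static void main(String[] args) {
    // Id 7 is HOT (DISK storage, ARCHIVE replication fallback) in the default suite.
    BlockStoragePolicy policy = BlockStoragePolicySuite.createDefaultSuite().getPolicy((byte) 7);
    System.out.println(policy.getName()); // HOT
  }
}
```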
@@ -26,11 +26,13 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.queries.Histograms;
 import org.apache.hadoop.hdfs.server.namenode.queries.StorageTypeHistogram;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Canceler;
@@ -54,59 +56,50 @@ public void dumpINodeInDetail(String path, HttpServletResponse resp) throws IOEx
       writer.flush();
       return;
     }
+    Map<String, Object> nodeDetails = new TreeMap<>();
     INode node = namesystem.getFSDirectory().getINode(path);
-    writer.write("Full Path: " + node.getFullPathName() + "\n");
-    writer.write("Permissions: " + node.getPermissionStatus().toString() + "\n");
-    writer.write("Access Time: " + new Date(node.getAccessTime()) + "\n");
-    writer.write("Mod Time: " + new Date(node.getModificationTime()) + "\n");
-    writer.write("ID: " + node.getId() + "\n");
-    writer.write(
-        "Storage Policy: "
-            + BlockStoragePolicySuite.createDefaultSuite().getPolicy(node.getStoragePolicyID())
-            + "\n");
-    writer.write("Parent: " + node.getParentString() + "\n");
-    writer.write("Namespace Quota: " + node.getQuotaCounts().getNameSpace() + "\n");
-    writer.write("Diskspace Quota: " + node.getQuotaCounts().getStorageSpace() + "\n");
+    nodeDetails.put("path", node.getFullPathName());
+    nodeDetails.put("permisssions", node.getPermissionStatus().toString());
+    nodeDetails.put("accessTime", new Date(node.getAccessTime()));
+    nodeDetails.put("modTime", new Date(node.getModificationTime()));
+    nodeDetails.put("nodeId", node.getId());
+    nodeDetails.put(
+        "storagePolicy",
+        BlockStoragePolicySuite.createDefaultSuite().getPolicy(node.getStoragePolicyID()));
+    nodeDetails.put("nsQuota", node.getQuotaCounts().getNameSpace());
+    nodeDetails.put("dsQuota", node.getQuotaCounts().getStorageSpace());
     XAttrFeature xattrs = node.getXAttrFeature();
-    writer.write("XAttrs: " + ((xattrs == null) ? "NONE" : xattrs.getXAttrs()) + "\n");
+    nodeDetails.put("xAttrs", ((xattrs == null) ? "NONE" : xattrs.getXAttrs()));
     AclFeature aclFeature = node.getAclFeature();
-    writer.write(
-        "ACLs (size): " + ((aclFeature == null) ? "NONE" : aclFeature.getEntriesSize()) + "\n");
+    nodeDetails.put("aclsCount", ((aclFeature == null) ? "NONE" : aclFeature.getEntriesSize()));
     if (node.isFile()) {
+      nodeDetails.put("type", "file");
       INodeFile file = node.asFile();
-      writer.write("Under Construction?: " + file.isUnderConstruction() + "\n");
-      writer.write("Under Snapshot?: " + file.isWithSnapshot() + "\n");
-      writer.write("File Size: " + file.computeFileSize() + "\n");
-      writer.write(
-          "File Size w/o UC Block: " + file.computeFileSizeNotIncludingLastUcBlock() + "\n");
-      writer.write("Replication Factor: " + file.getFileReplication() + "\n");
-      writer.write("Number of Blocks: " + file.getBlocks().length + "\n");
-      writer.write(
-          "Blocks:\n"
-              + Arrays.stream(file.getBlocks())
-                  .map(
-                      k ->
-                          k.getBlockName()
-                              + "_"
-                              + k.getGenerationStamp()
-                              + " "
-                              + k.getNumBytes()
-                              + "\n")
-                  .collect(Collectors.toList()));
+      nodeDetails.put("underConstruction", file.isUnderConstruction());
+      nodeDetails.put("isWithSnapshot", file.isWithSnapshot());
+      nodeDetails.put("fileSize", file.computeFileSize());
+      nodeDetails.put("replicationFactor", file.getFileReplication());
+      nodeDetails.put("numBlocks", file.getBlocks().length);
+      nodeDetails.put(
+          "blocks",
+          Arrays.stream(file.getBlocks())
+              .map(k -> k.getBlockName() + "_" + k.getGenerationStamp() + " " + k.getNumBytes())
+              .collect(Collectors.toList()));
     } else {
+      nodeDetails.put("type", "directory");
       INodeDirectory dir = node.asDirectory();
-      writer.write("Has Quotas?: " + dir.isWithQuota() + "\n");
-      writer.write("Is Snapshottable?: " + dir.isSnapshottable() + "\n");
-      writer.write("Under Snapshot?: " + dir.isWithSnapshot() + "\n");
-      writer.write("Number of Children: " + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID) + "\n");
-      writer.write(
-          "Children:\n"
-              + StreamSupport.stream(
-                      dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
-                  .map(child -> child.getFullPathName() + "\n")
-                  .collect(Collectors.toList()));
-      writer.flush();
+      nodeDetails.put("snapshottable", dir.isSnapshottable());
+      nodeDetails.put("isWithSnapshot", dir.isWithSnapshot());
+      nodeDetails.put(
+          "children",
+          StreamSupport.stream(
+                  dir.getChildrenList(Snapshot.CURRENT_STATE_ID).spliterator(), false)
+              .map(INode::getLocalName)
+              .collect(Collectors.toList()));
    }
+    String json = Histograms.toJson(nodeDetails);
+    writer.write(json);
+    writer.flush();
   } finally {
     IOUtils.closeStream(writer);
   }
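
One consumer-facing quirk of the new output: fields such as acls, aclsCount, and xAttrs hold the string "NONE" when the feature is absent, and a number, list, or object otherwise. Clients should probe the element type before reading. A sketch using Gson's tree model follows; the defensive-parsing pattern is an assumption about how consumers would handle this, not code from this commit.

```java
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

/** Demo only: handle the "NONE" sentinel that /dump uses for absent features. */
public class DumpFieldDemo {
  public static void main(String[] args) {
    JsonObject dump =
        new JsonParser().parse("{\"aclsCount\": \"NONE\", \"nodeId\": 16385}").getAsJsonObject();
    JsonElement acls = dump.get("aclsCount");
    if (acls.isJsonPrimitive() && acls.getAsJsonPrimitive().isString()) {
      System.out.println("No ACLs set"); // the "NONE" sentinel
    } else {
      System.out.println("ACL count: " + acls.getAsInt()); // a real entry count
    }
  }
}
```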