Create Hadoop image, configure Hadoop.

Install Hadoop and create the Hadoop base image.

Change-Id: Idb719e663764f45a7386c69b94246e412c73692f

Configure and start Hadoop.

Change-Id: Ie76b25ec1fb6ad8ff531310b7fc4a17de95c3ed7
Author: Tim Miller
Date:   2012-04-05 09:26:39 -07:00
Parent: a2d43dca7e
Commit: 3052b2759b

11 changed files with 454 additions and 45 deletions

.gitignore

@@ -2,3 +2,6 @@
*.pyc
*.class
target/*
.classpath
.settings
.project

README.md

@@ -1,15 +1,16 @@
# Overview
MapReduce as a Service
MapReduce as a Service:
* Spin up a Hadoop cluster on HPCloud
* Submit MapReduce jobs to a cluster
* Store job results in Swift
* Tear down clusters
# Prerequisites
* Install openstack-sdk (this still needs to be published to a Maven server somewhere):
git clone git@github.com:echohead/openstack-java-sdk.git
cd openstack-java-sdk
mvn install -Dmaven.test.skip=true
* For the command-line clients:
$ sudo gem install json && sudo gem install httparty
# Running The Application
@@ -30,3 +31,24 @@ Run MRaaS with the following commands:
* A rest client which exercises the service:
./bin/client --help
# Setting up eclipse
* mvn eclipse:eclipse
# Submitting Code
All merges to master go through Gerrit (https://review.stackforge.org/):
git review -v
# Gotchas
Public IPs are re-used, which can cause ssh to complain.
To prevent this, add the following to ~/.ssh/config:
Host 15.185.*.*
UserKnownHostsFile /dev/null
StrictHostKeyChecking no

bin/client

@@ -22,7 +22,6 @@ end
options = {}
OptionParser.new do |opts|
opts.on('-h', '--help', 'Display this screen') do
puts opts
exit
end
opts.on('-c', '--create', 'Create a cluster') { options[:create] = true }
@@ -31,17 +30,21 @@ OptionParser.new do |opts|
opts.on('-i', '--id ID', 'Specify cluster id for --show or --destroy') { |id| options[:id] = id }
end.parse!
raise "You must specify --id for this action" if ((!options[:id]) and (options[:show] or options[:destroy]))
raise "You must specify an action" if (!options[:create] and !options[:show] and !options[:destroy])
def pretty(resp)
JSON.pretty_generate(JSON.parse(resp.body))
end
if options[:create] then
puts MRClient.post('/clusters', :body => create_cluster_request.to_json)
puts pretty MRClient.post('/clusters', :body => create_cluster_request.to_json)
end
if options[:show] then
puts MRClient.get("/cluster/#{options[:id]}")
puts pretty MRClient.get("/cluster/#{options[:id]}")
end
if options[:destroy] then
puts MRClient.delete("/cluster/#{options[:id]}")
puts pretty MRClient.delete("/cluster/#{options[:id]}")
end


@@ -1,6 +1,6 @@
#!/usr/bin/ruby
# start the service in your shell.
dir = File.dirname(__FILE__)
jar = "#{dir}/../target/mraas-0.0.1-SNAPSHOT.jar"
jar = "#{dir}/../target/hpcloud-mraas-0.0.1-SNAPSHOT.jar"
config = "#{dir}/../config/dev-config.yml"
exec "java -jar #{jar} server #{config}"


@@ -1,6 +1,6 @@
#!/usr/bin/ruby
# apply the schema to the h2 db specified by dev-config.yml
dir = File.dirname(__FILE__)
jar = "#{dir}/../target/mraas-0.0.1-SNAPSHOT.jar"
jar = "#{dir}/../target/hpcloud-mraas-0.0.1-SNAPSHOT.jar"
config = "#{dir}/../config/dev-config.yml"
exec "java -jar #{jar} setup #{config}"

config/dev-config.yml

@@ -7,6 +7,9 @@ database:
url: jdbc:h2:target/mraas
logging:
level: INFO
loggers:
com.hpcloud.mraas: DEBUG
console:
enabled: true
threshold: ALL

pom.xml

@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.hpcloud</groupId>
<artifactId>mraas</artifactId>
<artifactId>hpcloud-mraas</artifactId>
<version>0.0.1-SNAPSHOT</version>
<dependencies>
@@ -37,10 +37,12 @@
<version>3.2.1</version>
</dependency>
<!-- nova client -->
<dependency>
<groupId>org.jclouds</groupId>
<artifactId>jclouds-allcompute</artifactId>
<version>1.5.0-alpha.1</version>
<version>1.5.0-SNAPSHOT</version>
<!-- <version>1.5.0-alpha.1</version> -->
</dependency>
<!-- swift -->
@@ -56,19 +58,32 @@
<version>0.20.203.0</version>
</dependency>
<!-- handy pojo / bean annotations -->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>0.11.0</version>
</dependency>
<!-- ssh / scp client -->
<dependency>
<groupId>ch.ethz.ganymed</groupId>
<artifactId>ganymed-ssh2</artifactId>
<version>build210</version>
</dependency>
<dependency>
<groupId>sshtools</groupId>
<artifactId>j2ssh-core</artifactId>
<version>0.2.9</version>
</dependency>
</dependencies>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
</properties>
<build>
<plugins>

src/main/java/com/hpcloud/mraas/app/Destroyer.java

@@ -18,7 +18,7 @@ public class Destroyer extends Thread {
}
public void run() {
for (String id : cluster.getNodeIds().values()) client.destroyHost(id);
// TODO
}
}

src/main/java/com/hpcloud/mraas/app/Provisioner.java

@@ -3,7 +3,12 @@ package com.hpcloud.mraas.app;
import com.hpcloud.mraas.domain.Cluster;
import com.hpcloud.mraas.nova.Client;
import com.hpcloud.mraas.persistence.ClusterDAO;
import com.hpcloud.mraas.ssh.SSH;
import com.yammer.dropwizard.logging.Log;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.jclouds.openstack.nova.v1_1.domain.Server;
@@ -13,9 +18,15 @@ public class Provisioner extends Thread {
}
private class ProvisionerRunnable extends Thread {
private final Log LOG = Log.forClass(ProvisionerRunnable.class);
private Cluster cluster;
private Client client;
private ClusterDAO store;
private Map<String, Server> servers = new HashMap<String, Server>();
private Set<String> serverNames = new HashSet<String>();
private String privateKey;
private String BASE_IMAGE_NAME = "Ubuntu Lucid 10.04 LTS Server 64-bit 20111212";
private String HADOOP_IMAGE_NAME = "Hadoop";
public ProvisionerRunnable(Cluster cluster, ClusterDAO store) {
this.cluster = cluster;
@@ -27,21 +38,216 @@ public class Provisioner extends Thread {
}
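// End-to-end provisioning pipeline: create a keypair, build (or reuse) the
// Hadoop base image, boot the cluster from it, attach floating IPs, then wire
// up ssh, /etc/hosts, and Hadoop config before starting the daemons.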
public void run() {
provisionNodes();
System.out.println("......... done ........");
setupKeyPair();
servers = createImage(HADOOP_IMAGE_NAME);
servers = provisionNodes();
// sleep(60);
servers = assignPublicIps();
sleep(10);
setupSSHKeys();
updateEtcHosts();
configureHadoop();
startHadoop();
LOG.info("Finished provisioning cluster {}", cluster.getId());
}
private void createHost(String name) {
Server s = client.createHost(name);
cluster.getNodeIds().put(name, s.getId());
private void sleep(int seconds) {
try {
Thread.sleep(1000 * seconds);
} catch (Exception e) { }
}
public void provisionNodes() {
Server master = client.createHost("master");
private void setupKeyPair() {
privateKey = client.newPrivateKey("hadoop");
LOG.debug("Created private key: {}", privateKey);
}
private Server createHost(String name, String imageName) {
LOG.debug("Creating host {} for cluster {}", name, cluster.getId());
Server s = client.createHost(name, "hadoop", imageName);
serverNames.add(name);
return s;
}
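// Boots "master" plus numNodes workers ("hadoop0".."hadoopN") from the Hadoop
// image and blocks until every server is ACTIVE.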
public Map<String, Server> provisionNodes() {
createHost("master", HADOOP_IMAGE_NAME);
for (Integer i = 0; i < cluster.getNumNodes(); i++) {
client.createHost("hadoop" + i.toString());
createHost("hadoop" + i.toString(), HADOOP_IMAGE_NAME);
}
store.updateNodes(cluster.getId(), cluster.getNodeIds());
servers = client.refreshServers(serverNames);
for (Server s : servers.values()) {
client.waitForServer(s.getId());
}
return client.refreshServers(serverNames);
}
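// Attaches a floating IP to each server, then re-fetches server details so
// callers see the new addresses.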
private Map<String, Server> assignPublicIps() {
for (Server s : servers.values()) {
LOG.debug("Assigning public ips to {} for cluster {}", s.getName(), cluster.getId());
client.assignPublicIP(s);
}
return client.refreshServers(serverNames);
}
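// Builds the Hadoop base image once and reuses it on later runs: boot a stock
// Ubuntu host, install Hadoop over ssh, snapshot it under imageName, then
// discard the builder host. The 30s sleep is presumably to let sshd come up.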
private Map<String, Server> createImage(String imageName) {
if (client.imageExists(imageName)) return servers;
LOG.info("Building hadoop base image");
Server s = createHost("image", BASE_IMAGE_NAME);
client.waitForServer(s.getId());
client.assignPublicIP(s);
sleep(30);
servers = client.refreshServers(serverNames);
installHadoop(servers.get("image"));
client.createImage(imageName, servers.get("image"));
client.waitForImage(imageName);
client.destroyHost(servers.get("image"));
serverNames.remove("image");
return client.refreshServers(serverNames);
}
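// One-time setup baked into the base image: hadoop user and group, Sun Java 6,
// the Hadoop 0.20.203.0 tarball, working directories, and environment defaults.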
private void installHadoop(Server s) {
String ip = client.publicIP(s);
//dramatically speed up ssh login
SSH.ssh_cmd(ip, privateKey, "echo \"UseDNS no\" >> /etc/ssh/sshd_config && service ssh restart");
SSH.ssh_cmd(ip, privateKey, "addgroup hadoop && " +
"adduser --system --shell /bin/bash --ingroup hadoop hadoop &&" +
"echo \"hadoop ALL=(ALL) NOPASSWD:ALL\" >> /etc/sudoers");
// sun java 6
SSH.ssh_cmd(ip, privateKey, "add-apt-repository ppa:ferramroberto/java && " +
"apt-get update && " +
"echo \"sun-java6-jdk shared/accepted-sun-dlj-v1-1 select true\" | debconf-set-selections && " +
"echo \"sun-java6-jre shared/accepted-sun-dlj-v1-1 select true\" | debconf-set-selections && " +
"DEBIAN_FRONTEND=noninteractive aptitude install -y -f sun-java6-jre sun-java6-bin sun-java6-jdk && " +
"update-java-alternatives -s java-6-sun");
// hadoop distribution
SSH.ssh_cmd(ip, privateKey, "cd /usr/local && " +
"wget http://apache.mesi.com.ar//hadoop/common/hadoop-0.20.203.0/hadoop-0.20.203.0rc1.tar.gz && " +
"tar zxf hadoop-0.20.203.0rc1.tar.gz && " +
"mv hadoop-0.20.203.0 hadoop && " +
"chown -R hadoop:hadoop hadoop");
// hadoop dirs
SSH.ssh_cmd(ip, privateKey, "cd /usr/local/hadoop/conf && chown hadoop:hadoop ./*");
SSH.ssh_cmd(ip, privateKey, "mkdir -p /usr/local/hadoop/logs && chown -R hadoop:hadoop /usr/local/hadoop/logs");
SSH.ssh_cmd(ip, privateKey, "mkdir -p /usr/local/tmp/hadoop && chown -R hadoop:hadoop /usr/local/tmp/hadoop");
// environment
SSH.ssh_cmd(ip, privateKey, "echo -e \"export HADOOP_HOME=/usr/local/hadoop\nexport JAVA_HOME=/usr/lib/jvm/java-6-sun\nexport PATH=\\$PATH:\\$HADOOP_HOME/bin\" >> /home/hadoop/.bashrc && chown hadoop:hadoop /home/hadoop/.bashrc");
SSH.ssh_cmd(ip, privateKey, "echo -e \"export JAVA_HOME=/usr/lib/jvm/java-6-sun\nexport HADOOP_OPTS=-Djava.net.preferIPv4Stack=true\" >> /usr/local/hadoop/conf/hadoop-env.sh");
SSH.ssh_cmd(ip, privateKey, "mkdir /home/hadoop/.ssh && chown hadoop:hadoop /home/hadoop/.ssh && " +
"echo -e \"UserKnownHostsFile /dev/null\nStrictHostKeyChecking no\" >> /home/hadoop/.ssh/config && " +
"chown hadoop:hadoop /home/hadoop/.ssh/config");
}
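// Writes core-site.xml, hdfs-site.xml, and mapred-site.xml (pointing at the
// master's public IP) plus the masters/slaves lists (private IPs) to every
// node in the cluster.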
private void configureHadoop() {
String masterIP = client.publicIP(servers.get("master"));
String hadoopConfDir = "/usr/local/hadoop/conf/";
String coreConfig = coreSiteConfig(masterIP).replace("\n", "\\n");
String hdfsConfig = HDFSSiteConfig().replace("\n", "\\n");
String mapRedConfig = mapRedSiteConfig(masterIP).replace("\n", "\\n");
String slaveIPs = "";
for (Server s : servers.values()) {
if (s.getName().matches("^hadoop\\d+")) {
slaveIPs = slaveIPs + client.privateIP(s) + "\\n";
}
}
for (Server s : servers.values()) {
SSH.ssh_cmd(client.publicIP(s), privateKey, "echo -e \"" + coreConfig + "\\n\" > " + hadoopConfDir + "core-site.xml");
SSH.ssh_cmd(client.publicIP(s), privateKey, "echo -e \"" + hdfsConfig + "\\n\" > " + hadoopConfDir + "hdfs-site.xml");
SSH.ssh_cmd(client.publicIP(s), privateKey, "echo -e \"" + mapRedConfig + "\\n\" > " + hadoopConfDir + "mapred-site.xml");
SSH.ssh_cmd(client.publicIP(s), privateKey, "echo -e \"" + client.privateIP(servers.get("master")) + "\\n\" > " + hadoopConfDir + "masters");
SSH.ssh_cmd(client.publicIP(s), privateKey, "echo -e \"" + slaveIPs + "\\n\" > " + hadoopConfDir + "slaves");
}
}
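// Formats the HDFS namenode, then launches all daemons from the master;
// start-all.sh reaches the slaves over the passwordless ssh set up in
// setupSSHKeys().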
private void startHadoop() {
String master_ip = client.publicIP(servers.get("master"));
SSH.ssh_cmd(master_ip, privateKey, "sudo -u hadoop /usr/local/hadoop/bin/hadoop namenode -format");
SSH.ssh_cmd(master_ip, privateKey, "sudo -u hadoop /usr/local/hadoop/bin/start-all.sh");
}
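// Generates an rsa key for the hadoop user on the master and appends its
// public key to authorized_keys on every node, enabling the passwordless
// ssh that start-all.sh needs.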
private void setupSSHKeys() {
String master_ip = client.publicIP(servers.get("master"));
SSH.ssh_cmd(master_ip, privateKey, "sudo -u hadoop ssh-keygen -t rsa -N '' -f /home/hadoop/.ssh/id_rsa");
String keyFile = SSH.getRemoteFile(master_ip, privateKey, "/home/hadoop/.ssh/id_rsa.pub");
for (Server s : servers.values()) {
String ip = client.publicIP(s);
SSH.ssh_cmd(ip, privateKey, "mkdir /home/hadoop/.ssh && chown -R hadoop:hadoop /home/hadoop/.ssh");
SSH.ssh_cmd(ip, privateKey, "echo \"" + keyFile + "\" >> /home/hadoop/.ssh/authorized_keys");
SSH.ssh_cmd(ip, privateKey, "chown hadoop:hadoop /home/hadoop/.ssh/authorized_keys && chmod 600 /home/hadoop/.ssh/authorized_keys");
}
}
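// Appends every node's private IP and .novalocal hostname to /etc/hosts on
// all nodes so cluster members can resolve one another by name.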
private void updateEtcHosts() {
String update_cmd = "echo -e \"";
for (Server s : servers.values()) {
String private_ip = client.privateIP(s);
update_cmd = update_cmd + client.privateIP(s) + " " + s.getName() + ".novalocal" + "\\n";
}
update_cmd = update_cmd + "\" >> /etc/hosts";
for (Server s : servers.values()) {
SSH.ssh_cmd(client.publicIP(s), privateKey, update_cmd);
}
}
private String coreSiteConfig(String masterIP) {
return ""
+ "<configuration>\n"
+ " <property>\n"
+ " <name>fs.default.name</name>\n"
+ " <value>hdfs://" + masterIP + ":9000</value>\n"
+ " </property>\n"
+ " <property>\n"
+ " <name>hadoop.tmp.dir</name>\n"
+ " <value>/usr/local/tmp/hadoop</value>\n"
+ " </property>\n"
+ "</configuration>\n";
}
private String mapRedSiteConfig(String masterIP) {
return ""
+ "<configuration>\n"
+ " <property>\n"
+ " <name>mapred.job.tracker</name>\n"
+ " <value>" + masterIP + ":9001</value>\n"
+ " </property>\n"
+ " <property>\n"
+ " <name>mapred.reduce.tasks</name>\n"
+ " <value>15</value>\n"
+ " </property>\n"
+ " <property>\n"
+ " <name>mapred.tasktracker.map.tasks.maximum</name>\n"
+ " <value>3</value>\n"
+ " </property>\n"
+ " <property>\n"
+ " <name>mapred.tasktracker.reduce.tasks.maximum</name>\n"
+ " <value>3</value>\n"
+ " </property>\n"
+ "</configuration>\n";
}
private String HDFSSiteConfig() {
return ""
+ "<configuration>\n"
+ " <property>\n"
+ " <name>dfs.replication</name>\n"
+ " <value>3</value>\n"
+ " </property>\n"
+ "</configuration>";
}
}
}

src/main/java/com/hpcloud/mraas/nova/Client.java

@@ -2,20 +2,33 @@ package com.hpcloud.mraas.nova;
import com.yammer.dropwizard.logging.Log;
import org.jclouds.compute.ComputeService;
import org.jclouds.compute.ComputeServiceContext;
import org.jclouds.compute.ComputeServiceContextFactory;
import org.jclouds.openstack.nova.NovaAsyncClient;
import org.jclouds.openstack.nova.v1_1.NovaClient;
import org.jclouds.openstack.nova.v1_1.domain.Address;
import org.jclouds.openstack.nova.v1_1.domain.Flavor;
import org.jclouds.openstack.nova.v1_1.domain.FloatingIP;
import org.jclouds.openstack.nova.v1_1.domain.Image;
import org.jclouds.openstack.nova.v1_1.domain.KeyPair;
import org.jclouds.openstack.nova.v1_1.domain.Server;
import org.jclouds.openstack.nova.v1_1.extensions.FloatingIPClient;
import org.jclouds.openstack.nova.v1_1.extensions.KeyPairClient;
import org.jclouds.openstack.nova.v1_1.features.FlavorClient;
import org.jclouds.openstack.nova.v1_1.features.ImageClient;
import org.jclouds.openstack.nova.v1_1.features.ServerClient;
import org.jclouds.openstack.nova.v1_1.options.CreateServerOptions;
import org.jclouds.rest.RestContext;
import lombok.val;
import java.util.Set;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Properties;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.inject.Module;
public class Client {
private static final Log LOG = Log.forClass(Client.class);
@@ -23,7 +36,6 @@ public class Client {
private NovaClient nova;
private String ZONE_ID = "az-1.region-a.geo-1";
private String FLAVOR_NAME = "standard.small";
private String IMAGE_NAME = "Ubuntu Lucid 10.04 LTS Server 64-bit 20111212";
private Set<Server> servers;
public Client(String tenant, String accessKey, String secretKey) {
@@ -38,42 +50,89 @@ public class Client {
private void sandbox() {
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
for (val s : serverClient.listServersInDetail()) {
System.out.println(s);
}
System.out.println(imageIdByName(IMAGE_NAME));
System.out.println(flavorIdByName(FLAVOR_NAME));
}
public void createHosts(Set<String> names) {
for (String name : names) {
createHost(name);
}
}
public Server createHost(String name) {
public Server createHost(String name, String keyPairName, String imageName) {
LOG.debug("creating server {}", name);
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
Server s = serverClient.createServer(name, imageIdByName(IMAGE_NAME), flavorIdByName(FLAVOR_NAME));
Server s = serverClient.createServer(name, imageIdByName(imageName), flavorIdByName(FLAVOR_NAME), (new CreateServerOptions()).keyPairName(keyPairName));
servers.add(s);
return s;
}
public void destroyHost(String id) {
public void destroyHost(Server server) {
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
serverClient.deleteServer(id);
serverClient.deleteServer(server.getId());
}
public String publicIP(Server s) {
Multimap<Address.Type, Address> addresses = s.getAddresses();
return addresses.get(Address.Type.PUBLIC).iterator().next().getAddr();
}
public String privateIP(Server s) {
Multimap<Address.Type, Address> addresses = s.getAddresses();
return addresses.get(Address.Type.PRIVATE).iterator().next().getAddr();
}
public Boolean imageExists(String name) {
return (imageIdByName(name) != null);
}
// returns imageId.
public String createImage(String name, Server server) {
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
return serverClient.createImageFromServer(name, server.getId());
}
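// Polls every 5s until the snapshot goes ACTIVE; returns early on ERROR
// (see TODO) rather than raising.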
public void waitForImage(String imageName) {
Image i;
do {
i = imageByName(imageName);
if (i.getStatus().equals(Image.Status.ERROR)) return; //TODO throw new Exception("error creating image");
LOG.debug("Waiting for image {}", imageName);
try { Thread.sleep(5000); } catch (Exception e) {}
} while (!i.getStatus().equals(Image.Status.ACTIVE));
}
public String imageIdByName(String name) {
Image i = imageByName(name);
return ((i == null) ? null : i.getId());
}
private Image imageByName(String name) {
ImageClient imageClient = this.nova.getImageClientForZone(ZONE_ID);
for (Image i : imageClient.listImagesInDetail()) {
if (i.getName().equals(name)) return i.getId();
if (i.getName().equals(name)) return i;
}
return null;
}
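// Polls every 5s until the server reaches ACTIVE; like waitForImage, an
// ERROR state currently returns silently.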
public void waitForServer(String serverID) {
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
Server s;
do {
s = serverClient.getServer(serverID);
if (s.getStatus().equals(Server.Status.ERROR)) return; // TODO
LOG.debug("Waiting for server {}", s.getName());
try { Thread.sleep(5000); } catch (Exception e) {}
} while (!s.getStatus().equals(Server.Status.ACTIVE));
}
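// Re-fetches full server details for the named hosts; objects returned by
// createServer go stale once statuses and addresses change.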
public Map<String, Server> refreshServers(Set<String> names) {
ServerClient serverClient = this.nova.getServerClientForZone(ZONE_ID);
Map<String, Server> res = new HashMap<String, Server>();
for (Server s : serverClient.listServersInDetail()) {
if (names.contains(s.getName())) {
res.put(s.getName(), s);
}
}
return res;
}
public String flavorIdByName(String name) {
FlavorClient flavorClient = this.nova.getFlavorClientForZone(ZONE_ID);
for (Flavor f : flavorClient.listFlavorsInDetail()) {
@@ -81,4 +140,23 @@ public class Client {
}
return null;
}
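// Creates a nova keypair and returns its private key; servers booted with
// this keypair name get the matching public key installed for root.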
public String newPrivateKey(String name) {
KeyPairClient kpClient = nova.getKeyPairExtensionForZone(ZONE_ID).get();
KeyPair keyPair = kpClient.createKeyPair(name);
return keyPair.getPrivateKey();
}
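// Reuses an existing unassigned floating IP when possible, allocating a new
// one only as a last resort.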
private FloatingIP getFreeIP() {
FloatingIPClient fipClient = nova.getFloatingIPExtensionForZone(ZONE_ID).get();
for (FloatingIP fip : fipClient.listFloatingIPs()) {
if (fip.getInstanceId() == null) return fip;
}
return fipClient.allocate();
}
public void assignPublicIP(Server s) {
FloatingIPClient fipClient = nova.getFloatingIPExtensionForZone(ZONE_ID).get();
fipClient.addFloatingIPToServer(getFreeIP().getIp(), s.getId());
}
}

src/main/java/com/hpcloud/mraas/ssh/SSH.java

@@ -0,0 +1,79 @@
package com.hpcloud.mraas.ssh;
import ch.ethz.ssh2.ChannelCondition;
import ch.ethz.ssh2.Connection;
import ch.ethz.ssh2.ConnectionInfo;
import ch.ethz.ssh2.SCPClient;
import ch.ethz.ssh2.Session;
import ch.ethz.ssh2.StreamGobbler;
import com.yammer.dropwizard.logging.Log;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
public class SSH {
private static final Log LOG = Log.forClass(SSH.class);
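// Runs a single command as root on the remote host, authenticating with the
// in-memory private key, and waits (up to 1000s) for it to exit.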
public static void ssh_cmd(String host, String privateKey, String command) {
LOG.debug("running {} on {}", command, host);
try {
Connection conn = getAuthedConnection(host, privateKey);
Session session = conn.openSession();
session.execCommand(command);
session.waitForCondition(ChannelCondition.EXIT_STATUS, 1000000);
/*
System.out.println(gobbleStream(session.getStdout()));
*/
System.out.println( "ExitCode: " + session.getExitStatus() );
// Close the session
session.close();
} catch (Exception e) {
System.out.println(e); //TODO
e.printStackTrace(System.out);
}
}
private static String gobbleStream(InputStream in) throws Exception {
StringBuilder sb = new StringBuilder();
InputStream read = new StreamGobbler(in);
BufferedReader br = new BufferedReader(new InputStreamReader(read));
String line = br.readLine();
while( line != null ) {
sb.append( line + "\n" );
line = br.readLine();
}
return sb.toString();
}
private static Connection getAuthedConnection(String host, String privateKey) throws Exception {
Connection conn = new Connection(host);
ConnectionInfo info = conn.connect();
conn.authenticateWithPublicKey("root", privateKey.toCharArray(), "");
return conn;
}
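// Copies a remote file into memory via scp and returns its contents as a
// String (used to fetch the master's public key).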
public static String getRemoteFile(String host, String privateKey, String remoteFile) {
LOG.debug("getting file {} from {}", remoteFile, host);
try {
Connection conn = getAuthedConnection(host, privateKey);
SCPClient scp = new SCPClient(conn);
OutputStream out = new ByteArrayOutputStream();
scp.get(remoteFile, out);
InputStream in = new ByteArrayInputStream(((ByteArrayOutputStream) out).toByteArray());
return gobbleStream(in);
} catch (Exception e) {
//TODO
System.out.println(e);
e.printStackTrace(System.out);
}
return null;
}
}