
Hadoop上路_09 - Operating HDFS on Ubuntu from Win7

vigiles
Published on 2013/05/21 16:16

The operations are almost identical to those performed inside Ubuntu.

1. Query:

        1) Method 1 - specify the HDFS URI directly:

package com.cuiweiyou.hdfs;

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs1 {
	private static FileSystem hdfs;

	public static void main(String[] args) throws Exception {
		// 1. Create the configuration
		Configuration conf = new Configuration();
		// 2. Create the FileSystem (HDFS URI specified manually)
		hdfs = FileSystem.get(URI.create("hdfs://192.168.1.251:9000/"), conf);
		// 3. Traverse the files and directories on HDFS
		FileStatus[] fs = hdfs.listStatus(new Path("/"));
		if (fs.length > 0) {
			for (FileStatus f : fs) {
				showDir(f);
			}
		}
	}

	private static void showDir(FileStatus fs) throws Exception {
		Path path = fs.getPath();
		System.out.println(path);
		// If it is a directory
		if (fs.isDir()) {
			FileStatus[] f = hdfs.listStatus(path);
			if (f.length > 0) {
				for (FileStatus file : f) {
					showDir(file);
				}
			}
		}
	}
}

        2) Method 2 - specify an HDFS configuration file:

                (1) Create a core-site.xml file on the Win7 system:

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>fs.default.name</name>
		<!-- The IP of the Ubuntu host -->
		<value>hdfs://192.168.1.251:9000</value>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/home/hm/hadoop-${user.name}</value>
	</property>
</configuration>

                (2) Use Java to traverse the directories and files on the Ubuntu HDFS:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs2 {
	private static FileSystem hdfs;

	public static void main(String[] args) throws Exception {
		// 1. Create the configuration
		Configuration conf = new Configuration();
		// 2. Load the specified configuration file
		conf.addResource(new Path("c:/core-site.xml"));
		// 3. Create the FileSystem
		hdfs = FileSystem.get(conf);
		// 4. Traverse the files and directories on HDFS
		FileStatus[] fs = hdfs.listStatus(new Path("/"));
		if (fs.length > 0) {
			for (FileStatus f : fs) {
				showDir(f);
			}
		}
	}

	private static void showDir(FileStatus fs) throws Exception {
		Path path = fs.getPath();
		System.out.println(path);
		// If it is a directory
		if (fs.isDir()) {
			FileStatus[] f = hdfs.listStatus(path);
			if (f.length > 0) {
				for (FileStatus file : f) {
					showDir(file);
				}
			}
		}
	}
}

  

        3) Check whether a directory or file with a given name exists in HDFS:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs3 {
	private static FileSystem hdfs;

	public static void main(String[] args) throws Exception {
		// 1. Configuration
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		// 2. FileSystem
		hdfs = FileSystem.get(conf);
		// 3. Traverse HDFS directories and files
		FileStatus[] fs = hdfs.listStatus(new Path("/"));
		if (fs.length > 0) {
			for (FileStatus f : fs) {
				showDir(f);
			}
		}
	}

	private static void showDir(FileStatus fs) throws Exception {
		Path path = fs.getPath();
		// If it is a directory
		if (fs.isDir()) {
			if (path.getName().equals("system")) {
				System.out.println(path + "是目录");
			}
			FileStatus[] f = hdfs.listStatus(path);
			if (f.length > 0) {
				for (FileStatus file : f) {
					showDir(file);
				}
			}
		} else {
			if (path.getName().equals("test.txt")) {
				System.out.println(path + "是文件");
			}
		}
	}
}
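
If the path in question is already known, the tree walk above is optional: FileSystem can be asked about a single path directly. A minimal sketch reusing the same configuration (the class name TestExistsHdfs and the /test.txt path are only illustrative):

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestExistsHdfs {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem hdfs = FileSystem.get(conf);
		// Ask about a known path directly instead of walking the whole tree
		Path p = new Path("/test.txt");
		if (hdfs.exists(p)) {
			// isDir() is the Hadoop 1.x API used elsewhere in this article
			System.out.println(p + (hdfs.getFileStatus(p).isDir() ? " is a directory" : " is a file"));
		} else {
			System.out.println(p + " does not exist");
		}
		hdfs.close();
	}
}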

  

        4) View the last modification time of HDFS files:

package com.cuiweiyou.hdfs;

import java.net.URI;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs4 {
	private static FileSystem hdfs;

	public static void main(String[] args) throws Exception {
		// 1. Configuration
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		// 2. FileSystem
		hdfs = FileSystem.get(conf);
		// 3. Traverse HDFS directories and files
		FileStatus[] fs = hdfs.listStatus(new Path("/"));
		if (fs.length > 0) {
			for (FileStatus f : fs) {
				showDir(f);
			}
		}
	}

	private static void showDir(FileStatus fs) throws Exception {
		Path path = fs.getPath();
		// Get the last modification time
		long time = fs.getModificationTime();
		System.out.println("Last modification time of the HDFS file: " + new Date(time));
		System.out.println(path);
		if (fs.isDir()) {
			FileStatus[] f = hdfs.listStatus(path);
			if (f.length > 0) {
				for (FileStatus file : f) {
					showDir(file);
				}
			}
		}
	}
}

   

        5) View the status of a specified HDFS file:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs5 {

	public static void main(String[] args) throws Exception {
		// 1. Configuration
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		// 2. FileSystem
		FileSystem fs = FileSystem.get(conf);
		// 3. An existing file
		Path path = new Path("/test.txt");
		// 4. File status
		FileStatus status = fs.getFileStatus(path);
		// 5. File blocks
		BlockLocation[] blockLocations = fs.getFileBlockLocations(status, 0, status.getLen());
		int blockLen = blockLocations.length;
		System.err.println("Number of blocks: " + blockLen);
		for (int i = 0; i < blockLen; i++) {
			// Host names
			String[] hosts = blockLocations[i].getHosts();
			for (String host : hosts) {
				System.err.println("主机:"+host);
			}
		}
	}
}
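
Besides the host names, each BlockLocation also reports where its block sits inside the file. A small sketch of how the loop above could additionally print the offset and length of every block (the printed values depend on the file and the configured block size):

		for (int i = 0; i < blockLen; i++) {
			// Byte range of block i within the file
			System.err.println("block " + i + ": offset=" + blockLocations[i].getOffset()
					+ ", length=" + blockLocations[i].getLength());
			for (String host : blockLocations[i].getHosts()) {
				System.err.println("Host: " + host);
			}
		}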

  

        6) Read the contents of a txt file on HDFS:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs6 {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem fs = FileSystem.get(conf);
		Path path = new Path("/test.txt");
		// Use an HDFS data input stream (read) to read the HDFS file
		FSDataInputStream is = fs.open(path);
		FileStatus status = fs.getFileStatus(path);
		byte[] buffer = new byte[Integer.parseInt(String.valueOf(status.getLen()))];
		is.readFully(0, buffer);
		is.close();
		fs.close();
		System.out.println(new String(buffer));
	}
}
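
Reading the whole file into one byte array sized via getLen() is fine for a small txt file; for anything larger, streaming it is safer. A rough alternative sketch, assuming the same /test.txt and Hadoop's IOUtils helper (which the original code does not use):

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class TestReadHdfsStream {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem fs = FileSystem.get(conf);
		FSDataInputStream is = fs.open(new Path("/test.txt"));
		// Copy the stream to stdout in 4 KB chunks; close=false keeps System.out open
		IOUtils.copyBytes(is, System.out, 4096, false);
		is.close();
		fs.close();
	}
}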

  

2. Upload:

        1) Upload a file from Win7 to the Ubuntu HDFS:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs7 {
	private static FileSystem hdfs;

	public static void main(String[] args) throws Exception {
		// 1. Create the configuration
		Configuration conf = new Configuration();
		// 2. Load the specified configuration file
		conf.addResource(new Path("c:/core-site.xml"));
		// 3. Create the FileSystem
		hdfs = FileSystem.get(conf);
		// 4. The local file
		Path src = new Path("f:/民间秘方.txt");
		// 5. The target path
		Path dst = new Path("/home");
		// 6. Upload the file
		if (!hdfs.exists(new Path("/home/民间秘方.txt"))) {
			hdfs.copyFromLocalFile(src, dst);
			System.err.println("File uploaded successfully to: " + conf.get("fs.default.name") + dst);
		} else {
			System.err.println(conf.get("fs.default.name") + dst + " already contains 民间秘方.txt");
		}
		// 7. Traverse the files and directories on HDFS
		FileStatus[] fs = hdfs.listStatus(new Path("/"));
		if (fs.length > 0) {
			for (FileStatus f : fs) {
				showDir(f);
			}
		}
	}

	private static void showDir(FileStatus fs) throws Exception {
		Path path = fs.getPath();
		System.out.println(path);
		// If it is a directory
		if (fs.isDir()) {
			FileStatus[] f = hdfs.listStatus(path);
			if (f.length > 0) {
				for (FileStatus file : f) {
					showDir(file);
				}
			}
		}
	}
}
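
Downloading works the same way in reverse. A minimal sketch using copyToLocalFile (the class name and the local folder f:/tmp are only examples; Hadoop may also write a .crc checksum file next to the download):

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestDownloadHdfs {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem hdfs = FileSystem.get(conf);
		// Copy a file from HDFS back to the local Win7 file system
		Path src = new Path("/home/民间秘方.txt");
		Path dst = new Path("f:/tmp");
		hdfs.copyToLocalFile(src, dst);
		System.out.println("Downloaded " + src + " to " + dst);
		hdfs.close();
	}
}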

        2) Remotely create directories and files on the Ubuntu HDFS from Win7:

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestQueryHdfs8 {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem hdfs = FileSystem.get(conf);
		// Use an HDFS data output stream (write) to create a directory under the HDFS root, with a file inside it
		FSDataOutputStream out = hdfs.create(new Path("/eminem/hip-hop.txt"));
		// Write one line of data to the file; the bytes must be UTF-8 encoded
		// out.writeUTF("Hell使用UTF-8"); // does not work?
		out.write("痞子阿姆 Hello !".getBytes("UTF-8"));
		// Close the first stream before reusing the variable, otherwise its data may never be flushed
		out.close();
		out = hdfs.create(new Path("/alizee.txt"));
		out.write("艾莉婕 Hello !".getBytes("UTF-8"));
		out.close();
		hdfs.close();
		hdfs = FileSystem.get(conf);
		FileStatus[] fileStatus = hdfs.listStatus(new Path("/"));
		// Label the outer loop; only the top two directory levels are traversed
		outside: for (FileStatus file : fileStatus) {
			Path filePath = file.getPath();
			System.out.println(filePath);
			if (file.isDir()) {
				FileStatus[] fs = hdfs.listStatus(filePath);
				for (FileStatus f : fs) {
					Path fp= f.getPath();
					System.out.println(fp);
					// Read the hip-hop.txt file
					if(fp.getName().equals("hip-hop.txt")){
						FSDataInputStream fsis = hdfs.open(fp);
						FileStatus status = hdfs.getFileStatus(fp);
						byte[] buffer = new byte[Integer.parseInt(String.valueOf(status.getLen()))];
						fsis.readFully(0, buffer);
						fsis.close();
						hdfs.close();
						System.out.println(new String(buffer));
						break outside;	// break out of the outer loop
					}
				}
			}
		}
	}
}

  

3. Modify:

        1) Rename a file:

public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	conf.addResource(new Path("c:/core-site.xml"));
	FileSystem fs = FileSystem.get(conf);
	// Rename: fs.rename(source path, destination path)
	boolean rename = fs.rename(new Path("/alizee.txt"), new Path("/adele.txt"));
	System.out.println(rename);
}

        2) Delete a file:

public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	conf.addResource(new Path("c:/core-site.xml"));
	FileSystem fs = FileSystem.get(conf);
	// Delete
	//fs.delete(new Path("/new_test.txt"));	// deprecated
	// Deferred delete: executed when the FileSystem is closed / the program exits
	boolean exit = fs.deleteOnExit(new Path("/eminem/hip-hop.txt"));
	System.out.println("deleteOnExit scheduled: " + exit);
	// Recursive delete: delete(path, true); with false a non-empty directory is not deleted and a RemoteException/IOException is thrown
	boolean delete = fs.delete(new Path("/eminem"), true);
	System.out.println("delete executed: " + delete);
}
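
To avoid the exception mentioned in the comment, the path can also be checked first. A small sketch in the same snippet style, guarding delete() with exists() (the /adele.txt path comes from the rename example above):

public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	conf.addResource(new Path("c:/core-site.xml"));
	FileSystem fs = FileSystem.get(conf);
	Path target = new Path("/adele.txt");
	// Only delete if the path exists; false = do not recurse (it is a plain file)
	if (fs.exists(target)) {
		System.out.println("deleted: " + fs.delete(target, false));
	}
}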


4. WordCount example:

        1) Code:

package com.cuiweiyou.hdfs;

import java.io.IOException;
import java.util.StringTokenizer;	// tokenizer
import org.apache.hadoop.conf.Configuration;	// configuration
import org.apache.hadoop.fs.Path;	// path
import org.apache.hadoop.io.IntWritable;	// writable int
import org.apache.hadoop.io.Text;	// writable text
import org.apache.hadoop.mapreduce.Job;	// job
import org.apache.hadoop.mapreduce.Mapper;	// mapper
import org.apache.hadoop.mapreduce.Reducer;	// reducer
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;	// reads the input files
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;	// writes the output files

public class TestQueryHdfs11 {

	/**
	 * Inner class: the Mapper
	 * Mapper<KEY_IN, VALUE_IN, KEY_OUT, VALUE_OUT>
	 */
	public static class MyMapper extends Mapper<Object, Text, Text, IntWritable> {
		private final static IntWritable one = new IntWritable(1); // roughly an int
		private Text word = new Text(); // roughly a String

		/**
	 * Override the map method
		 */
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
			System.err.println(key + "," + value);
			// Tokenizer: splits the string on whitespace by default
			StringTokenizer itr = new StringTokenizer(value.toString());
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());
				context.write(word, one);
			}
		};
	}

	/**
	 * Inner class: the Reducer
	 * Reducer<KEY_IN, VALUE_IN, KEY_OUT, VALUE_OUT>
	 */
	public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		private IntWritable result = new IntWritable();

		/**
	 * Override the reduce method
		 */
		protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
			System.err.println(key + "," + values);
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			result.set(sum);
			context.write(key, result); // this is the final result
		};
	}

	public static void main(String[] args) throws Exception {
		// Declare the configuration
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		// Declare the Job
		Job job = new Job(conf, "Word Count");
		// Set the job's main class
		job.setJarByClass(TestQueryHdfs11.class);
		// Set the Mapper class
		job.setMapperClass(MyMapper.class);
		// Optional: use the Reducer as a Combiner
		job.setCombinerClass(MyReducer.class);
		// Set the Reducer class
		job.setReducerClass(MyReducer.class);
		// The output key type is Text (String-like)
		job.setOutputKeyClass(Text.class);
		// The output value type is IntWritable (int-like)
		job.setOutputValueClass(IntWritable.class);
		// Set the input and output paths
		FileInputFormat.setInputPaths(job, new Path("/test.txt"));
		FileOutputFormat.setOutputPath(job, new Path("/out"));
		// Run
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
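
After the job completes, the counts end up under /out on HDFS. A hedged sketch for printing that result from Win7, assuming the single reducer's output file is named part-r-00000 and using a hypothetical class name (check the actual file name under /out if it differs):

package com.cuiweiyou.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class TestReadWordCountResult {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		conf.addResource(new Path("c:/core-site.xml"));
		FileSystem fs = FileSystem.get(conf);
		// part-r-00000 is the usual name of the single reducer's output file
		FSDataInputStream is = fs.open(new Path("/out/part-r-00000"));
		IOUtils.copyBytes(is, System.out, 4096, false);
		is.close();
		fs.close();
	}
}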

        2) Run on Hadoop:

        Error 1: Unable to load native-hadoop library

                Hadoop cannot load the native library for the Win7 platform, so it falls back to the built-in Java classes.
                UserGroupInformation then reports a PrivilegedActionException for user hm: failed to set the staging path permissions:

13/05/20 16:48:34 WARN util.NativeCodeLoader:
  Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/05/20 16:48:34 ERROR security.UserGroupInformation:
  PriviledgedActionException as:hm cause:java.io.IOException:
  Failed to set permissions of path:
  \home\hm\hadoop-hm\mapred\staging\hm-975542536\.staging to 0700
  Exception in thread "main" java.io.IOException:
  Failed to set permissions of path:
  \home\hm\hadoop-hm\mapred\staging\hm-975542536\.staging to 0700
	at org.apache.hadoop.fs.FileUtil.checkReturnValue(FileUtil.java:689)
	at org.apache.hadoop.fs.FileUtil.setPermission(FileUtil.java:662)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:509)
	at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:344)
	at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:189)
	at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:116)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:918)
	at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:912)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
	at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:912)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:500)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:530)
	at cn.cvu.hdfs.TestQueryHdfs11.main(TestQueryHdfs11.java:69)

        Solution:

                Method 1: modify the source file %hadoop%/src/core/org/apache/hadoop/fs/FileUtil.java and build hadoop-core-1.1.2.jar by hand

                1. Comment out lines 662, 665, 670, 673, 678 and 681, and comment out the checkReturnValue method


                2. Recompile the hadoop source package

                        This attempt did not succeed; to be continued.

                Method 2: directly replace the org/apache/hadoop/fs/FileUtil.class file inside hadoop-core-1.1.2.jar

                File download: http://download.csdn.net/detail/vigiles/5422251
                1. Use WinRAR to open the hadoop-1.1.2\hadoop-core-1.1.2.jar\org\apache\hadoop\fs directory inside the jar
                2. Drag the modified FileUtil.class file in
                3. Replace and save
                4. Copy the jar to the %hadoop% directory on Ubuntu, replacing the original jar
                5. Copy the jar to the unpacked hadoop root directory that Eclipse on Win7 references, replacing the original jar
                6. Run stop-all.sh and then start-all.sh
                7. From Win7, Run on Hadoop again:

                Method 3: create the package org.apache.hadoop.fs under src, put FileUtil.java into it, and comment out the body of the checkReturnValue method:
                (same as the figure above).
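
All three methods amount to the same workaround: neutering the permission check in FileUtil. Roughly, the patched method ends up as in the sketch below; the exact signature should be copied from the FileUtil.java shipped with the hadoop-1.1.2 sources, so treat this as an approximation:

// org.apache.hadoop.fs.FileUtil (hadoop-1.1.2), with the check disabled
private static void checkReturnValue(boolean rv, File p, FsPermission permission) throws IOException {
	// The original body threw
	//   IOException("Failed to set permissions of path: " + p + " ...")
	// when rv was false. Leaving the method empty lets the local
	// \mapred\staging directory be created on Windows.
}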

        Error 2: Name node is in safe mode

                A safe mode exception:

13/05/21 15:27:55 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/05/21 15:27:55 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/05/21 15:27:55 WARN mapred.JobClient: No job jar file set.  User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/05/21 15:27:55 INFO input.FileInputFormat: Total input paths to process : 1
13/05/21 15:27:55 WARN snappy.LoadSnappy: Snappy native library not loaded
13/05/21 15:27:55 INFO mapred.JobClient: Running job: job_local_0001
13/05/21 15:27:55 WARN mapred.LocalJobRunner: job_local_0001
org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot create directory /out/_temporary. Name node is in safe mode.
Use "hadoop dfsadmin -safemode leave" to turn safe mode off.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:2204)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:2178)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.mkdirs(NameNode.java:857)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:601)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:578)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1393)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1389)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1149)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1387)

	at org.apache.hadoop.ipc.Client.call(Client.java:1107)
	at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:229)
	at $Proxy1.mkdirs(Unknown Source)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:601)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:85)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:62)
	at $Proxy1.mkdirs(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:1426)
	at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:332)
	at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1126)
	at org.apache.hadoop.mapred.FileOutputCommitter.setupJob(FileOutputCommitter.java:52)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:186)
13/05/21 15:27:56 INFO mapred.JobClient:  map 0% reduce 0%
13/05/21 15:27:56 INFO mapred.JobClient: Job complete: job_local_0001
13/05/21 15:27:56 INFO mapred.JobClient: Counters: 0

                Solution:

                        Turn off Hadoop's safe mode:

hm@hm-ubuntu:~$ hadoop dfsadmin -safemode leave

        3) Run on Hadoop:

 

  

- end 
