One-Hot Encoding, Standardization, PCA, and Clustering (Spark ML)

hblt-j
Published 2017/08/29 11:44

1. Import packages


import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.Row
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.Column
import org.apache.spark.sql.DataFrameReader
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.functions._
import org.apache.spark.sql.DataFrameStatFunctions
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.feature.StringIndexer
import org.apache.spark.ml.feature.OneHotEncoder
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.feature.MinMaxScaler
import org.apache.spark.ml.feature.StandardScaler
import org.apache.spark.ml.feature.PCA
import org.apache.spark.ml.clustering.KMeans

 

2. Load the data


val spark = SparkSession.builder().appName("Spark SQL basic example").config("spark.some.config.option", "some-value").getOrCreate()

    

// For implicit conversions like converting RDDs to DataFrames

import spark.implicits._

    

val data: DataFrame = spark.read.format("csv").option("header", true).load("hdfs://ns1/datafile/wangxiao/Affairs.csv")

data: org.apache.spark.sql.DataFrame = [affairs: string, gender: string ... 7 more fields]

    

data.cache

res0: data.type = [affairs: string, gender: string ... 7 more fields]

   

data.limit(10).show()

+-------+------+---+------------+--------+-------------+---------+----------+------+

|affairs|gender|age|yearsmarried|children|religiousness|education|occupation|rating|

+-------+------+---+------------+--------+-------------+---------+----------+------+

|      0|  male| 37|          10|      no|            3|       18|         7|     4|

|      0|female| 27|           4|      no|            4|       14|         6|     4|

|      0|female| 32|          15|     yes|            1|       12|         1|     4|

|      0|  male| 57|          15|     yes|            5|       18|         6|     5|

|      0|  male| 22|        0.75|      no|            2|       17|         6|     3|

|      0|female| 32|         1.5|      no|            2|       17|         5|     5|

|      0|female| 22|        0.75|      no|            2|       12|         1|     3|

|      0|  male| 57|          15|     yes|            2|       14|         4|     4|

|      0|female| 32|          15|     yes|            4|       16|         1|     2|

|      0|  male| 22|         1.5|      no|            4|       14|         4|     5|

+-------+------+---+------------+--------+-------------+---------+----------+------+

    

// Cast the column types, keeping the Double fields and the String fields grouped separately

val data1 = data.select(
  data("affairs").cast("Double"),
  data("age").cast("Double"),
  data("yearsmarried").cast("Double"),
  data("religiousness").cast("Double"),
  data("education").cast("Double"),
  data("occupation").cast("Double"),
  data("rating").cast("Double"),
  data("gender").cast("String"),
  data("children").cast("String"))

data1: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 7 more fields]

    

data1.printSchema()

root

 |-- affairs: double (nullable = true)

 |-- age: double (nullable = true)

 |-- yearsmarried: double (nullable = true)

 |-- religiousness: double (nullable = true)

 |-- education: double (nullable = true)

 |-- occupation: double (nullable = true)

 |-- rating: double (nullable = true)

 |-- gender: string (nullable = true)

 |-- children: string (nullable = true)

    

    

data1.limit(10).show

+-------+----+------------+-------------+---------+----------+------+------+--------+

|affairs| age|yearsmarried|religiousness|education|occupation|rating|gender|children|

+-------+----+------------+-------------+---------+----------+------+------+--------+

|    0.0|37.0|        10.0|          3.0|     18.0|       7.0|   4.0|  male|      no|

|    0.0|27.0|         4.0|          4.0|     14.0|       6.0|   4.0|female|      no|

|    0.0|32.0|        15.0|          1.0|     12.0|       1.0|   4.0|female|     yes|

|    0.0|57.0|        15.0|          5.0|     18.0|       6.0|   5.0|  male|     yes|

|    0.0|22.0|        0.75|          2.0|     17.0|       6.0|   3.0|  male|      no|

|    0.0|32.0|         1.5|          2.0|     17.0|       5.0|   5.0|female|      no|

|    0.0|22.0|        0.75|          2.0|     12.0|       1.0|   3.0|female|      no|

|    0.0|57.0|        15.0|          2.0|     14.0|       4.0|   4.0|  male|     yes|

|    0.0|32.0|        15.0|          4.0|     16.0|       1.0|   2.0|female|     yes|

|    0.0|22.0|         1.5|          4.0|     14.0|       4.0|   5.0|  male|      no|

+-------+----+------------+-------------+---------+----------+------+------+--------+

    

val dataDF = data1

dataDF: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 7 more fields]

    

dataDF.cache()

res4: dataDF.type = [affairs: double, age: double ... 7 more fields]
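As an aside, the manual casting above can often be avoided by letting the CSV reader infer column types. A minimal sketch, assuming the same HDFS path as above (note that inferSchema costs an extra pass over the file):

// Let Spark infer numeric columns instead of casting by hand
val inferred: DataFrame = spark.read.format("csv")
  .option("header", true)
  .option("inferSchema", true)
  .load("hdfs://ns1/datafile/wangxiao/Affairs.csv")
inferred.printSchema()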

 

3. Convert strings to numeric indices, then one-hot encode them; note that setDropLast is set to false. (The default, true, drops the last category to avoid collinearity in linear models; keeping all categories preserves a symmetric encoding for the distance-based clustering used later.)


// Convert the string column to a numeric index

val indexer = new StringIndexer().setInputCol("gender").setOutputCol("genderIndex").fit(dataDF)

indexer: org.apache.spark.ml.feature.StringIndexerModel = strIdx_27dba613193a

    

val indexed = indexer.transform(dataDF)

indexed: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 8 more fields]

    

// One-hot encode; note that setDropLast is set to false

val encoder = new OneHotEncoder().setInputCol("genderIndex").setOutputCol("genderVec").setDropLast(false)

encoder: org.apache.spark.ml.feature.OneHotEncoder = oneHot_155a53de3aef

    

val encoded = encoder.transform(indexed)

encoded: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 9 more fields]

    

encoded.show()

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+

|affairs| age|yearsmarried|religiousness|education|occupation|rating|gender|children|genderIndex|    genderVec|

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+

|    0.0|37.0|        10.0|          3.0|     18.0|       7.0|   4.0|  male|      no|        1.0|(2,[1],[1.0])|

|    0.0|27.0|         4.0|          4.0|     14.0|       6.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|32.0|        15.0|          1.0|     12.0|       1.0|   4.0|female|     yes|        0.0|(2,[0],[1.0])|

|    0.0|57.0|        15.0|          5.0|     18.0|       6.0|   5.0|  male|     yes|        1.0|(2,[1],[1.0])|

|    0.0|22.0|        0.75|          2.0|     17.0|       6.0|   3.0|  male|      no|        1.0|(2,[1],[1.0])|

|    0.0|32.0|         1.5|          2.0|     17.0|       5.0|   5.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|22.0|        0.75|          2.0|     12.0|       1.0|   3.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|57.0|        15.0|          2.0|     14.0|       4.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|

|    0.0|32.0|        15.0|          4.0|     16.0|       1.0|   2.0|female|     yes|        0.0|(2,[0],[1.0])|

|    0.0|22.0|         1.5|          4.0|     14.0|       4.0|   5.0|  male|      no|        1.0|(2,[1],[1.0])|

|    0.0|37.0|        15.0|          2.0|     20.0|       7.0|   2.0|  male|     yes|        1.0|(2,[1],[1.0])|

|    0.0|27.0|         4.0|          4.0|     18.0|       6.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|

|    0.0|47.0|        15.0|          5.0|     17.0|       6.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|

|    0.0|22.0|         1.5|          2.0|     17.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|27.0|         4.0|          4.0|     14.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|37.0|        15.0|          1.0|     17.0|       5.0|   5.0|female|     yes|        0.0|(2,[0],[1.0])|

|    0.0|37.0|        15.0|          2.0|     18.0|       4.0|   3.0|female|     yes|        0.0|(2,[0],[1.0])|

|    0.0|22.0|        0.75|          3.0|     16.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|22.0|         1.5|          2.0|     16.0|       5.0|   5.0|female|      no|        0.0|(2,[0],[1.0])|

|    0.0|27.0|        10.0|          2.0|     14.0|       1.0|   5.0|female|     yes|        0.0|(2,[0],[1.0])|

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+

only showing top 20 rows

   

val indexer1 = new StringIndexer().setInputCol("children").setOutputCol("childrenIndex").fit(encoded)

indexer1: org.apache.spark.ml.feature.StringIndexerModel = strIdx_55db099c07b7

    

val indexed1 = indexer1.transform(encoded)

indexed1: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 10 more fields]

    

val encoder1 = new OneHotEncoder().setInputCol("childrenIndex").setOutputCol("childrenVec").setDropLast(false)

    

val encoded1 = encoder1.transform(indexed1)

encoded1: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 11 more fields]

    

encoded1.show()

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+-------------+-------------+

|affairs| age|yearsmarried|religiousness|education|occupation|rating|gender|children|genderIndex|    genderVec|childrenIndex|  childrenVec|

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+-------------+-------------+

|    0.0|37.0|        10.0|          3.0|     18.0|       7.0|   4.0|  male|      no|        1.0|(2,[1],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|27.0|         4.0|          4.0|     14.0|       6.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|32.0|        15.0|          1.0|     12.0|       1.0|   4.0|female|     yes|        0.0|(2,[0],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|57.0|        15.0|          5.0|     18.0|       6.0|   5.0|  male|     yes|        1.0|(2,[1],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|22.0|        0.75|          2.0|     17.0|       6.0|   3.0|  male|      no|        1.0|(2,[1],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|32.0|         1.5|          2.0|     17.0|       5.0|   5.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|22.0|        0.75|          2.0|     12.0|       1.0|   3.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|57.0|        15.0|          2.0|     14.0|       4.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|32.0|        15.0|          4.0|     16.0|       1.0|   2.0|female|     yes|        0.0|(2,[0],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|22.0|         1.5|          4.0|     14.0|       4.0|   5.0|  male|      no|        1.0|(2,[1],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|37.0|        15.0|          2.0|     20.0|       7.0|   2.0|  male|     yes|        1.0|(2,[1],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|27.0|         4.0|          4.0|     18.0|       6.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|47.0|        15.0|          5.0|     17.0|       6.0|   4.0|  male|     yes|        1.0|(2,[1],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|22.0|         1.5|          2.0|     17.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|27.0|         4.0|          4.0|     14.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|37.0|        15.0|          1.0|     17.0|       5.0|   5.0|female|     yes|        0.0|(2,[0],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|37.0|        15.0|          2.0|     18.0|       4.0|   3.0|female|     yes|        0.0|(2,[0],[1.0])|          0.0|(2,[0],[1.0])|

|    0.0|22.0|        0.75|          3.0|     16.0|       5.0|   4.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|22.0|         1.5|          2.0|     16.0|       5.0|   5.0|female|      no|        0.0|(2,[0],[1.0])|          1.0|(2,[1],[1.0])|

|    0.0|27.0|        10.0|          2.0|     14.0|       1.0|   5.0|female|     yes|        0.0|(2,[0],[1.0])|          0.0|(2,[0],[1.0])|

+-------+----+------------+-------------+---------+----------+------+------+--------+-----------+-------------+-------------+-------------+

only showing top 20 rows

   

    

val encodeDF: DataFrame = encoded1

encodeDF: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 11 more fields]

    

encodeDF.show()  // prints the same 20 rows as encoded1.show() above

    

    

encodeDF.printSchema()

root

 |-- affairs: double (nullable = true)

 |-- age: double (nullable = true)

 |-- yearsmarried: double (nullable = true)

 |-- religiousness: double (nullable = true)

 |-- education: double (nullable = true)

 |-- occupation: double (nullable = true)

 |-- rating: double (nullable = true)

 |-- gender: string (nullable = true)

 |-- children: string (nullable = true)

 |-- genderIndex: double (nullable = true)

 |-- genderVec: vector (nullable = true)

 |-- childrenIndex: double (nullable = true)

 |-- childrenVec: vector (nullable = true)
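
The two StringIndexer/OneHotEncoder pairs above were applied one at a time. A minimal sketch of chaining the same four stages into a single ML Pipeline (the Pipeline import is an addition; it is not in the import list at the top):

import org.apache.spark.ml.Pipeline

// One fit/transform covers both indexers and both encoders
val pipeline = new Pipeline().setStages(Array(
  new StringIndexer().setInputCol("gender").setOutputCol("genderIndex"),
  new OneHotEncoder().setInputCol("genderIndex").setOutputCol("genderVec").setDropLast(false),
  new StringIndexer().setInputCol("children").setOutputCol("childrenIndex"),
  new OneHotEncoder().setInputCol("childrenIndex").setOutputCol("childrenVec").setDropLast(false)))
val encodedViaPipeline = pipeline.fit(dataDF).transform(dataDF)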

 

4. Assemble the fields into a feature vector


// Assemble the fields into a single feature vector

val assembler = new VectorAssembler().setInputCols(Array("affairs", "age", "yearsmarried", "religiousness", "education", "occupation", "rating", "genderVec", "childrenVec")).setOutputCol("features")

assembler: org.apache.spark.ml.feature.VectorAssembler = vecAssembler_df76d5d1e3f4

    

val vecDF: DataFrame = assembler.transform(encodeDF)

vecDF: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 12 more fields]

    

vecDF.select("features").show

+--------------------+

|            features|

+--------------------+

|[0.0,37.0,10.0,3....|

|[0.0,27.0,4.0,4.0...|

|[0.0,32.0,15.0,1....|

|[0.0,57.0,15.0,5....|

|[0.0,22.0,0.75,2....|

|[0.0,32.0,1.5,2.0...|

|[0.0,22.0,0.75,2....|

|[0.0,57.0,15.0,2....|

|[0.0,32.0,15.0,4....|

|[0.0,22.0,1.5,4.0...|

|[0.0,37.0,15.0,2....|

|[0.0,27.0,4.0,4.0...|

|[0.0,47.0,15.0,5....|

|[0.0,22.0,1.5,2.0...|

|[0.0,27.0,4.0,4.0...|

|[0.0,37.0,15.0,1....|

|[0.0,37.0,15.0,2....|

|[0.0,22.0,0.75,3....|

|[0.0,22.0,1.5,2.0...|

|[0.0,27.0,10.0,2....|

+--------------------+

only showing top 20 rows
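
A quick sanity check on the assembled vector: seven numeric columns plus two 2-dimensional one-hot vectors should give 11 dimensions, which matches the 11 loading rows printed in the PCA step below. One way to confirm, as a sketch:

// Expect 7 + 2 + 2 = 11 dimensions
import org.apache.spark.ml.linalg.Vector
println(vecDF.select("features").head.getAs[Vector](0).size)  // 11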

 

5. Standardization (zero mean, unit standard deviation)


// Standardization: zero mean, unit standard deviation

val scaler = new StandardScaler().setInputCol("features").setOutputCol("scaledFeatures").setWithStd(true).setWithMean(true)

scaler: org.apache.spark.ml.feature.StandardScaler = stdScal_43d3da1cd3bf

    

// Compute summary statistics by fitting the StandardScaler.

val scalerModel = scaler.fit(vecDF)

scalerModel: org.apache.spark.ml.feature.StandardScalerModel = stdScal_43d3da1cd3bf

    

// Center each feature to zero mean and scale it to unit standard deviation.

val scaledData: DataFrame = scalerModel.transform(vecDF)

scaledData: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 13 more fields]

    

scaledData.select("features", "scaledFeatures").show

+--------------------+--------------------+

|            features|      scaledFeatures|

+--------------------+--------------------+

|[0.0,37.0,10.0,3....|[-0.4413500298573...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|

|[0.0,32.0,15.0,1....|[-0.4413500298573...|

|[0.0,57.0,15.0,5....|[-0.4413500298573...|

|[0.0,22.0,0.75,2....|[-0.4413500298573...|

|[0.0,32.0,1.5,2.0...|[-0.4413500298573...|

|[0.0,22.0,0.75,2....|[-0.4413500298573...|

|[0.0,57.0,15.0,2....|[-0.4413500298573...|

|[0.0,32.0,15.0,4....|[-0.4413500298573...|

|[0.0,22.0,1.5,4.0...|[-0.4413500298573...|

|[0.0,37.0,15.0,2....|[-0.4413500298573...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|

|[0.0,47.0,15.0,5....|[-0.4413500298573...|

|[0.0,22.0,1.5,2.0...|[-0.4413500298573...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|

|[0.0,37.0,15.0,1....|[-0.4413500298573...|

|[0.0,37.0,15.0,2....|[-0.4413500298573...|

|[0.0,22.0,0.75,3....|[-0.4413500298573...|

|[0.0,22.0,1.5,2.0...|[-0.4413500298573...|

|[0.0,27.0,10.0,2....|[-0.4413500298573...|

+--------------------+--------------------+

only showing top 20 rows
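
MinMaxScaler is imported at the top but never used; it is the usual alternative when features should be rescaled into [0, 1] rather than centered and scaled. A minimal sketch on the same feature column:

// Rescale each feature linearly into [0, 1] (the defaults)
val mmScaler = new MinMaxScaler().setInputCol("features").setOutputCol("mmScaledFeatures")
val mmScaled = mmScaler.fit(vecDF).transform(vecDF)
mmScaled.select("features", "mmScaledFeatures").show(5)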

 

6. Principal component analysis (PCA)


// Principal components

val pca = new PCA().setInputCol("scaledFeatures").setOutputCol("pcaFeatures").setK(3).fit(scaledData)

    

pca.explainedVariance.values // explained variance of each component

res11: Array[Double] = Array(0.28779526464781313, 0.23798543640278289, 0.11742828783633019)
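
So the three components explain roughly 28.8%, 23.8%, and 11.7% of the variance. A one-line sketch of the cumulative share, useful for judging whether k = 3 keeps enough information:

// Running total of explained variance: ~0.288, ~0.526, ~0.643
pca.explainedVariance.values.scanLeft(0.0)(_ + _).tail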

    

pca.pc // loadings (correlations between the observed variables and the principal components)

res12: org.apache.spark.ml.linalg.DenseMatrix =

-0.12034310848156521  0.05153952289637974   0.6678769450480689

-0.42860623714516627  0.05417889891307473   -0.05592377098140197

-0.44404074412877986  0.1926596811059294    -0.017025575192258197

-0.12233707317255231  0.08053139375662526   -0.5093149296300096

-0.14664751606128462  -0.3872166556211308   -0.03406819489501708

-0.145543746024348    -0.43054860653839705  0.07841454709046872

0.17703994181974803   -0.12792784984216296  -0.5173229755329072

0.2459668445061567    0.4915809641798787    0.010477548320795945

-0.2459668445061567   -0.4915809641798787   -0.010477548320795945

-0.44420980045271047  0.240652448514566     -0.089356723885704

0.4442098004527103    -0.24065244851456588  0.08935672388570405

    

pca.extractParamMap()

res13: org.apache.spark.ml.param.ParamMap =

{

    pca_40a453a54776-inputCol: scaledFeatures,

    pca_40a453a54776-k: 3,

    pca_40a453a54776-outputCol: pcaFeatures

}

    

pca.params

res14: Array[org.apache.spark.ml.param.Param[_]] = Array(pca_40a453a54776__inputCol, pca_40a453a54776__k, pca_40a453a54776__outputCol)

    

   

    

val pcaDF: DataFrame = pca.transform(scaledData)

pcaDF: org.apache.spark.sql.DataFrame = [affairs: double, age: double ... 14 more fields]

    

pcaDF.cache()

res15: pcaDF.type = [affairs: double, age: double ... 14 more fields]

    

    

pcaDF.printSchema()

root

 |-- affairs: double (nullable = true)

 |-- age: double (nullable = true)

 |-- yearsmarried: double (nullable = true)

 |-- religiousness: double (nullable = true)

 |-- education: double (nullable = true)

 |-- occupation: double (nullable = true)

 |-- rating: double (nullable = true)

 |-- gender: string (nullable = true)

 |-- children: string (nullable = true)

 |-- genderIndex: double (nullable = true)

 |-- genderVec: vector (nullable = true)

 |-- childrenIndex: double (nullable = true)

 |-- childrenVec: vector (nullable = true)

 |-- features: vector (nullable = true)

 |-- scaledFeatures: vector (nullable = true)

 |-- pcaFeatures: vector (nullable = true)

    

    

pcaDF.select("features", "scaledFeatures", "pcaFeatures").show

+--------------------+--------------------+--------------------+

|            features|      scaledFeatures|         pcaFeatures|

+--------------------+--------------------+--------------------+

|[0.0,37.0,10.0,3....|[-0.4413500298573...|[0.27828160409293...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|[2.42147114101165...|

|[0.0,32.0,15.0,1....|[-0.4413500298573...|[0.18301418047489...|

|[0.0,57.0,15.0,5....|[-0.4413500298573...|[-2.9795960667914...|

|[0.0,22.0,0.75,2....|[-0.4413500298573...|[1.79299133565688...|

|[0.0,32.0,1.5,2.0...|[-0.4413500298573...|[2.65694237441759...|

|[0.0,22.0,0.75,2....|[-0.4413500298573...|[3.48234503794570...|

|[0.0,57.0,15.0,2....|[-0.4413500298573...|[-2.4215838062079...|

|[0.0,32.0,15.0,4....|[-0.4413500298573...|[-0.6964555195741...|

|[0.0,22.0,1.5,4.0...|[-0.4413500298573...|[2.18771069800414...|

|[0.0,37.0,15.0,2....|[-0.4413500298573...|[-2.4259075891377...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|[-0.7743038356008...|

|[0.0,47.0,15.0,5....|[-0.4413500298573...|[-2.6176149267534...|

|[0.0,22.0,1.5,2.0...|[-0.4413500298573...|[2.95788535193022...|

|[0.0,27.0,4.0,4.0...|[-0.4413500298573...|[2.50146472861263...|

|[0.0,37.0,15.0,1....|[-0.4413500298573...|[-0.5123817022008...|

|[0.0,37.0,15.0,2....|[-0.4413500298573...|[-0.9191740114044...|

|[0.0,22.0,0.75,3....|[-0.4413500298573...|[2.97391491782863...|

|[0.0,22.0,1.5,2.0...|[-0.4413500298573...|[3.17940505267806...|

|[0.0,27.0,10.0,2....|[-0.4413500298573...|[0.74585406839527...|

+--------------------+--------------------+--------------------+

only showing top 20 rows
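
Note that the clustering step below runs on scaledFeatures, not on pcaFeatures. If the goal is instead to cluster in the reduced 3-dimensional space, a sketch:

// K-means on the 3 principal components rather than the full 11-dim vector
val kmeansOnPca = new KMeans().setK(3).setSeed(1L).setFeaturesCol("pcaFeatures")
val pcaClusters = kmeansOnPca.fit(pcaDF).transform(pcaDF)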

 

7. Clustering


// Pay attention to the maximum iteration count; the silhouette coefficient (see the sketch after this step) is another criterion for choosing K

    

val KSSE = (2 to 20 by 1).toList.map { k =>
  // Train a k-means model for each candidate K.
  val kmeans = new KMeans().setK(k).setSeed(1L).setFeaturesCol("scaledFeatures")
  val model = kmeans.fit(scaledData)

  // Evaluate the clustering by computing the Within Set Sum of Squared Errors (WSSSE).
  val WSSSE = model.computeCost(scaledData)

  // K, configured max iterations, SSE, cluster assignment per row, per-cluster record counts, cluster centers
  (k, model.getMaxIter, WSSSE, model.summary.cluster, model.summary.clusterSizes, model.clusterCenters)
}

// Pick K from the SSE curve (elbow method)
val KSSEdf: DataFrame = KSSE.map { x => (x._1, x._2, x._3, x._5) }.toDF("K", "MaxIter", "SSE", "clusterSizes")

KSSE.foreach(println)
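
The comment at the start of this step also mentions the silhouette coefficient. Spark 2.3+ ships a ClusteringEvaluator for it; a sketch of scoring the same range of K values (the import is an addition, and this requires a newer Spark than the rest of the transcript may have used):

import org.apache.spark.ml.evaluation.ClusteringEvaluator

// Silhouette per candidate K; values closer to 1 are better
val evaluator = new ClusteringEvaluator().setFeaturesCol("scaledFeatures").setPredictionCol("prediction")
(2 to 20).foreach { k =>
  val predictions = new KMeans().setK(k).setSeed(1L).setFeaturesCol("scaledFeatures").fit(scaledData).transform(scaledData)
  println(s"k=$k silhouette=${evaluator.evaluate(predictions)}")
}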

 

Reposted from: http://www.cnblogs.com/wwxbi/p/6028175.html
