码迷,mamicode.com
首页 > 其他好文 > 详细

第3集:Scala函数编程彻底精通

时间:2016-01-10 22:48:00      阅读:329      评论:0      收藏:0      [点我收藏+]

标签:


定义函数
scala> def fun1(name: String) {println(name)}
fun1: (name: String)Unit

scala> var fun1_v=fun1 _//加空格加下划线
fun1_v: String => Unit = <function1>

调用结果
scala> fun1("Spark")
Spark

scala> fun1_v("Hadoop")
Hadoop

匿名函数
scala> val fun2=(content: String) => println(content)
fun2: String => Unit = <function1>

scala> fun2("HBase")
HBase

高阶函数（higher-order function）
scala> val hiScala=(content: String) => println(content)
hiScala: String => Unit = <function1>

把匿名函数当作参数传入
scala> def bigData(func: (String) => Unit, content: String){func(content)}
bigData: (func: String => Unit, content: String)Unit

scala> bigData(hiScala,"Hadoop")
Hadoop


scala> val array = Array(1,2,3,4,5,6,7,8,9)
array: Array[Int] = Array(1, 2, 3, 4, 5, 6, 7, 8, 9)

scala> array.map(item => 2*item)
res4: Array[Int] = Array(2, 4, 6, 8, 10, 12, 14, 16, 18)

scala> array.map(item => println(item))
1
2
3
4
5
6
7
8
9
res5: Array[Unit] = Array((), (), (), (), (), (), (), (), ())

函数的返回值是个函数
scala> def func_Return(content: String)=(message: String) => println(message)
func_Return: (content: String)String => Unit

scala> func_Return("Spark")
res6: String => Unit = <function1>

scala> def func_Return(content: String) = (message: String) => println(content +" " + message)
func_Return: (content: String)String => Unit

scala> val returned = func_Return("Spark")
returned: String => Unit = <function1>


高阶函数具有类型推断的功能
scala> def spark(func: (String) => Unit, name: String){func(name)}
spark: (func: String => Unit, name: String)Unit

scala> spark((name) => println(name),"Scala")
Scala

scala> spark(name => println, "Scala")


scala> spark(name => println(name), "Scala")
Scala

scala> array.map(2*_)
res10: Array[Int] = Array(2, 4, 6, 8, 10, 12, 14, 16, 18)

scala> array.map(2*_).foreach(println(_))
2
4
6
8
10
12
14
16
18

scala> array.map(2*_).foreach(println)
2
4
6
8
10
12
14
16
18

scala> array.map(2*_).foreach(println _)
2
4
6
8
10
12
14
16
18

scala> array.map(2*_).filter(_>10).foreach(println)
12
14
16
18

闭包：函数在其定义处捕获的变量离开作用域之后，函数仍然能够访问该变量
scala> def scala(content: String) = (message: String) => println(content+" "+message)
scala: (content: String)String => Unit

scala> val funcResult=scala("Spark")
funcResult: String => Unit = <function1>

scala> funcResult("Flink")
Spark Flink

柯里化函数
scala> def sum(x: Int,y: Int)=x+y
sum: (x: Int, y: Int)Int

scala> sum(1,2)
res16: Int = 3

scala> def sum_Curring(x: Int)=(y: Int) => x+y
sum_Curring: (x: Int)Int => Int

scala> sum_Curring(1)(2)
res17: Int = 3

scala> def sum_Curring_Better(x:Int)(y:Int)=x+y
sum_Curring_Better: (x: Int)(y: Int)Int

scala> sum_Curring_Better(1)(2)
res18: Int = 3

scala> (1 to 100).reduceLeft(_+_)
res19: Int = 5050

scala> val list= List("Scala","Spark","Flink")
list: List[String] = List(Scala, Spark, Flink)

scala> val cal=list.map("The content is " + _)
cal: List[String] = List(The content is Scala, The content is Spark, The content is Flink)

scala> list.map(println)
Scala
Spark
Flink
res20: List[Unit] = List((), (), ())

scala> list.zip(List(10,6,5))
res22: List[(String, Int)] = List((Scala,10), (Spark,6), (Flink,5))

scala> cal.flatMap(_.split(" "))
res23: List[String] = List(The, content, is, Scala, The, content, is, Spark, The, content, is, Flink)

scala> cal.flatMap(_.split(" ")).foreach(println)
The
content
is
Scala
The
content
is
Spark
The
content
is
Flink

作业:用scala统计一个文件夹下面所有文件的单词出现的总次数
package scala

object WordCounter {
  // File reading and directory walking.
  import scala.io.Source
  import java.io._

  // Accumulates word -> occurrence count across every file scanned.
  // (A var holding an immutable Map, as in the original; not thread-safe.)
  var map = Map.empty[String, Int]

  /** Entry point: walk the directory tree rooted at D:/workspace and
    * print every word together with its total occurrence count. */
  def main(args: Array[String]): Unit = {
    scanDir(new File("D:/workspace"))
    map.foreach(println)
  }

  /** Visit `dir`, counting the words of every regular file found.
    * Fixes vs. original: recurses into subdirectories (the assignment asks
    * for the whole folder), and guards `listFiles`, which returns null for
    * a non-directory or on an I/O error. */
  def scanDir(dir: File): Unit = {
    val entries = Option(dir.listFiles).getOrElse(Array.empty[File])
    entries.foreach { entry =>
      if (entry.isFile) {
        readFile(entry)
        println(entry)
      } else if (entry.isDirectory) {
        scanDir(entry)
      }
    }
  }

  /** Count the words of every line in `file`.
    * The original leaked the Source handle; close it even on failure. */
  def readFile(file: File): Unit = {
    val src = Source.fromFile(file)
    try {
      src.getLines().foreach(count)
    } finally {
      src.close()
    }
  }

  /** Split `line` on punctuation/whitespace and bump each word's count.
    * The original pattern put `()><` OUTSIDE the character class and
    * contained the accidental range `/-=`, so it almost never split a
    * line; the class below holds each delimiter literally and `+` folds
    * consecutive delimiters, while `nonEmpty` drops a leading empty token. */
  def count(line: String): Unit = {
    for (word <- line.split("""[,:.!?*/\-=+()<>\s]+""") if word.nonEmpty) {
      map += word -> (map.getOrElse(word, 0) + 1)
    }
  }
}

/** Counts how many times `keyword` occurs in every file under a directory
  * tree. Reconstructed from a garbled paste: `newFile` -> `new File`,
  * `vallines` -> `val lines`, `irectory` -> `directories`, orphaned
  * closing braces removed, and the code wrapped in a runnable object. */
object KeywordCounter {
  import java.io.File

  // Total number of keyword occurrences seen so far.
  var num: Int = 0
  // Text to search for; note String.split treats it as a regex.
  var keyword = "8"

  def main(args: Array[String]): Unit = {
    val dir = new File("E:\\study\\spark-1.6.0\\spark-1.6.0\\data\\mllib\\als")
    subdirs(dir)
    println("num:" + num)
  }

  /** Walk `dir`: count keyword hits in each regular file, recurse eagerly
    * into subdirectories (the original's lazy iterator flatMap meant the
    * recursion never ran unless the result was consumed, and it also fed
    * directories straight into Source.fromFile), and return an iterator
    * over every file visited. `listFiles` may be null; guard it. */
  def subdirs(dir: File): Iterator[File] = {
    val entries = Option(dir.listFiles).getOrElse(Array.empty[File])
    val (directories, files) = entries.partition(_.isDirectory)
    files.foreach(countInFile)
    val below = directories.flatMap(d => subdirs(d).toSeq)
    (files ++ below).iterator
  }

  /** Add the occurrences of `keyword` in `file` to `num`, closing the
    * source even on failure. limit -1 keeps trailing empty strings so a
    * keyword at the end of a line is counted (plain split would drop it). */
  private def countInFile(file: File): Unit = {
    val src = scala.io.Source.fromFile(file)
    try {
      for (line <- src.getLines()) {
        num += line.split(keyword, -1).length - 1
      }
    } finally {
      src.close()
    }
  }
}

 

第3集:Scala函数编程彻底精通

标签:

原文地址:http://www.cnblogs.com/jkge/p/5119701.html

(0)
(0)
   
举报
评论 一句话评论(0
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!