
  • An example implementation of a Golang web crawler with regular expressions
  • 2021/12/23 10:54:15
  • I have been learning Go recently and used regular expressions while scraping website data, so here is a summary.

    Regular expressions in Go use RE2 syntax (I'm honestly not too clear on the details myself).

    Characters

    • . matches any single character, e.g. abc. matches: abcd, abcx, abc9
    • [] matches any one character inside the brackets, e.g. [abc]d matches: ad, bd, cd
    • - denotes a range inside [], e.g. [a-zA-Z0-9]
    • ^ inside [^] matches any character except those listed, e.g. [^xy]a matches: aa, da, but not xa or ya (a small Go example follows this list)
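
    A minimal runnable sketch of these character rules using Go's regexp package (the patterns and sample strings below are made up for illustration):

    package main
    
    import (
    	"fmt"
    	"regexp"
    )
    
    func main() {
    	// "." matches any single character
    	fmt.Println(regexp.MustCompile(`abc.`).MatchString("abcd")) // true
    	// "[abc]d" matches ad, bd or cd
    	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("cd")) // true
    	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("1d")) // false
    	// "[^xy]a" matches any character except x or y, followed by a
    	fmt.Println(regexp.MustCompile(`[^xy]a`).MatchString("xa")) // false
    }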

    Quantifiers

    • ? matches the preceding unit 0 or 1 times
    • + matches the preceding unit 1 or more times
    • * matches the preceding unit 0 or more times
    • {m,n} sets the lower and upper bounds on the repeat count, e.g. an IP address: [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} (a short example follows this list)
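
    A quick sketch of the IP-address pattern above (the sample strings are made up for illustration):

    package main
    
    import (
    	"fmt"
    	"regexp"
    )
    
    func main() {
    	// 1 to 3 digits, four times, separated by literal dots
    	ip := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}`)
    	fmt.Println(ip.FindString("host 192.168.1.10 is up")) // 192.168.1.10
    	fmt.Println(ip.MatchString("no address here"))        // false
    }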

    Others

    • \ is the escape character
    • | is alternation (logical OR)
    • () groups a unit; if the target string itself contains parentheses, match them with a character class, e.g. "[(] aaa. [)]" (a small example follows below)
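
    A small sketch of escaping, alternation and grouping (the patterns and sample strings are made up for illustration):

    package main
    
    import (
    	"fmt"
    	"regexp"
    )
    
    func main() {
    	// "|" picks one of several alternatives, "()" groups them into one unit
    	fmt.Println(regexp.MustCompile(`gr(a|e)y`).MatchString("grey")) // true
    	// "\." matches a literal dot instead of "any character"
    	fmt.Println(regexp.MustCompile(`v1\.2`).MatchString("v1x2")) // false
    	// literal parentheses in the target can be matched with [(] and [)]
    	fmt.Println(regexp.MustCompile(`[(]aaa[)]`).MatchString("(aaa)")) // true
    }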

    Methods

    // takes a regular expression string, returns a *regexp.Regexp
    str := regexp.MustCompile(pattern)
    // takes the data to search and a match count (-1 means all matches);
    // returns a [][]string where each entry holds the full match followed by its capture groups
    var result [][]string = str.FindAllStringSubmatch(data, -1)
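
    Put together, a runnable sketch of these two calls (the input string and pattern are made up for illustration):

    package main
    
    import (
    	"fmt"
    	"regexp"
    )
    
    func main() {
    	data := "read(12) read(7) read(30)"
    	str := regexp.MustCompile(`read[(]([0-9]+)[)]`)
    	// each element is [full match, capture group 1]
    	for _, m := range str.FindAllStringSubmatch(data, -1) {
    		fmt.Println(m[0], "->", m[1]) // e.g. "read(12) -> 12"
    	}
    }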
    

    Crawler

    Crawl the read, comment, and recommendation (digg) counts of every post on a cnblogs blog.

    package main
    
    import (
    	"fmt"
    	"io"
    	"net/http"
    	"regexp"
    	"strconv"
    )
    
    var readCount int = 0
    var commentCount int = 0
    var diggCount int = 0
    
    // httpGet reads the page at url over HTTP and returns the body in result
    func httpGet(url string) (result string, err error) {
    	resp, err1 := http.Get(url)
    	if err1 != nil {
    		err = err1
    		return
    	}
    	defer resp.Body.Close()
    
    	buf := make([]byte, 4096)
    
    	for {
    		n, err2 := resp.Body.Read(buf)
    		//fmt.Println(url)
    		if n == 0 {
    			break
    		}
    		if err2 != nil && err2 != io.EOF {
    			err = err2
    			return
    		}
    		result += string(buf[:n])
    	}
    	return result, err
    }
    
    // spiderPageDB crawls the post data on one listing page and adds the counts to the totals
    func spiderPageDB(index int, page chan int) {
    	url := "https://www.cnblogs.com/littleperilla/default.html?page=" + strconv.Itoa(index)
    
    	result, err := httpGet(url)
    	if err != nil {
    		fmt.Println("httpGet err:", err)
    		return
    	}
    
    	str := regexp.MustCompile("post-view-count\">阅读[(](?s:(.*?))[)]</span>")
    	alls := str.FindAllStringSubmatch(result, -1)
    	for _, j := range alls {
    		temp, err := strconv.Atoi(j[1])
    		if err != nil {
    			fmt.Println("string2int err:", err)
    		}
    		readCount += temp
    	}
    
    	str = regexp.MustCompile("post-comment-count\">评论[(](?s:(.*?))[)]</span>")
    	alls = str.FindAllStringSubmatch(result, -1)
    	for _, j := range alls {
    		temp, err := strconv.Atoi(j[1])
    		if err != nil {
    			fmt.Println("string2int err:", err)
    		}
    		commentCount += temp
    	}
    
    	str = regexp.MustCompile("post-digg-count\">推荐[(](?s:(.*?))[)]</span>")
    	alls = str.FindAllStringSubmatch(result, -1)
    	for _, j := range alls {
    		temp, err := strconv.Atoi(j[1])
    		if err != nil {
    			fmt.Println("string2int err:", err)
    		}
    		diggCount += temp
    	}
    
    	page <- index
    }
    
    // working is the main worker function
    func working(start, end int) {
    	fmt.Printf("crawling pages %d to %d...\n", start, end)
    
    	// channel that tells the main goroutine when every goroutine has finished
    	page := make(chan int)
    
    	// crawl the pages concurrently, one goroutine per page
    	for i := start; i <= end; i++ {
    		go spiderPageDB(i, page)
    	}
    
    	for i := start; i <= end; i++ {
    		fmt.Printf("fetched page %d\n", <-page)
    	}
    }
    
    // entry point
    func main() {
    	// read the start and end pages to crawl
    	var start, end int
    	fmt.Print("startpos:")
    	fmt.Scan(&start)
    	fmt.Print("endpos:")
    	fmt.Scan(&end)
    
    	working(start, end)
    
    	fmt.Println("reads:", readCount)
    	fmt.Println("comments:", commentCount)
    	fmt.Println("recommendations:", diggCount)
    }
    
    

    Addendum: a classic crawler example combining regular expressions and Golang, scraping the douban top250.

    package main
    
    import (
    	"fmt"
    	"io"
    	"net/http"
    	"os"
    	"regexp"
    	"strconv"
    )
    
    // saveToFile writes the film names and scores of one page into a text file
    func saveToFile(index int, filmName, filmScore [][]string) {
    	f, err := os.Create("page" + strconv.Itoa(index) + ".txt")
    	if err != nil {
    		fmt.Println("os.Create err:", err)
    		return
    	}
    	defer f.Close()
    	// number of matched entries
    	n := len(filmName)
    	// write the header row first: name and score
    	f.WriteString("film name" + "\t\t\t" + "score" + "\n")
    	for i := 0; i < n; i++ {
    		f.WriteString(filmName[i][1] + "\t\t\t" + filmScore[i][1] + "\n")
    	}
    }
    
    func main() {
    	var start, end int
    	fmt.Print("enter the start page to crawl: ")
    	fmt.Scan(&start)
    	fmt.Print("enter the end page to crawl: ")
    	fmt.Scan(&end)
    	working(start, end)
    }
    
    func working(start int, end int) {
    	fmt.Printf("crawling pages %d to %d\n", start, end)
    	for i := start; i <= end; i++ {
    		spiderPage(i)
    	}
    }
    
    // spiderPage crawls one douban page and saves its data to a file
    func spiderPage(index int) {
    	// build the url
    	url := "https://movie.douban.com/top250?start=" + strconv.Itoa((index-1)*25) + "&filter="
    
    	// fetch the page behind the url
    	result, err := httpGet(url)
    	if err != nil {
    		fmt.Println("httpGet err:", err)
    		return
    	}
    	//fmt.Println("result=", result)
    	// compile the regular expression for the film name
    	ret := regexp.MustCompile(`<img width="100" alt="(?s:(.*?))"`)
    	filmName := ret.FindAllStringSubmatch(result, -1)
    	for _, name := range filmName {
    		fmt.Println("name", name[1])
    	}
    
    	// compile the regular expression for the rating
    	ret2 := regexp.MustCompile(`<span class="rating_num" property="v:average">(?s:(.*?))<`)
    	filmScore := ret2.FindAllStringSubmatch(result, -1)
    	for _, score := range filmScore {
    		fmt.Println("score", score[1])
    	}
    
    	saveToFile(index, filmName, filmScore)
    }
    
    // httpGet fetches the given url and returns the page body in result
    func httpGet(url string) (result string, err error) {
    	req, _ := http.NewRequest("GET", url, nil)
    	// set the request headers
    	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36 OPR/66.0.3515.115")
    	resp, err1 := (&http.Client{}).Do(req)
    	//resp, err1 := http.Get(url) // douban treats this plain call as a crawler and returns status 418, so the browser headers above must be faked instead
    	if err1 != nil {
    		err = err1
    		return
    	}
    	defer resp.Body.Close()
    
    	buf := make([]byte, 4096)
    
    	// read the whole page in a loop
    	for {
    		n, err2 := resp.Body.Read(buf)
    		if n == 0 {
    			break
    		}
    		if err2 != nil && err2 != io.EOF {
    			err = err2
    			return
    		}
    		result += string(buf[:n])
    	}
    
    	return
    }

    This concludes the article on implementing a Golang crawler with regular expressions. For more on Golang crawlers and regular expressions, please search this site's earlier articles or browse the related articles below, and I hope you will keep supporting this site!