//! The `wikipedia` module handles the scraping of results from Wikipedia
//! for a user-provided query and, when supplied, a page number.

use std::collections::HashMap;

use reqwest::header::HeaderMap;
use reqwest::Client;
use scraper::Html;

use crate::models::aggregation_models::SearchResult;

use crate::models::engine_models::{EngineError, SearchEngine};

use error_stack::{Report, Result, ResultExt};

use super::common::build_query;
use super::search_result_parser::SearchResultParser;

/// A Wikipedia engine type defined in order to implement the `SearchEngine` trait, which
/// reduces code duplication and makes it easy to build a vector of different search engines.
pub struct Wikipedia {
    /// The parser, used to interpret the search result.
    parser: SearchResultParser,
    /// The id of the engine: `"wikipedia-"` followed by the language code.
    id: String,
    /// The host where wikipedia can be accessed.
    host: String,
}

impl Wikipedia {
    /// Creates a new Wikipedia engine instance for the given language edition.
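    ///
    /// A minimal usage sketch (the surrounding import path is assumed; adjust it to
    /// wherever this engine module lives in the crate):
    ///
    /// ```ignore
    /// let wikipedia = Wikipedia::new("en")?;
    /// ```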
    pub fn new(language: &str) -> Result<Self, EngineError> {
        let host = format!("https://{}.wikipedia.org", &language);
        let id = format!("wikipedia-{}", &language);
        Ok(Self {
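            // Selectors passed to the parser, in order: the "no results" notice,
            // each search result item, the result title link, the result URL link,
            // and the result description snippet.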
            parser: SearchResultParser::new(
                "p.mw-search-nonefound",
                ".mw-search-results li.mw-search-result",
                ".mw-search-result-heading a",
                ".mw-search-result-heading a",
                ".searchresult",
            )?,
            id,
            host,
        })
    }
}

#[async_trait::async_trait]
impl SearchEngine for Wikipedia {
    async fn results(
        &self,
        query: &str,
        page: u32,
        user_agent: &str,
        client: &Client,
        _safe_search: u8,
    ) -> Result<Vec<(String, SearchResult)>, EngineError> {
        let header_map = HeaderMap::try_from(&HashMap::from([
            ("User-Agent".to_string(), user_agent.to_string()),
            ("Referer".to_string(), self.host.to_string()),
        ]))
        .change_context(EngineError::UnexpectedError)?;

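        // Special:Search paginates by result offset: with a limit of 20 results
        // per page, page `page` starts at result `page * 20`.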
        let offset = (page * 20).to_string();
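        // Query parameters for Wikipedia's `Special:Search` page; `ns0=1`
        // restricts matches to the main (article) namespace.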
        let query_params: Vec<(&str, &str)> = vec![
            ("limit", "20"),
            ("offset", &offset),
            ("profile", "default"),
            ("search", query),
            ("title", "Special:Search"),
            ("ns0", "1"),
        ];

        let query_params_string = build_query(&query_params);

        let url: String = format!("{}/w/index.php?{}", self.host, query_params_string);

        let document: Html = Html::parse_document(
            &Wikipedia::fetch_html_from_upstream(self, &url, header_map, client).await?,
        );

        if self.parser.parse_for_no_results(&document).next().is_some() {
            return Err(Report::new(EngineError::EmptyResultSet));
        }

        // scrape all the results from the html
        self.parser
            .parse_for_results(&document, |title, url, desc| {
                let found_url = url.attr("href");
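                // Result hrefs are relative paths (e.g. `/wiki/...`), so prepend
                // the Wikipedia host to form an absolute URL.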
                found_url.map(|relative_url| {
                    SearchResult::new(
                        title.inner_html().trim(),
                        &format!("{}{relative_url}", self.host),
                        desc.inner_html().trim(),
                        &[&self.id],
                    )
                })
            })
    }
}