{
"paper_id": "2021",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T10:33:37.234089Z"
},
"title": "Identifying Hijacked Reviews",
"authors": [
{
"first": "Monika",
"middle": [],
"last": "Daryani",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Texas A&M University College Station",
"location": {
"postCode": "77843",
"region": "TX"
}
},
"email": "[email protected]"
},
{
"first": "James",
"middle": [],
"last": "Caverlee",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Texas A&M University College Station",
"location": {
"postCode": "77843",
"region": "TX"
}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Fake reviews and review manipulation are growing problems on online marketplaces globally. Review Hijacking is a new review manipulation tactic in which unethical sellers \"hijack\" an existing product page (usually one with many positive reviews), then update the product details like title, photo, and description with those of an entirely different product. With the earlier reviews still attached, the new item appears well-reviewed. However, there are no public datasets of review hijacking and little is known in the literature about this tactic. Hence, this paper proposes a three-part study: (i) we propose a framework to generate synthetically labeled data for review hijacking by swapping products and reviews; (ii) then, we evaluate the potential of both a Twin LSTM network and BERT sequence pair classifier to distinguish legitimate reviews from hijacked ones using this data; and (iii) we then deploy the best performing model on a collection of 31K products (with 6.5 M reviews) in the original data, where we find 100s of previously unknown examples of review hijacking.",
"pdf_parse": {
"paper_id": "2021",
"_pdf_hash": "",
"abstract": [
{
"text": "Fake reviews and review manipulation are growing problems on online marketplaces globally. Review Hijacking is a new review manipulation tactic in which unethical sellers \"hijack\" an existing product page (usually one with many positive reviews), then update the product details like title, photo, and description with those of an entirely different product. With the earlier reviews still attached, the new item appears well-reviewed. However, there are no public datasets of review hijacking and little is known in the literature about this tactic. Hence, this paper proposes a three-part study: (i) we propose a framework to generate synthetically labeled data for review hijacking by swapping products and reviews; (ii) then, we evaluate the potential of both a Twin LSTM network and BERT sequence pair classifier to distinguish legitimate reviews from hijacked ones using this data; and (iii) we then deploy the best performing model on a collection of 31K products (with 6.5 M reviews) in the original data, where we find 100s of previously unknown examples of review hijacking.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Reviews are an essential component of many online marketplaces, helping new consumers assess product quality, legitimacy, and reliability. Recent surveys indicate that an overwhelming majority of people read reviews (Murphy, 2020) . Indeed, 79% of people overall and 91% of people ages 18-34 trust online reviews as much as personal recommendations (Kaemingk, 2020) . Naturally, reviews have become a target of manipulation, misuse, and abuse (Mukherjee et al., 2012) .",
"cite_spans": [
{
"start": 216,
"end": 230,
"text": "(Murphy, 2020)",
"ref_id": "BIBREF13"
},
{
"start": 349,
"end": 365,
"text": "(Kaemingk, 2020)",
"ref_id": "BIBREF8"
},
{
"start": 443,
"end": 467,
"text": "(Mukherjee et al., 2012)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we focus on the problem of review hijacking, a relatively new attack vector and one that has received little, if any, research attention. Review hijacking is a fraud technique wherein a blackhat seller \"hijacks\" a product page that typically has already accumulated many positive reviews and then replaces the hijacked product with a different product (typically one without any positive reviews). The sellers then reap the ratings \"halo\" from consumers who assume the new product is highly rated. This review hijacking (also referred to as review reuse or bait-and-switch reviews) provides the sellers with a shortcut to many undeserved positive reviews. An example is shown in Figure 1 which we discovered in the first week of May 2021. This hair removal product has 4,069 reviews with an average rating of close to five stars. On inspection of the reviews (see Figure 2 ), we find many that refer to other products like dishwasher cleaners and diapers. Also, these reviews are for verified purchases which can provide added weight to the ostensible veracity of the reviews.",
"cite_spans": [],
"ref_spans": [
{
"start": 694,
"end": 702,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 879,
"end": 887,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We have identified at least three different methods that blackhat sellers adopt to conduct review hijacking depending on the particular e-commerce platform. A seller can incrementally change aspects of their own product (like the title, photo, description), resulting in an entirely new product, though still associated with the original reviews. Alternatively, a seller can add his product as a product variation of some other product to aggregate reviews from the former product. One can also merge reviews from some other products to their own by changing country or using some other platformspecific loopholes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "While review hijacking has been recognized in the press and social media as a growing problem, e.g., (Swearingen, 2019; Walsh, 2020; Sterling, 2019; Dascalescu, 2019; Nguyen, 2018; Dzieza, 2019) , there has been no structured research to date on identifying review hijacking. We attribute this to several key challenges:",
"cite_spans": [
{
"start": 101,
"end": 119,
"text": "(Swearingen, 2019;",
"ref_id": "BIBREF20"
},
{
"start": 120,
"end": 132,
"text": "Walsh, 2020;",
"ref_id": "BIBREF21"
},
{
"start": 133,
"end": 148,
"text": "Sterling, 2019;",
"ref_id": "BIBREF19"
},
{
"start": 149,
"end": 166,
"text": "Dascalescu, 2019;",
"ref_id": "BIBREF1"
},
{
"start": 167,
"end": 180,
"text": "Nguyen, 2018;",
"ref_id": "BIBREF14"
},
{
"start": 181,
"end": 194,
"text": "Dzieza, 2019)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 First, there are no standard datasets of review hijacking, nor are there gold labels of known examples. Hence, it is challenging to validate models that aim to uncover review hijacking.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 Second, review hijacking is a targeted attack vector with a skewed distribution, and so there are no simple approaches to find examples. In a preliminary investigation, we manually labeled hundreds of reviews and found fewer than 0.01% reviews that could be considered part of a review hijacking attack.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 Third, many reviews cannot easily be labeled as hijacked or not. For example, reviews like \"Great product! Five stars!\" are generic and could potentially be associated with any product.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 Finally, hijackers may adopt sophisticated techniques to avoid detection. For example, some products may have a mix of legitimate reviews to camouflage the hijacked ones (e.g., by incentivizing reviewers to contribute a review about the hair removal product).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Hence, this paper proposes an initial investigation into the potential of identifying review hijacking. We conduct a three-part study. Due to the challenges of finding high-quality examples of review hijacking, we first propose a framework to generate synthetic examples of review hijacking by swapping products and reviews. We do so both at the inter-category level (where presumably it should be easier to determine if a review is associated with a product) and at the intra-category level (where product similarity within the category may make this more challenging). Over this synthetic dataset, we evaluate the potential of both a Twin LSTM network and BERT sequence pair classifier to distinguish legitimate reviews from hijacked ones. Based on the encouraging results from this experiment, we then deploy the BERT sequence pair classifier algorithm on a real collection of 31K products (with 6.5 M reviews). By averaging the review scores from the classifier for each product, we find that products with an average review score (or suspiciousness score) > 0.5 have 99.95% of the listings containing unrelated or hijacked reviews. These findings suggest the promise of large-scale detection of review hijacking in the wild.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The manipulation of reviews and review platforms has been widely studied, e.g., (G\u00f6ssling et al., 2018; Jindal and Liu, 2007; Kaghazgaran et al., 2017; Mukherjee et al., 2012 Mukherjee et al., , 2013 , though there is little research literature on the problem of review hijacking. Here, we highlight several efforts related to the methods proposed in this paper. Higgins et al. developed models for an essay rating system to detect bad-faith essays by comparing the essay titles to the essay text to determine whether the title and text were in agreement through the use of word similarity (Higgins et al., 2006) . A similar idea motivates our approach that compares product titles/descriptions with review text. Louis and Higgins continued this line of research to determine whether a particular essay was related to the essay prompt or question by expanding short prompts and spell correcting the texts (Louis and Higgins, 2010) . Rei and Cummins extended this work and combined various sentence similarity measures like TF-IDF and Word2Vec embeddings with moderate improvement over Higgins' work (Rei and Cummins, 2016) . Apart from the essay space, Ryu et al. investigated the detection of out-of-domain sentences (Ryu et al., 2017) . They proposed a neural sentence embedding method representing sentences in a low-dimensional continuous vector space that emphasizes aspects in-domain and out-of-domain for a given scenario. In another direction, fake news detection and clickbait detection can be viewed as related tasks. For example, Hanselowski et al.used a BiLSTM model with attention to determine if the headline of a news article agrees, disagrees, or is unrelated to the text as part of a Fake News Challenge (Hanselowski et al., 2018) .",
"cite_spans": [
{
"start": 80,
"end": 103,
"text": "(G\u00f6ssling et al., 2018;",
"ref_id": "BIBREF4"
},
{
"start": 104,
"end": 125,
"text": "Jindal and Liu, 2007;",
"ref_id": "BIBREF7"
},
{
"start": 126,
"end": 151,
"text": "Kaghazgaran et al., 2017;",
"ref_id": "BIBREF9"
},
{
"start": 152,
"end": 174,
"text": "Mukherjee et al., 2012",
"ref_id": "BIBREF11"
},
{
"start": 175,
"end": 199,
"text": "Mukherjee et al., , 2013",
"ref_id": "BIBREF12"
},
{
"start": 590,
"end": 612,
"text": "(Higgins et al., 2006)",
"ref_id": "BIBREF6"
},
{
"start": 905,
"end": 930,
"text": "(Louis and Higgins, 2010)",
"ref_id": "BIBREF10"
},
{
"start": 1099,
"end": 1122,
"text": "(Rei and Cummins, 2016)",
"ref_id": "BIBREF17"
},
{
"start": 1218,
"end": 1236,
"text": "(Ryu et al., 2017)",
"ref_id": "BIBREF18"
},
{
"start": 1721,
"end": 1747,
"text": "(Hanselowski et al., 2018)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In our preliminary investigation, we examined hundreds of reviews from the Amazon dataset provided by McAuley (Ni et al., 2019) . The dataset contains 233.1 million reviews from May 1996 to October 2018, with reviews and product information including title, description, etc. However, we find very few examples of review hijacking. Hence, we concluded that hiring crowd labelers or subject matter experts to label product-review pairs as hijacked or not hijacked might not be fruitful. Instead, we propose a method to generate synthetic examples for studying the potential of models to identify hijacked reviews.",
"cite_spans": [
{
"start": 110,
"end": 127,
"text": "(Ni et al., 2019)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Generating Synthetic Examples of Review Hijacking",
"sec_num": "3"
},
{
"text": "As a first step, we prepared the Amazon dataset. For each product i, we combined the description (product text provided by the seller), title (the name of the product), the brand of the product, and features (product features like color or size) into a single product text P i . We also removed products with fewer than five reviews. For each review j, we combined the reviewText (the text in the review body), the style (which contains some optional product features like color or size), and summary (which is the headline of the review) into a single review text R j .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "3.1"
},
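{
"text": "As an illustration of this preparation step, the following minimal Python sketch builds P_i and R_j and drops products with fewer than five reviews. It assumes the reviews and metadata are stored as gzipped JSON-lines files; the file names and helper names are illustrative, not part of the original pipeline.\n\nimport json, gzip\nfrom collections import defaultdict\n\ndef build_product_text(meta):\n    # P_i: concatenate title, brand, features, and description into one string.\n    parts = [meta.get('title') or '', meta.get('brand') or '', ' '.join(meta.get('feature') or []), ' '.join(meta.get('description') or [])]\n    return ' '.join(p for p in parts if p).strip()\n\ndef build_review_text(rev):\n    # R_j: concatenate the review summary (headline), optional style attributes, and the review body.\n    style = ' '.join(str(v) for v in (rev.get('style') or {}).values())\n    parts = [rev.get('summary') or '', style, rev.get('reviewText') or '']\n    return ' '.join(p for p in parts if p).strip()\n\ndef load_jsonl(path):\n    with gzip.open(path, 'rt') as f:\n        for line in f:\n            yield json.loads(line)\n\nreviews_by_asin = defaultdict(list)\nfor rev in load_jsonl('Cell_Phones_and_Accessories.json.gz'):  # assumed file name\n    reviews_by_asin[rev['asin']].append(build_review_text(rev))\n\nproduct_text = {}\nfor meta in load_jsonl('meta_Cell_Phones_and_Accessories.json.gz'):  # assumed file name\n    if len(reviews_by_asin.get(meta['asin'], [])) >= 5:  # keep products with at least five reviews\n        product_text[meta['asin']] = build_product_text(meta)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "3.1"
},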
{
"text": "Hence, our goal is to determine if each review j associated with the product i, is actually related to the product or not. If the review is unrelated, we can conclude that there is potential evidence of review hijacking for the product. Of course, there could be other reasons for a review for being unrelated to a product, like an error by the reviewer. We leave this fine-grained determination as future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "3.1"
},
{
"text": "Given these products and reviews, we propose to randomly swap reviews between a pair of distinct products, yielding a collection of unrelated productreview pairs. As a first step, we assume that all reviews are actually related to the associated product. Hence, we have a large set of product-review pairs with the label related (= 0). Of course, we know that our data has some hijacked reviews (on the order of < 0.01%), so we will tolerate some errors in these labels.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},
{
"text": "By randomly swapping product-review pairs, we get a set of product-review pairs with the label unrelated (= 1). For example, Figure 3 shows a simple example of a basketball and a phone, each with an associated review. We swap reviews among the products to generate unrelated (= 1) labels in addition to the original related (= 0) labels. But, how do we select which products to select for randomly swapping reviews? Randomly selecting products may lead to such a clear mismatch between the review text and the product text that detection would be trivial. On the other hand, selecting closely related products (e.g., by selecting Samsung mobile covers from two different brands) may yield reviews that are essentially undetectable as possible hijacking. Hence, we propose two methods for finding pairs of dissimilar products for re-view swapping.",
"cite_spans": [],
"ref_spans": [
{
"start": 125,
"end": 133,
"text": "Figure 3",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},
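{
"text": "A minimal sketch of this swapping procedure follows, reusing the product_text and reviews_by_asin dictionaries from the preparation sketch above; are_dissimilar is a placeholder for either of the two pairing strategies described next, and the names and structure are illustrative rather than our exact implementation.\n\nimport random\n\ndef make_pairs(product_text, reviews_by_asin, are_dissimilar, seed=0):\n    rng = random.Random(seed)\n    asins = list(product_text)\n    examples = []  # (product text, review text, label); 0 = related, 1 = unrelated\n    for asin in asins:\n        # Original product-review pairs are treated as related (label 0), tolerating\n        # the small fraction (< 0.01%) of genuinely hijacked reviews in the data.\n        for review in reviews_by_asin[asin]:\n            examples.append((product_text[asin], review, 0))\n        # Swap in the reviews of a dissimilar product to create unrelated pairs (label 1).\n        for _ in range(10):  # a few attempts to find a dissimilar partner\n            other = rng.choice(asins)\n            if other != asin and are_dissimilar(asin, other):\n                for review in reviews_by_asin[other]:\n                    examples.append((product_text[asin], review, 1))\n                break\n    rng.shuffle(examples)\n    return examples",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},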
{
"text": "Inter-Category Swapping. The first approach takes a product text P i composed of the title, features, and description from one category (e.g., Beauty, Clothing, Electronics) and the review texts R j of a product in another category for unrelated reviews. For related reviews, we take the original product-review pairs. We obtained a set of \u2248 59k reviews with \u2248 25k unrelated reviews and \u2248 34k related reviews.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},
{
"text": "Intra-category Swapping. The first approach handles hijacking across categories. For hijacking occurring within a product category, we use Jaccard distance. We converted product titles for each product into TF-IDF feature matrices, found pairwise Jaccard distances between them, and we formed product pairs (A 1 , A 2 ) with Jaccard distance 0. Then, we took the product text P i of one product A 1 , and the review text R j of another product A 2 and labeled this as unrelated. Similarly, we took the product text of A 2 and a review of A 1 as unrelated. For related labels, we took the product text, and the review text of A 1 , and likewise for A 2 to get another set of related data. We obtained a set of \u2248 56k reviews with \u2248 22k unrelated reviews and \u2248 34k related reviews.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},
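{
"text": "A sketch of this title-based pairing with scikit-learn is shown below; the exact vectorizer settings are omitted here, and the quadratic pairwise computation is meant only for illustration on a modest number of titles.\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import pairwise_distances\n\ndef dissimilar_title_pairs(titles):\n    # Binarize the TF-IDF title matrix and compute pairwise Jaccard distances;\n    # keep pairs whose titles share no terms (distance == 1), i.e., maximally dissimilar.\n    tfidf = TfidfVectorizer().fit_transform(titles)\n    binary = (tfidf > 0).toarray()\n    dist = pairwise_distances(binary, metric='jaccard')\n    n = len(titles)\n    return [(i, j) for i in range(n) for j in range(i + 1, n) if dist[i, j] == 1.0]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Swapping Reviews",
"sec_num": "3.2"
},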
{
"text": "Given these synthetic datasets of hijacked reviews, can we detect them? In this section, we report on experiments with two approaches: one based on a Twin LSTM and one based on BERT Sentence Pair Classification.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Identifying Synthetic Examples",
"sec_num": "4"
},
{
"text": "We shuffled the product-review pairs and split them into training, validation and test set in ratio 70:10:20 for both of the datasets. The actual number of reviews in each set depends on the swapping categories and is discussed in Section 3.2 We train on the train set, tune models on the validation set, and have reported results on the test set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Identifying Synthetic Examples",
"sec_num": "4"
},
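{
"text": "A sketch of this split using scikit-learn (an assumption; any shuffling utility would work equally well), applied to the examples list produced by the swapping sketch above:\n\nfrom sklearn.model_selection import train_test_split\n\n# 70:10:20 split of the shuffled (product text, review text, label) examples.\ntrain, rest = train_test_split(examples, test_size=0.30, random_state=0, shuffle=True)\nval, test = train_test_split(rest, test_size=2/3, random_state=0)  # 10% validation, 20% test",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Identifying Synthetic Examples",
"sec_num": "4"
},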
{
"text": "The first approach adopts a Twin neural network which has shown success in comparing images and text. This network uses the same weights in parallel in tandem on two inputs to return output based on the relation or distance between them (Chicco, 2021) . Concretely, we compare sentence pairs and determine if they are similar or not. We tokenized our inputs and converted them into sequences. Then we used 300-dimensional GloVe (Pennington et al., 2014) embeddings and formed an embedding matrix for our tokens. We get two embedding matrices for both inputs, which we feed into the LSTM network illustrated in Figure 4 . We use twin LSTM networks with two layers of 64 nodes each, with a dropout of 0.01. We calculate the cosine similarity between the two input embeddings and evaluate the performance by computing cross-entropy loss using accuracy and AUC (Area Under Curve). It takes 13 epochs with Adam optimizer and learning rate of 0.00001 to get the result. ",
"cite_spans": [
{
"start": 237,
"end": 251,
"text": "(Chicco, 2021)",
"ref_id": "BIBREF0"
},
{
"start": 428,
"end": 453,
"text": "(Pennington et al., 2014)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [
{
"start": 610,
"end": 618,
"text": "Figure 4",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Twin LSTM Network",
"sec_num": "4.1"
},
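{
"text": "A minimal Keras sketch of such a twin (shared-weight) LSTM follows. It assumes an embedding_matrix built from the 300-dimensional GloVe vectors and integer-encoded, padded product and review sequences; the layer sizes, dropout, optimizer, and learning rate follow the description above, while the sequence length and the frozen embeddings are illustrative assumptions.\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers, Model\n\ndef build_twin_lstm(vocab_size, embedding_matrix, max_len=256):\n    # Shared embedding and two stacked LSTM layers (64 units each, dropout 0.01),\n    # applied identically to the product text and the review text.\n    embed = layers.Embedding(vocab_size, 300, embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix), trainable=False, mask_zero=True)\n    lstm1 = layers.LSTM(64, return_sequences=True, dropout=0.01)\n    lstm2 = layers.LSTM(64, dropout=0.01)\n\n    def encode(x):\n        return lstm2(lstm1(embed(x)))\n\n    product_in = layers.Input(shape=(max_len,), dtype='int32')\n    review_in = layers.Input(shape=(max_len,), dtype='int32')\n    # Cosine similarity between the two encodings, mapped to [0, 1] so that\n    # dissimilar pairs score near 1, matching the unrelated (= 1) label.\n    cos = layers.Dot(axes=1, normalize=True)([encode(product_in), encode(review_in)])\n    out = layers.Lambda(lambda c: (1.0 - c) / 2.0)(cos)\n\n    model = Model([product_in, review_in], out)\n    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy', metrics=['accuracy', tf.keras.metrics.AUC()])\n    return model",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Twin LSTM Network",
"sec_num": "4.1"
},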
{
"text": "The second approach adopts the popular BERT pretrained language model (Devlin et al., 2019) . Since BERT provides a deep bidirectional representation, conditioned on text in both directions, we expect this method to perform better than the twin neural network, which uses GLOVE embeddings. Our model is prepared from the BERT BASE model (bert 12 768 12) from GluonNLP. We add a layer on top for classification, as shown in Figure 5 . We use Adam optimizer for optimizing this classification layer and get results with only 3 epochs. Now we form the sentence pairs for classification. Like the previous method, the first sentence is the product text P i (a concatenation of product title, features, and description). The second sentence is the review text R j (a concatenation of the review summary and review text). We then tokenize the sentences, insert [CLS] at the start, insert [SEP] at the end and between both the sentences, and generate segment ids to specify if a token belongs to the first sentence or the second one. We now run the BERT fine-tuning with these sequences as inputs. We get the output as an unrelated score u(i, j) between 0 and 1. For texts longer than 512 tokens, we truncate and take the first 512 tokens for our model. As 99% of the review texts have fewer than 512 tokens, this choice impacts very few reviews. Table 1 shows the results reported on test data using these two approaches. We see that the Twin LSTM Network provides more than 80% accuracy and high ROC result on both inter-category and intra-category datasets. The BERT-based classifier has more than 90% accuracy and ROC result for both datasets. We see that both methods perform better on the inter-category dataset than the intracategory one. In the inter-category dataset, we obtain unrelated reviews by taking products from one category and review texts from another. Hence, models trained on this dataset can learn product features of one category at a time and develop expertise in that category. The intra-category dataset is more challenging for both approaches. Since products are drawn from the same category, there can be less clarity in distinguishing features of the reviews.",
"cite_spans": [
{
"start": 70,
"end": 91,
"text": "(Devlin et al., 2019)",
"ref_id": "BIBREF2"
},
{
"start": 882,
"end": 887,
"text": "[SEP]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 423,
"end": 431,
"text": "Figure 5",
"ref_id": "FIGREF3"
},
{
"start": 1340,
"end": 1347,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "BERT Sequence Pair Classifier",
"sec_num": "4.2"
},
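{
"text": "The following sketch reproduces this sequence pair setup with the Hugging Face transformers library rather than the GluonNLP implementation described above; the model name and the inference-only scoring helper are illustrative.\n\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\ndef encode_pair(product_text, review_text):\n    # The tokenizer inserts [CLS] and [SEP], builds token_type_ids (segment ids) that mark\n    # the product text as sentence A and the review text as sentence B, and truncates\n    # inputs longer than 512 tokens.\n    return tokenizer(product_text, review_text, truncation=True, max_length=512, padding='max_length', return_tensors='pt')\n\ndef unrelated_score(product_text, review_text):\n    # u(i, j): predicted probability that review j is unrelated to product i,\n    # read from the classification head after fine-tuning.\n    model.eval()\n    with torch.no_grad():\n        logits = model(**encode_pair(product_text, review_text)).logits\n    return torch.softmax(logits, dim=-1)[0, 1].item()",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BERT Sequence Pair Classifier",
"sec_num": "4.2"
},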
{
"text": "Paired with this summary table (Table 1) , we show in Figures 6, 7, 8 and 9 the ROC curve for the BERT-based model and Twin LSTM network. We can clearly see that BERT-based model performs better than LSTM. We can also see how both models perform better on the inter-category dataset rather than the intra-category one.",
"cite_spans": [],
"ref_spans": [
{
"start": 31,
"end": 40,
"text": "(Table 1)",
"ref_id": null
},
{
"start": 54,
"end": 75,
"text": "Figures 6, 7, 8 and 9",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.3"
},
{
"text": "Even though encouraging, these results are on synthetic data, and the data itself may contain noisy labels. Hence, we next turn to the task of uncov- ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Detecting Hijacked Reviews",
"sec_num": "5"
},
{
"text": "For this experiment, we used the BERT sequence pair classifier and applied it to a dataset of 31K products (with 6.5 M reviews) with the original product-review pairs intact. These 31K products were held out and not used during the training. For each product-review pair, we take the unrelated score output from the trained BERT-based model as u(i, j). For a product i with n reviews, we calculate an average suspiciousness review score as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Approach and Results",
"sec_num": "5.1"
},
{
"text": "score i = n j=1 u(i, j)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Approach and Results",
"sec_num": "5.1"
},
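{
"text": "A sketch of this aggregation, reusing the unrelated_score helper and the product_text and reviews_by_asin dictionaries from the earlier sketches (names are illustrative):\n\nfrom statistics import mean\n\ndef suspiciousness(p_text, review_texts):\n    # Average the per-review unrelated scores u(i, j) over the n reviews of product i.\n    return mean(unrelated_score(p_text, r) for r in review_texts)\n\nscores = {asin: suspiciousness(product_text[asin], reviews_by_asin[asin]) for asin in product_text}\nflagged = [asin for asin, s in scores.items() if s > 0.5]  # candidates for manual inspection",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Approach and Results",
"sec_num": "5.1"
},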
{
"text": "n Based on this suspiciousness score, we plot the distribution of all 31K products in Figure 10 . Unsurprisingly, the vast majority of products have a very low suspiciousness score. About 99% of products have scored < 0.3, reinforcing our initial assumption about a skewed class distribution. In other words, the vast majority of the reviews on listings seem to be related to the product itself. However, we find many cases of potential review hijacking (see the right side of Figure 10 ), indicating that this targeted attack is indeed a threat to review platforms.",
"cite_spans": [],
"ref_spans": [
{
"start": 86,
"end": 95,
"text": "Figure 10",
"ref_id": "FIGREF0"
},
{
"start": 477,
"end": 486,
"text": "Figure 10",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Approach and Results",
"sec_num": "5.1"
},
{
"text": "We manually checked a sample of 200+ products with a suspiciousness score of > 0.5. We found that all but one of the products contained reviews referring to a different product. While there is uncertainty as to the mechanism leading to an unrelated review, we hypothesize that these are indeed previously unknown cases of review hijacking. And in an encouraging direction, these results indicate the promise of training models over synthetic hijacked reviews for uncovering actual instances.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Approach and Results",
"sec_num": "5.1"
},
{
"text": "In this section, we discuss three sample products three sample products and their distribution of unre-lated scores u(i, j) that are assigned by the BERTbased model. These three products are from the Cellphone & Accessories category. Figure 11 shows the unrelated score distribution for all of the reviews of product-1. Product-1 has an average unrelated review score of 0.9 to 1.0. We can see from the distribution that most reviews have a high unrelated score (> 0.9). We manually inspect these reviews and observe that these reviews are indeed unrelated. Hence, we conclude that this product is an example of review hijacking. Figure 12 shows the unrelated score distribution for product-2. Product-2 has an average review score of 0.0 to 0.1, meaning most of the reviews seem appropriate. We can see from the distribution that most reviews have a low unrelated score (< 0.1), and a few have a high score (> 0.9). We manually inspect the reviews with high unrelated scores (> 0.9) and observe that these reviews are either misclassified by our BERT-based model or do not have enough information to determine the label (e.g., reviews like \"Great Product!\"). Thus, we conclude that this product is not an example of review hijacking.",
"cite_spans": [],
"ref_spans": [
{
"start": 234,
"end": 243,
"text": "Figure 11",
"ref_id": "FIGREF0"
},
{
"start": 630,
"end": 639,
"text": "Figure 12",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Case Study",
"sec_num": "5.2"
},
{
"text": "Finally, Figure 13 shows the unrelated review score distribution for product-3. Product-3 has an average review score of 0.5 to 0.6. We can see from the distribution that about 55% of the reviews have a high unrelated score, while 35% reviews have a low unrelated score. We manually inspect reviews with high unrelated scores (> 0.9) and observe that these reviews are indeed unrelated to the product. We also inspect the reviews with low unrelated scores (< 0.1 and < 0.2) and observe that most are related to the product. As this product has a mix of related and unrelated reviews, we hypothesize that it is also an example of review hijacking containing some related reviews.",
"cite_spans": [],
"ref_spans": [
{
"start": 9,
"end": 18,
"text": "Figure 13",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Case Study",
"sec_num": "5.2"
},
{
"text": "This paper has examined the challenge of identifying hijacked reviews. Since we know little about these hijacked reviews, we first proposed to generate synthetic examples by swapping the reviews of a product with reviews on an unrelated product. We then tested the viability of a Twin LSTM network and BERT sentence pair classifier to uncover these unrelated reviews. Both approaches provided excellent results on synthetic data, but do they actually identify hijacked reviews in the wild? Our preliminary investigation showed that a model trained over synthetic data could detect many examples of previously unknown cases of review hijacking. Our method also has some limitations. First, the major drawback occurs because the data is labeled synthetically. Hence, there is no way to find the actual recall for our approach. Calculating recall requires manual labeling of all product-review pairs, which is an expensive process. Second, our method is dependent on the accuracy of labeling methods. For the intra-category case, our method cannot detect products hijacked with similar wording in the same category since their Jaccard distance is low. For example, if there are two products, \"iPhone X\" and \"iPhone 5C cover\", the products will have a low Jaccard distance, and the reviews hijacked among them cannot be labeled correctly. Therefore, our ML model can also not learn this kind of review hijacking. Third, generic reviews like \"Good product!\" and \"Product shipped fast\" were labeled hijacked and not hijacked depending on what product they belonged to. Ideally, we would want to label all of them as not hijacked. This random labeling adds to the noise in the labels.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion, Limitations and Next Steps",
"sec_num": "6"
},
{
"text": "In our continuing work, we are interested in two main directions: data and methods. From a data per-spective, we are investigating more refined methods to generate synthetic labels. Can we couple crowd labelers with our swapping approach to construct better product-review pairs? We are also interested in updating the data itself. Our dataset covers reviews up to 2018, though many media reports of review hijacking were not until 2019. There could have been a rise in review hijacking that is not as prominent in our data. From a methods perspective, we have focused purely on text-based signals. Incorporating image-based features like from the product itself and user-submitted images could help identify examples of review hijacking. We are also interested in adopting recent advances in pre-trained language models like T5, DeBERTa, and RoBERTa. We are also focusing on using ecommerce specific text (like product catalog data) to instill domain-specific knowledge during the pretraining of language models versus BooksCorpus and English Wikipedia used in BERT.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion, Limitations and Next Steps",
"sec_num": "6"
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Siamese Neural Networks: An Overview",
"authors": [
{
"first": "Davide",
"middle": [],
"last": "Chicco",
"suffix": ""
}
],
"year": 2021,
"venue": "",
"volume": "",
"issue": "",
"pages": "73--94",
"other_ids": {
"DOI": [
"10.1007/978-1-0716-0826-5_3"
]
},
"num": null,
"urls": [],
"raw_text": "Davide Chicco. 2021. Siamese Neural Networks: An Overview, pages 73-94. Springer US, New York, NY.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Swapped product listings on amazon -web applications stack exchange. Webapps Stackexchange",
"authors": [
{
"first": "Dan",
"middle": [],
"last": "Dascalescu",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dan Dascalescu. 2019. Swapped product listings on amazon -web applications stack exchange. We- bapps Stackexchange.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding",
"authors": [
{
"first": "Jacob",
"middle": [],
"last": "Devlin",
"suffix": ""
},
{
"first": "Ming-Wei",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Toutanova",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "1",
"issue": "",
"pages": "4171--4186",
"other_ids": {
"DOI": [
"10.18653/v1/N19-1423"
]
},
"num": null,
"urls": [],
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Even amazon's own products are getting hijacked by imposter sellers. The Verge",
"authors": [
{
"first": "Josh",
"middle": [],
"last": "Dzieza",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Josh Dzieza. 2019. Even amazon's own products are getting hijacked by imposter sellers. The Verge.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "The manager's dilemma: a conceptualization of online review manipulation strategies",
"authors": [
{
"first": "Stefan",
"middle": [],
"last": "G\u00f6ssling",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Hall",
"suffix": ""
},
{
"first": "Ann-Christin",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2018,
"venue": "Current Issues in Tourism",
"volume": "21",
"issue": "5",
"pages": "484--503",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stefan G\u00f6ssling, C Michael Hall, and Ann-Christin An- dersson. 2018. The manager's dilemma: a concep- tualization of online review manipulation strategies. Current Issues in Tourism, 21(5):484-503.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A retrospective analysis of the fake news challenge stance-detection task",
"authors": [
{
"first": "Andreas",
"middle": [],
"last": "Hanselowski",
"suffix": ""
},
{
"first": "Pvs",
"middle": [],
"last": "Avinesh",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Schiller",
"suffix": ""
},
{
"first": "Felix",
"middle": [],
"last": "Caspelherr",
"suffix": ""
},
{
"first": "Debanjan",
"middle": [],
"last": "Chaudhuri",
"suffix": ""
},
{
"first": "Christian",
"middle": [
"M"
],
"last": "Meyer",
"suffix": ""
},
{
"first": "Iryna",
"middle": [],
"last": "Gurevych",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 27th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1859--1874",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Andreas Hanselowski, Avinesh PVS, Benjamin Schiller, Felix Caspelherr, Debanjan Chaudhuri, Christian M. Meyer, and Iryna Gurevych. 2018. A retrospective analysis of the fake news challenge stance-detection task. In Proceedings of the 27th International Conference on Computational Lin- guistics, pages 1859-1874, Santa Fe, New Mexico, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Identifying off-topic student essays without topicspecific training data",
"authors": [
{
"first": "Derrick",
"middle": [],
"last": "Higgins",
"suffix": ""
},
{
"first": "Jill",
"middle": [],
"last": "Burstein",
"suffix": ""
},
{
"first": "Yigal",
"middle": [],
"last": "Attali",
"suffix": ""
}
],
"year": 2006,
"venue": "Natural Language Engineering",
"volume": "12",
"issue": "2",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Derrick Higgins, Jill Burstein, and Yigal Attali. 2006. Identifying off-topic student essays without topic- specific training data. Natural Language Engineer- ing, 12(2):145.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Review spam detection",
"authors": [
{
"first": "Nitin",
"middle": [],
"last": "Jindal",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 16th International Conference on World Wide Web, WWW '07",
"volume": "",
"issue": "",
"pages": "1189--1190",
"other_ids": {
"DOI": [
"10.1145/1242572.1242759"
]
},
"num": null,
"urls": [],
"raw_text": "Nitin Jindal and Bing Liu. 2007. Review spam de- tection. In Proceedings of the 16th International Conference on World Wide Web, WWW '07, page 1189-1190, New York, NY, USA. Association for Computing Machinery.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Online reviews statistics to know in 2021",
"authors": [
{
"first": "Diana",
"middle": [],
"last": "Kaemingk",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Diana Kaemingk. 2020. Online reviews statistics to know in 2021. Qualtrics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Behavioral analysis of review fraud: Linking malicious crowdsourcing to amazon and beyond",
"authors": [
{
"first": "Parisa",
"middle": [],
"last": "Kaghazgaran",
"suffix": ""
},
{
"first": "James",
"middle": [],
"last": "Caverlee",
"suffix": ""
},
{
"first": "Majid",
"middle": [],
"last": "Alfifi",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the International AAAI Conference on Web and Social Media",
"volume": "11",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Parisa Kaghazgaran, James Caverlee, and Majid Alfifi. 2017. Behavioral analysis of review fraud: Linking malicious crowdsourcing to amazon and beyond. In Proceedings of the International AAAI Conference on Web and Social Media, volume 11.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Off-topic essay detection using short prompt texts",
"authors": [
{
"first": "Annie",
"middle": [],
"last": "Louis",
"suffix": ""
},
{
"first": "Derrick",
"middle": [],
"last": "Higgins",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the NAACL HLT 2010 Fifth Workshop on Innovative Use of NLP for Building Educational Applications, IUNLPBEA '10",
"volume": "",
"issue": "",
"pages": "92--95",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Annie Louis and Derrick Higgins. 2010. Off-topic es- say detection using short prompt texts. In Proceed- ings of the NAACL HLT 2010 Fifth Workshop on In- novative Use of NLP for Building Educational Ap- plications, IUNLPBEA '10, page 92-95, USA. As- sociation for Computational Linguistics.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Spotting fake reviewer groups in consumer reviews",
"authors": [
{
"first": "Arjun",
"middle": [],
"last": "Mukherjee",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Natalie",
"middle": [],
"last": "Glance",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 21st international conference on World Wide Web",
"volume": "",
"issue": "",
"pages": "191--200",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Arjun Mukherjee, Bing Liu, and Natalie Glance. 2012. Spotting fake reviewer groups in consumer reviews. In Proceedings of the 21st international conference on World Wide Web, pages 191-200.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Fake review detection: Classification and analysis of real and pseudo reviews",
"authors": [
{
"first": "Arjun",
"middle": [],
"last": "Mukherjee",
"suffix": ""
},
{
"first": "Vivek",
"middle": [],
"last": "Venkataraman",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Natalie",
"middle": [],
"last": "Glance",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Arjun Mukherjee, Vivek Venkataraman, Bing Liu, Na- talie Glance, et al. 2013. Fake review detection: Classification and analysis of real and pseudo re- views. UIC-CS-03-2013. Technical Report.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Local consumer review survey 2020",
"authors": [
{
"first": "Rosie",
"middle": [],
"last": "Murphy",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rosie Murphy. 2020. Local consumer review survey 2020. Bright Local.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Here's another kind of review fraud happening on amazon",
"authors": [
{
"first": "Nicole",
"middle": [],
"last": "Nguyen",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nicole Nguyen. 2018. Here's another kind of review fraud happening on amazon. BuzzFeed News.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Justifying recommendations using distantly-labeled reviews and fine-grained aspects",
"authors": [
{
"first": "Jianmo",
"middle": [],
"last": "Ni",
"suffix": ""
},
{
"first": "Jiacheng",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Julian",
"middle": [],
"last": "Mcauley",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
"volume": "",
"issue": "",
"pages": "188--197",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 188-197.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Glove: Global vectors for word representation",
"authors": [
{
"first": "Jeffrey",
"middle": [],
"last": "Pennington",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "Christopher D",
"middle": [],
"last": "Manning",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "1532--1543",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Sentence similarity measures for fine-grained estimation of topical relevance in learner essays",
"authors": [
{
"first": "Marek",
"middle": [],
"last": "Rei",
"suffix": ""
},
{
"first": "Ronan",
"middle": [],
"last": "Cummins",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 11th Workshop on Innovative Use of NLP for Building Educational Applications",
"volume": "",
"issue": "",
"pages": "283--288",
"other_ids": {
"DOI": [
"10.18653/v1/W16-0533"
]
},
"num": null,
"urls": [],
"raw_text": "Marek Rei and Ronan Cummins. 2016. Sentence simi- larity measures for fine-grained estimation of topical relevance in learner essays. In Proceedings of the 11th Workshop on Innovative Use of NLP for Build- ing Educational Applications, pages 283-288, San Diego, CA. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Neural sentence embedding using only in-domain sentences for outof-domain sentence detection in dialog systems",
"authors": [
{
"first": "Seonghan",
"middle": [],
"last": "Ryu",
"suffix": ""
},
{
"first": "Seokhwan",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Junhwi",
"middle": [],
"last": "Choi",
"suffix": ""
},
{
"first": "Hwanjo",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Gary Geunbae",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 2017,
"venue": "Pattern Recognition Letters",
"volume": "88",
"issue": "",
"pages": "26--32",
"other_ids": {
"DOI": [
"10.1016/j.patrec.2017.01.008"
]
},
"num": null,
"urls": [],
"raw_text": "Seonghan Ryu, Seokhwan Kim, Junhwi Choi, Hwanjo Yu, and Gary Geunbae Lee. 2017. Neural sentence embedding using only in-domain sentences for out- of-domain sentence detection in dialog systems. Pat- tern Recognition Letters, 88:26-32.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Review fraud: Hijacked amazon reviews a big problem says consumer reports",
"authors": [
{
"first": "Greg",
"middle": [],
"last": "Sterling",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Greg Sterling. 2019. Review fraud: Hijacked ama- zon reviews a big problem says consumer reports. Search Engine Land.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Hijacked reviews on amazon can trick shoppers",
"authors": [
{
"first": "Jake",
"middle": [],
"last": "Swearingen",
"suffix": ""
}
],
"year": 2019,
"venue": "Consumer Reports",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jake Swearingen. 2019. Hijacked reviews on amazon can trick shoppers. Consumer Reports.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "How ebay's review system is promoting fake, counterfeit and even dangerous products",
"authors": [
{
"first": "Hannah",
"middle": [],
"last": "Walsh",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hannah Walsh. 2020. How ebay's review system is pro- moting fake, counterfeit and even dangerous prod- ucts. Which.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "An example of review hijacking on Amazon (May 7, 2021) Figure 2: Hijacked reviews associated with the hair removal product inFigure 1"
},
"FIGREF1": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Generation of Synthetic Label and Data by Swapping Reviews among Dissimilar Products"
},
"FIGREF2": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Twin LSTM Network"
},
"FIGREF3": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "BERT Sequence Pair Classifier"
},
"FIGREF4": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "ROC curve for Twin LSTM network run on Intra-category data (Jaccard distance) ROC curve for BERT seq. pair classifier run on Intra-category data (Jaccard distance) ROC curve for Twin LSTM network run on Inter-category data"
},
"FIGREF5": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "ROC curve for BERT seq. pair classifier run on Inter-category data"
},
"FIGREF6": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Average Review Score vs. Number of Products Figure 11: Unrelated Review Score Distribution for Product-1 showing predominantly unrelated reviews Figure 12: Unrelated Review Score Distribution for Product-2 showing predominantly related reviews"
},
"FIGREF7": {
"uris": null,
"type_str": "figure",
"num": null,
"text": "Unrelated Review Score Distribution for Product-3 showing a roughly equal mix of related and unrelated reviews"
}
}
}
}