Commit 923b2b6

Resolved Merge Conflicts

Shubh-Nisar committed Nov 27, 2023
2 parents 7bec750 + 193d9b3
Showing 7 changed files with 65 additions and 9 deletions.
8 changes: 8 additions & 0 deletions logger.txt
@@ -293,6 +293,7 @@ amazon query:Iphone
 amazon query:Iphone
 amazon query:Iphone
 amazon query:Iphone
+<<<<<<< HEAD
 amazon query:iPad
 amazon query:iPad
 amazon query:iPad
@@ -316,3 +317,10 @@ amazon query:iPad
 amazon query:iPad
 amazon query:iPad
 amazon query:iPad
+=======
+amazon query:iphone
+amazon query:iphone
+amazon query:iphone
+amazon query:iphone
+amazon query:iphone
+>>>>>>> 193d9b395c989284203bd4d60446536812830a5e
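Note that the hunks above add the Git conflict markers themselves to logger.txt rather than a resolved version, so the markers end up committed. A minimal pre-commit style check for leftover markers (a sketch; the marker set and CLI usage are assumptions, not part of this repository):

import sys

# Lines Git inserts during a conflicted merge.  A bare "=======" can also be
# legitimate content (e.g. an RST heading), so matches are best treated as
# warnings rather than hard errors.
CONFLICT_MARKERS = ("<<<<<<< ", "=======", ">>>>>>> ")

def find_conflict_markers(path):
    """Return (line_number, line) pairs that look like leftover conflict markers."""
    hits = []
    with open(path, encoding="utf-8", errors="replace") as fh:
        for lineno, line in enumerate(fh, start=1):
            if line.startswith(CONFLICT_MARKERS):
                hits.append((lineno, line.rstrip("\n")))
    return hits

if __name__ == "__main__":
    # Usage: python check_conflicts.py logger.txt src/main_streamlit.py ...
    clean = True
    for path in sys.argv[1:]:
        for lineno, line in find_conflict_markers(path):
            print(f"{path}:{lineno}: {line}")
            clean = False
    sys.exit(0 if clean else 1)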
20 changes: 14 additions & 6 deletions slash_user_interface.py
@@ -9,8 +9,9 @@
 import sys
 sys.path.append('../')
 import streamlit as st
-from src.main_streamlit import search_items_API
+from src.main_streamlit import search_items_API, rakuten
 from src.url_shortener import shorten_url
+import src.configs as conf
 import pandas as pd
 import re
 import streamlit
@@ -69,7 +70,7 @@ def split_description(description):
 title = """
     <div class="header">
     <div class="t">SHOPSYNC</div>
-    <p>Get the Best Deals At Ease</p>
+    <p>Discover unparalleled savings effortlessly.</p>
     </div>
     <style>
     .header {
@@ -146,7 +147,7 @@ def highlight_row(dataframe):
 # Display Image
 # st.image("assets/ShopSync_p.png")
 
-st.write("ShopSync is an android application, website and a command line tool that scrapes the most popular e-commerce websites to get the best deals on the searched items across these websites.")
+st.write("ShopSync is a versatile platform comprising an Android application, a user-friendly website, and a command line tool. It adeptly scours the leading e-commerce websites, extracting optimal deals for the searched items across this diverse range of platforms.")
 product = st.text_input('Enter the product item name')
 website = st.selectbox('Select the website',('All','Walmart', 'Amazon', 'Ebay', 'BestBuy', 'Target', 'Costco', 'All'))
 
@@ -161,12 +162,15 @@ def highlight_row(dataframe):
 }
 # Pass product and website to method
 if st.button('Search') and product and website:
+    rakuten_discount = rakuten()
+    company_list = conf.getCompanies()
     results = search_items_API(website_dict[website], product)
     # Use st.columns based on return values
     description = []
     url = []
     price = []
     site = []
+    rakuten = []
 
     if results is not None and isinstance(results, list):
         for result in results:
@@ -182,10 +186,14 @@ def highlight_row(dataframe):
             else:
                 print("Unable to extract a valid price from the string")
             site.append(result['website'])
 
+    for i in range(len(site)):
+        k = company_list.index(site[i])
+        rakuten.append(str(rakuten_discount[k]) + "%")
+
     if len(price):
 
-        dataframe = pd.DataFrame({'Description': description,'Price':price,'Link':url,'Website':site})
+        dataframe = pd.DataFrame({'Description': description,'Price':price,'Link':url,'Website':site, 'Rakuten':rakuten})
         dataframe['Description'] = dataframe['Description'].apply(split_description)
         dataframe['Product'] = dataframe['Description'].str.split().str[:3].str.join(' ')
         dataframe['Product'] = dataframe['Product'].str.replace('[,"]', '', regex=True)
@@ -264,9 +272,9 @@ def add_http_if_not_present(url):
 }
 </style>
 <div class="footer">
-<p>Developed with ❤ by <a style='display: block; text-align: center;' href="https://github.com/Kashika08/ShopSync.git" target="_blank">ShopSync</a></p>
+<p>Developed with ❤ by <a style='display: block; text-align: center;' href="https://github.com/Neel317/ShopSync" target="_blank">ShopSync</a></p>
 <p><a style='display: block; text-align: center;' href="https://github.com/Kashika08/CSC510_ShopSync_Group40/blob/main/LICENSE" target="_blank">MIT License Copyright (c) 2023</a></p>
-<p>Contributors: Kashika, Riya, Sinchana, Sweta</p>
+<p>Contributors: Neel, Shubh, Tanay, Tanishq</p>
 </div>
 """
 st.markdown(footer,unsafe_allow_html=True)
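Two details in the new UI code are easy to miss: `rakuten = []` rebinds the `rakuten` name that was imported as a function at the top of the file (harmless only because Streamlit re-executes the whole script on every interaction), and `company_list.index(site[i])` raises ValueError for any site name missing from the configured list. A sketch of the same lookup with a rename and a dict; `rakuten_rates` and `rakuten_column` are hypothetical names, not part of the commit:

# Renamed so the imported rakuten() function is not shadowed.
rakuten_rates = dict(zip(company_list, rakuten_discount))

# .get() with a default avoids the ValueError that
# company_list.index(site[i]) raises for an unknown site name.
rakuten_column = [f"{rakuten_rates.get(s, 0)}%" for s in site]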
10 changes: 10 additions & 0 deletions src/configs.py
@@ -153,3 +153,13 @@ def scrape_ebay(query):
 
 
 CONFIGS = [WALMART, AMAZON, COSTCO, BESTBUY]
+
+def getRakutenList():
+    cashback = [10, 0, 1, 4, 1, 0]
+    return cashback
+
+def getCompanies():
+    companies = ["walmart", "amazon", "ebay", "bestbuy", "target", "costco"]
+    return companies
+
+
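The two new helpers return parallel lists: index i of getRakutenList() is the default cashback percentage for index i of getCompanies(). A minimal usage sketch:

from src.configs import getCompanies, getRakutenList

# Index i of one list corresponds to index i of the other.
for company, rate in zip(getCompanies(), getRakutenList()):
    print(f"{company}: {rate}% cash back")  # e.g. "walmart: 10% cash back"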
6 changes: 3 additions & 3 deletions src/configs_mt.py
@@ -53,11 +53,11 @@
 BESTBUY = {
     'site': 'bestbuy',
     'url': 'https://www.bestbuy.com/site/searchpage.jsp?st=',
-    'item_component': 'li',
+    'item_component': 'div',
     'item_indicator': {
-        'class': 'sku-item'
+        'class': 'embedded-sku'
     },
-    'title_indicator': 'h4.sku-header a',
+    'title_indicator': 'h4.sku-title a',
     'price_indicator': 'div.priceView-customer-price span',
     'link_indicator': 'a.image-link',
 }
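This hunk updates the BestBuy selectors to match the site's current markup: result containers are now div.embedded-sku and titles live in h4.sku-title. These config keys drive a generic scraper; a sketch of how such an entry is typically consumed (parse_results is a hypothetical helper, and the real scraper_mt.py logic may differ):

from bs4 import BeautifulSoup

def parse_results(html, config):
    """Apply a selector config such as BESTBUY to a search-results page."""
    soup = BeautifulSoup(html, "html.parser")
    # 'item_component' + 'item_indicator' locate one container per result.
    items = soup.find_all(config["item_component"], config["item_indicator"])
    results = []
    for item in items:
        # The *_indicator keys are CSS selectors applied inside the container.
        title = item.select_one(config["title_indicator"])
        price = item.select_one(config["price_indicator"])
        link = item.select_one(config["link_indicator"])
        if title and price and link:
            results.append({
                "title": title.get_text(strip=True),
                "price": price.get_text(strip=True),
                "link": link.get("href"),
            })
    return results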
3 changes: 3 additions & 0 deletions src/main.py
@@ -16,6 +16,9 @@
 from pydantic import BaseModel
 import csv
 import nest_asyncio
+import sys
+import os
+sys.path.append(os.path.abspath("Slash"))
 
 # local imports
 import scraper_mt as scr
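The same sys.path hack is added to src/scraper_mt.py below. os.path.abspath("Slash") resolves against the current working directory, so the import path is only correct when the process is launched from the directory that contains Slash. A CWD-independent alternative (a sketch; the directory layout is assumed, not verified against this repository):

import os
import sys

# __file__ is this module's own path, so the computed root does not depend on
# where the process was started from.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PROJECT_ROOT)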
23 changes: 23 additions & 0 deletions src/main_streamlit.py
@@ -7,9 +7,15 @@
 
 from typing import Optional
 from pydantic import BaseModel
+from bs4 import BeautifulSoup
+import requests
 
 # local imports
 import src.scraper_mt as scr
+from src.configs import getRakutenList, getCompanies
+
+cashback = getRakutenList()
+companies = getCompanies()
 
 # response type define
 class jsonScraps(BaseModel):
@@ -76,3 +82,20 @@ def search_items_API(
     else:
         # No results
         return None
+
+def rakuten():
+    for i in range(len(companies)):
+        url = "https://www.rakuten.com/search?term=" + companies[i]
+        response = requests.get(url)
+        # Check if the request was successful (status code 200)
+        if response.status_code == 200:
+            # Parse the HTML content of the page
+            soup = BeautifulSoup(response.text, 'html.parser')
+            # Find the element containing the cashback information
+            cashback_element = soup.find('div', {'class': 'css-1i7dpco'})  # Adjust the class based on the actual HTML structure
+            if cashback_element:
+                # Extract the cashback value
+                cashback_value = cashback_element.text.strip()
+                if (cashback_value):
+                    cashback[i] = cashback_value
+    return cashback
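rakuten() overwrites the integer defaults from getRakutenList() with raw element text whenever the hard-coded css-1i7dpco class matches, so the list ends up mixing ints with strings such as "10.0% Cash Back", and any network error raises out of the Streamlit callback. A more defensive variant (a sketch; rakuten_cashback is a hypothetical helper, and the CSS class is the one assumed by the commit, which may change since it looks auto-generated):

import re
import requests
from bs4 import BeautifulSoup

def rakuten_cashback(company, default=0.0, timeout=10):
    """Fetch one store's Rakuten cashback rate as a float.

    Network failures fall back to the default, and the percentage is parsed
    out of the element text instead of being stored as a raw string.
    """
    url = "https://www.rakuten.com/search?term=" + company
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        return default
    soup = BeautifulSoup(response.text, "html.parser")
    element = soup.find("div", {"class": "css-1i7dpco"})
    if element:
        # Accept "10%", "10.0% Cash Back", etc.
        match = re.search(r"(\d+(?:\.\d+)?)\s*%", element.text)
        if match:
            return float(match.group(1))
    return default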
4 changes: 4 additions & 0 deletions src/scraper_mt.py
@@ -11,6 +11,10 @@
 from datetime import datetime
 from threading import Thread
 
+import sys
+import os
+sys.path.append(os.path.abspath("Slash"))
+
 # local imports
 import formattr as form
 from configs_mt import AMAZON, WALMART, COSTCO, BESTBUY, scrape_ebay, scrape_target
