-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path btdigcsv.py
More file actions
86 lines (70 loc) · 2.45 KB
/
btdigcsv.py
File metadata and controls
86 lines (70 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import requests
from time import sleep
import csv
from bs4 import BeautifulSoup
import re
from os import rename, remove
from functools import lru_cache
# Search term inserted into the btdig query URL; edit this to run a different search.
keyword = 'trackerName'
# Desktop-Chrome user-agent so the .onion gateway serves the regular HTML page.
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
def rename_oldcsv():
    """Stash the previous torrents.csv as a hidden backup, if one exists."""
    try:
        rename("torrents.csv", ".torrents.old")
    except FileNotFoundError:
        # Nothing to back up: this is the first run in this directory.
        print('Starting search')
    else:
        print('Saving previous search')
def saving_oldcsv():
    """Drop the stale hidden backup; if none exists, back up the current csv instead."""
    backup = '.torrents.old'
    try:
        remove(backup)
    except FileNotFoundError:
        rename_oldcsv()
@lru_cache(maxsize=128)  # bounded, matching soup(); each page is fetched at most once
def requests_generated(npage):
    """Download one btdig search-result page and return its HTML text.

    npage is the page number as a *string* (it is interpolated into the URL
    and also serves as the cache key).
    """
    print('Generating requests')
    sleep(5)  # throttle: be polite to the gateway between page fetches
    url = ('http://btdigggink2pdqzqrik3blmqemsbntpzwxottujilcdjfz56jumzfsyd.onion.pet'
           f'/search?q={keyword}&p={npage}&order=2')
    return requests.get(url, headers=header).text
@lru_cache(maxsize=128)
def soup(npage):
    """Parse the (cached) HTML of page *npage* into a BeautifulSoup tree."""
    html = requests_generated(npage)
    return BeautifulSoup(html, 'lxml')
def href_items(npage):
    """Return every <a> tag on page *npage* whose href is a magnet link."""
    magnet_href = re.compile("^magnet:")
    return soup(npage).find_all('a', attrs={'href': magnet_href})
def div_items(npage):
    """Return every result container (<div class="one_result">) on page *npage*."""
    page = soup(npage)
    return page.find_all('div', {'class': 'one_result'})
def list_created(npage):
    """Build a list of (title, size, magnet) rows for every result on page *npage*.

    Titles and sizes come from the result <div>s, magnets from the magnet
    <a> tags; rows are aligned positionally by zip.
    """
    results = div_items(npage)  # hoisted: the original traversed the soup twice
    titles = (d.find(class_='torrent_name').text for d in results)
    sizes = (d.find(class_='torrent_size').text for d in results)
    magnets = (a.get('href') for a in href_items(npage))
    return list(zip(titles, sizes, magnets))
def rename_csv(npage):
    """Restore the backup csv when the current search produced nothing.

    Called when no torrent_age element is found on page *npage*: either the
    keyword matched nothing at all (page 0) or we ran past the last page of
    results, in which case the csv already written is the final output.
    """
    try:
        rename(".torrents.old", "torrents.csv")
        print('No results found, recovery old csv')
    except OSError:  # narrowed from a bare except: there is no backup to restore
        if int(npage) == 0:
            print('No results found, try another keyword (line 7)')
        else:
            print('Finished process')
def torrent_age(npage):
    """Return the 'found ... ago' text of the first result on page *npage*.

    A missing element means the page has no results: restore/clean up the
    csv via rename_csv and terminate the whole script.
    """
    try:
        return str(soup(npage).find(class_='torrent_age').text)
    except AttributeError:  # .find() returned None: no results on this page
        rename_csv(npage)
        # Same effect as exit(), but does not depend on the site module.
        raise SystemExit
def listo_csv(npage):
    """Append result pages to torrents.csv, starting at page *npage*.

    Fetches successive pages until a page's first result is only one day old
    (the results are ordered by age, so that marks the end of the useful
    data) or until torrent_age() terminates the script on an empty page.
    """
    npage = int(npage)
    while True:  # iterative rewrite: the original recursed once per page
        page = str(npage)
        if torrent_age(page) == 'found 1 day ago':
            print('All pages have been added')
            raise SystemExit
        with open('torrents.csv', 'a', newline='', encoding='utf-8') as file:
            writer = csv.writer(file, quoting=csv.QUOTE_ALL, delimiter=',')
            writer.writerows(list_created(page))
        print('Adding page ' + page + ' to your csv')
        npage += 1
# Guard the entry point so importing this module does not start a scrape.
if __name__ == '__main__':
    saving_oldcsv()  # rotate/clear the previous run's backup
    listo_csv(0)     # start scraping from the first result page