145 lines
4.6 KiB
Python
145 lines
4.6 KiB
Python
import os
|
|
import random
|
|
import time
|
|
|
|
from bs4 import BeautifulSoup
|
|
import requests
|
|
from mastodon import Mastodon
|
|
from moviepy.editor import VideoFileClip
|
|
from urllib.parse import urlparse
|
|
import logging
|
|
import traceback
|
|
|
|
# File-based logging: INFO and above are appended to the bot's log file.
logging.basicConfig(filename='/home/captain/dev/log/setu.log', level=logging.INFO)
# NOTE(review): logging.getLogger() takes a logger *name*, not a file path —
# this creates a logger literally named '/home/captain/dev/log/setu.log'.
# It still works (records propagate to the root handler configured above),
# but logging.getLogger(__name__) would be the conventional choice.
logger = logging.getLogger('/home/captain/dev/log/setu.log')
|
|
# API client for the bot account on botsin.space.
# SECURITY NOTE(review): the access token is hardcoded in source. It should be
# loaded from an environment variable or a config file, and this (now leaked)
# token should be revoked and reissued.
mastodon = Mastodon(
    access_token = 'iQHxw2fdVO92q73gg2w9yTpMj0inybgOq5ezR7thffU',
    api_base_url = 'https://botsin.space'
)
# Handle the bot responds to; used below to strip the mention prefix from
# incoming status text.
account_name = '@setu'

# Alternate account configuration (disabled).
# mastodon = Mastodon(
#     access_token = 'd357sMhxQ7GoIbxt3qSGr9YbSx1tVVf9sggA2pVOoMA',
#     api_base_url = 'https://o3o.ca'
# )
# account_name = '@kedai'


# External service that resolves twitter.com links into direct media
# download URLs (used by post_mentions for link-only DMs).
tvdl_url = 'https://tvdl-api.saif.dev/'
|
|
|
|
def load_id(path='/home/captain/dev/tmp/setu_dm'):
    """Return the last processed notification id persisted by save_id().

    Args:
        path: State file to read. Defaults to the bot's on-disk location;
            the parameter exists so callers/tests can supply another file.

    Returns:
        The stored id as a string with surrounding whitespace removed
        (save_id() writes a trailing newline), or '' if the file is empty.

    Raises:
        OSError: if the state file is missing or unreadable (the top-level
            loop logs the traceback and retries).
    """
    with open(path) as f:
        # Strip the trailing newline written by save_id() so a clean id
        # string is passed to the Mastodon API as min_id.  (The original
        # returned the raw contents, newline included, and also called
        # f.close() redundantly inside the `with` block.)
        return f.read().strip()
|
|
|
|
def save_id(id_, path='/home/captain/dev/tmp/setu_dm'):
    """Persist *id_* (the last processed notification id) to disk.

    Overwrites the state file with the id followed by a newline, UTF-8
    encoded, so load_id() can pick it up on the next poll cycle.

    Args:
        id_: Notification id (int or str) to store.
        path: State file to write. Defaults to the bot's on-disk location;
            the parameter exists so callers/tests can supply another file.
    """
    # The `with` statement closes the file on exit; the original's explicit
    # f.close() inside the block was redundant and has been removed.
    with open(path, 'wb') as f:
        f.write(("%s\n" % id_).encode())
|
|
|
|
|
|
def post_mentions():
    """Poll for new direct-message mentions and repost their media publicly.

    Reads the last processed notification id from disk, fetches newer
    mention notifications, and for each direct mention either:
      * downloads media from a URL in the message text (resolving
        twitter.com links through the tvdl service), or
      * re-uploads the message's attached media (falling back to a GIF
        conversion via moviepy when a video upload is rejected),
    then posts the media to the bot's own timeline marked sensitive, and
    persists the notification id so it is not processed again.

    Side effects: network calls (Mastodon API, tvdl, media downloads),
    temp-file writes under /home/captain/dev/tmp/, and state-file updates
    via save_id().  Raises whatever the underlying libraries raise; the
    top-level loop catches and logs.
    """
    min_id = load_id()
    logger.info('!!! %s get %s', time.asctime(), min_id)
    # Only notifications newer than the persisted id; reverse so the
    # oldest is handled first and save_id() advances monotonically.
    mentions = mastodon.notifications(mentions_only=True, min_id=min_id)
    mentions.reverse()
    for mention in mentions:
        if 'status' not in mention:
            continue
        soup = BeautifulSoup(mention['status']['content'], 'html.parser')
        # Only act on *direct* mentions; anything else is just marked seen.
        if not (mention['type'] == 'mention' and mention['status']['visibility'] == 'direct'):
            save_id(mention['id'])
            continue
        if not mention['status']['media_attachments']:
            # No attachments: the DM text (after the @mention) may be a URL.
            content = soup.get_text().split(account_name)[-1].strip()
            media_ids = []
            if content.startswith('http'):
                #import ipdb; ipdb.set_trace()
                logging.info('get url %s', content)
                try:
                    url = urlparse(content)
                    url_ = url.geturl()
                    if 'twitter.com' in url_:
                        # Resolve tweet URLs to a direct download URL via
                        # the tvdl service; 'ver' is presumably an API/client
                        # version expected by that service — TODO confirm.
                        data = {
                            'url': url_,
                            'ver': 1306
                        }
                        r_ = requests.post(tvdl_url, data=data)
                        if r_.status_code != 200:
                            raise ValueError
                        resp_data = r_.json()
                        url_ = resp_data.get('high', {}).get('downloadURL')
                    r = requests.get(url_)
                    mime_type = 'image/jpeg'
                    url = urlparse(url_)
                    ext = url.path.split('.')[-1]
                    if ext.lower() in ['jpg', 'jpeg', 'gif']:
                        # Known image types: upload the raw bytes directly.
                        if ext == 'gif':
                            mime_type = 'image/gif'
                        toot_resp = mastodon.media_post(r.content, mime_type)
                    else:
                        # Unknown type (e.g. video): write to a temp file and
                        # let Mastodon.py sniff the mime type (mime_type=None).
                        f = open('/home/captain/dev/tmp/temp_download_file.%s' % ext, 'wb')
                        f.write(r.content)
                        f.close()
                        mime_type = None
                        toot_resp = mastodon.media_post('/home/captain/dev/tmp/temp_download_file.%s' % ext, mime_type)
                # NOTE(review): bare except silently skips this mention
                # *without* saving its id, so it will be retried next poll.
                except:
                    continue
                if toot_resp.get('id'):
                    media_ids.append(toot_resp['id'])
                # URL-sourced media is posted with a fixed NSFW caption.
                mastodon.status_post('🔞', media_ids=media_ids, sensitive=True)
            save_id(mention['id'])
            continue

        # DM carries attachments: mirror each one to the bot's account.
        media_ids = []
        for media in mention['status']['media_attachments']:
            #import ipdb; ipdb.set_trace()
            mime_type = 'image/jpeg'
            if media['type'] == 'video':
                resp = requests.get(media['url'])
                if resp.status_code != 200:
                    continue
                f = open('/home/captain/dev/tmp/temp_video.mp4', 'wb')
                f.write(resp.content)
                f.close()
                mime_type = None
                try:
                    toot_resp = mastodon.media_post('/home/captain/dev/tmp/temp_video.mp4', mime_type)
                # NOTE(review): bare except; if the video upload is rejected,
                # fall back to converting the clip to a GIF and uploading that.
                except:
                    try:
                        clip = VideoFileClip('/home/captain/dev/tmp/temp_video.mp4')
                        clip.write_gif('/home/captain/dev/tmp/temp_gif.gif')
                        mime_type = 'image/gif'
                        toot_resp = mastodon.media_post('/home/captain/dev/tmp/temp_gif.gif', mime_type)
                    except:
                        continue
                if toot_resp.get('id'):
                    media_ids.append(toot_resp['id'])
                continue
            # Non-video attachment: upload the raw bytes as JPEG.
            r = requests.get(media['url'])
            try:
                toot_resp = mastodon.media_post(r.content, mime_type)
            except:
                continue
            if toot_resp.get('id'):
                media_ids.append(toot_resp['id'])
        soup = BeautifulSoup(mention['status']['content'], 'html.parser')
        logger.info('posting %s, %s, %s', mention['id'], soup.get_text(), mention['status']['media_attachments'])
        # Reuse the DM text (after the @mention) as the caption; default to
        # the NSFW emoji when the message had no text.
        status = soup.get_text().split(account_name)[-1]
        if not status:
            status = '🔞'
        if media_ids:
            mastodon.status_post(status, media_ids=media_ids, sensitive=True)
        save_id(mention['id'])
|
|
|
|
# Top-level poll loop: process new mentions every 10 seconds, forever.
# Any failure (network, API, parsing) is logged with a full traceback and
# the loop keeps running.
while True:
    try:
        post_mentions()
    except Exception:
        # (The original bound the exception to an unused `ex` variable and
        # duplicated the sleep in both branches.)
        logger.error(traceback.format_exc())
    # Single sleep shared by the success and failure paths.
    time.sleep(10)
|
|
|