cleaning some code (from MR #9)

Carl Chenet 2017-07-31 15:58:30 +02:00
parent 67b7e72507
commit 37ee1e5a67
6 changed files with 27 additions and 29 deletions

View file

@@ -20,7 +20,7 @@
 # standard library imports
 from operator import itemgetter
-class AddTags(object):
+class AddTags:
     '''Add as many tags as possible depending on the tweet length'''
     def __init__(self, tweet, tags):
         '''Constructor of AddTags class'''
@@ -34,14 +34,14 @@ class AddTags(object):
         tweetlength = len(self.tweet)
         # sort list of tags, the ones with the greater length first
-        tagswithindices = ({'text':i, 'length':len(i)} for i in self.tags)
+        tagswithindices = ({'text':i, 'length': len(i)} for i in self.tags)
         sortedtagswithindices = sorted(tagswithindices, key=itemgetter('length'), reverse=True)
         self.tags = (i['text'] for i in sortedtagswithindices)
         # add tags is space is available
         for tag in self.tags:
             taglength = len(tag)
-            if (tweetlength + (taglength +1)) <= maxlength:
+            if (tweetlength + (taglength + 1)) <= maxlength:
                 self.tweet = ' '.join([self.tweet, tag])
                 tweetlength += (taglength + 1)
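
For context, a minimal standalone sketch of the tag-appending logic this file implements; the function name, the 500-character default and the sample values are illustrative only (in the class itself maxlength is computed elsewhere):

from operator import itemgetter

def add_tags(tweet, tags, maxlength=500):
    '''Append as many tags as fit, longest first (illustrative sketch)'''
    # sort the tags by length, the ones with the greater length first
    withlength = ({'text': i, 'length': len(i)} for i in tags)
    ordered = (i['text'] for i in sorted(withlength, key=itemgetter('length'), reverse=True))
    for tag in ordered:
        # +1 accounts for the space separating the tag from the tweet
        if len(tweet) + len(tag) + 1 <= maxlength:
            tweet = ' '.join([tweet, tag])
    return tweet

print(add_tags('New release of feed2toot is out', ['#python', '#rss', '#mastodon']))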

View file

@@ -25,7 +25,7 @@ import sys
 __version__ = '0.5'
-class CliParse(object):
+class CliParse:
     '''CliParse class'''
     def __init__(self):
         '''Constructor for the CliParse class'''
@@ -34,7 +34,7 @@ class CliParse(object):
     def main(self):
         '''main of CliParse class'''
         feed2tootepilog = 'For more information: https://feed2toot.readhthedocs.org'
-        feed2tootdescription = 'Take rss feed and send it to Mastodon'
+        feed2tootdescription = 'Take rss feed and send it to Mastodon'
         parser = ArgumentParser(prog='feed2toot',
                                 description=feed2tootdescription,
                                 epilog=feed2tootepilog)
@@ -63,7 +63,7 @@ class CliParse(object):
                             action='store_const', const='debug', default='warning',
                             help='enable debug output, work on log level DEBUG')
         levels = [i for i in logging._nameToLevel.keys()
-                  if (type(i) == str and i != 'NOTSET')]
+                  if (type(i) == str and i != 'NOTSET')]
         parser.add_argument('--syslog', nargs='?', default=None,
                             type=str.upper, action='store',
                             const='INFO', choices=levels,
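
As a rough, runnable illustration of the --syslog option reformatted above (the help text is invented here; logging._nameToLevel is a private attribute of the logging module that the original code also relies on):

import logging
from argparse import ArgumentParser

parser = ArgumentParser(prog='feed2toot')
# valid syslog levels are taken from the logging module, NOTSET excluded
levels = [i for i in logging._nameToLevel.keys()
          if (type(i) == str and i != 'NOTSET')]
parser.add_argument('--syslog', nargs='?', default=None,
                    type=str.upper, action='store',
                    const='INFO', choices=levels,
                    help='log to syslog at the given level (assumed wording)')
print(parser.parse_args(['--syslog', 'debug']))  # Namespace(syslog='DEBUG')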

View file

@@ -17,18 +17,17 @@
 '''Get values of the configuration file'''
 # standard library imports
-from configparser import SafeConfigParser, NoOptionError, NoSectionError
+from configparser import SafeConfigParser
 import logging
 import os
 import os.path
 import socket
 import sys
 import re
 # 3rd party library imports
 import feedparser
-class ConfParse(object):
+class ConfParse:
     '''ConfParse class'''
     def __init__(self, clioptions):
         '''Constructor of the ConfParse class'''
@@ -46,7 +45,6 @@ class ConfParse(object):
         config = SafeConfigParser()
         if not config.read(os.path.expanduser(pathtoconfig)):
             sys.exit('Could not read config file')
-        # The feedparser section
         if config.has_option('feedparser', 'accept_bozo_exceptions'):
             self.accept_bozo_exceptions = config.getboolean('feedparser', 'accept_bozo_exceptions')
@@ -185,7 +183,7 @@ class ConfParse(object):
                    sys.exit('The parent directory of the cache file does not exist: {cachefileparent}'.format(cachefileparent=cachefileparent))
            else:
                options['cachefile'] = self.clioptions.cachefile
-           ### cache limit
+           # cache limit
            if config.has_section(section):
                confoption = 'cache_limit'
                if config.has_option(section, confoption):
@@ -225,7 +223,7 @@ class ConfParse(object):
                # host, port, user, pass, database options
                ##########################################
                plugins[section] = {}
-               for currentoption in ['host','port','user','pass','database']:
+               for currentoption in ['host', 'port', 'user', 'pass', 'database']:
                    if config.has_option(section, currentoption):
                        plugins[section][currentoption] = config.get(section, currentoption)
                if 'host' not in plugins[section]:
@@ -234,7 +232,7 @@ class ConfParse(object):
                    plugins[section]['port'] = 8086
                if 'measurement' not in plugins[section]:
                    plugins[section]['measurement'] = 'tweets'
-               for field in ['user','pass','database']:
+               for field in ['user', 'pass', 'database']:
                    if field not in plugins[section]:
                        sys.exit('Parsing error for {field} in the [{section}] section: {field} is not defined'.format(field=field, section=section))
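
A self-contained sketch of the plugin-option lookup these hunks touch, reading an inline config string instead of a file and using the non-deprecated ConfigParser name rather than SafeConfigParser; the section name and values are examples:

from configparser import ConfigParser

config = ConfigParser()
config.read_string('[influxdb]\nhost = 127.0.0.1\nuser = feed2toot\npass = secret\ndatabase = tweets\n')

section = 'influxdb'
plugins = {section: {}}
for currentoption in ['host', 'port', 'user', 'pass', 'database']:
    if config.has_option(section, currentoption):
        plugins[section][currentoption] = config.get(section, currentoption)
# fall back to defaults for optional keys, as ConfParse does above
plugins[section].setdefault('host', '127.0.0.1')
plugins[section].setdefault('port', 8086)
plugins[section].setdefault('measurement', 'tweets')
print(plugins)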

View file

@@ -38,9 +38,9 @@ class FeedCache:
             with open(self.options['cachefile']) as dbdsc:
                 dbfromfile = dbdsc.readlines()
             dblist = [i.strip() for i in dbfromfile]
-            self.dbfeed = deque(dblist, self.options['cache_limit'] )
+            self.dbfeed = deque(dblist, self.options['cache_limit'])
         else:
-            self.dbfeed = deque([], self.options['cache_limit'] )
+            self.dbfeed = deque([], self.options['cache_limit'])

     def append(self, rssid):
         '''Append a rss id to the cache'''
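
The second positional argument of deque is maxlen, so the cache above silently drops its oldest entries once cache_limit is reached; a tiny sketch with made-up values:

from collections import deque

cache_limit = 3
dbfeed = deque([], cache_limit)
for rssid in ['id1', 'id2', 'id3', 'id4']:
    dbfeed.append(rssid)
# the oldest id was evicted to respect the limit
print(list(dbfeed))  # ['id2', 'id3', 'id4']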

View file

@@ -16,9 +16,6 @@
 # Push values to a influxdb database
 '''Push values to a influxdb database'''
 # standard libraries imports
-import json
 # 3rd party libraries imports
 from influxdb import InfluxDBClient
@@ -30,13 +27,16 @@ class InfluxdbPlugin(object):
         self.data = data
         self.datatoinfluxdb = []
         self.client = InfluxDBClient(self.plugininfo['host'],
-            self.plugininfo['port'],
-            self.plugininfo['user'],
-            self.plugininfo['pass'],
-            self.plugininfo['database'])
+                                     self.plugininfo['port'],
+                                     self.plugininfo['user'],
+                                     self.plugininfo['pass'],
+                                     self.plugininfo['database'])
         self.main()

     def main(self):
         '''Main of the PiwikModule class'''
-        self.datatoinfluxdb.append({'measurement': self.plugininfo['measurement'], 'fields': {'value': self.data}})
+        self.datatoinfluxdb.append({
+            'measurement': self.plugininfo['measurement'],
+            'fields': {'value': self.data}
+        })
         self.client.write_points(self.datatoinfluxdb)
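
For reference, the point format write_points expects after this change; the connection values are placeholders (influxdb-python's InfluxDBClient takes host, port, user, password and database positionally):

from influxdb import InfluxDBClient

client = InfluxDBClient('localhost', 8086, 'user', 'pass', 'tweets')
# one point per call, mirroring the plugin above
points = [{
    'measurement': 'tweets',
    'fields': {'value': 1},
}]
client.write_points(points)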

View file

@@ -17,7 +17,7 @@
 # Remove duplicates from the final string before sending the tweet
 '''Remove duplicates from the final string before sending the tweet'''
-class RemoveDuplicates(object):
+class RemoveDuplicates:
     '''Remove duplicates from the final string before sending the tweet'''
     def __init__(self, tweet):
         '''Constructor of RemoveDuplicates class'''
@@ -32,12 +32,12 @@ class RemoveDuplicates(object):
             if element != ' ' and (element.startswith('http://') or element.startswith('https://')):
                 newlink = True
                 # if we already found this link, increment the counter
-                for i,_ in enumerate(links):
+                for i, _ in enumerate(links):
                     if links[i]['link'] == element:
                         newlink = False
                         links[i]['count'] += 1
                 if newlink:
-                    links.append({'link': element, 'count': 1})
+                    links.append({'link': element, 'count': 1})
         # remove duplicates
         validatedlinks = []
         for i in range(len(links)):
@@ -45,14 +45,14 @@ class RemoveDuplicates(object):
                 validatedlinks.append(links[i])
         wildcard = 'FEED2TOOTWILDCARD'
         for element in validatedlinks:
-            for i in range(element['count']):
+            for i in range(element['count']):
                 # needed for not inversing the order of links if it is a duplicate
                 # and the second link is not one
                 if i == 0:
-                    self.tweet = self.tweet.replace(element['link'], wildcard, 1 )
+                    self.tweet = self.tweet.replace(element['link'], wildcard, 1)
                 else:
                     self.tweet = self.tweet.replace(element['link'], '', 1)
-            # finally
+            # finally
             self.tweet = self.tweet.replace(wildcard, element['link'], 1)
         # remove all 2xspaces
         self.tweet = self.tweet.replace('  ', ' ')
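
To see the wildcard trick from this loop in isolation, a hypothetical helper (remove_duplicate_links is not part of feed2toot) that keeps only the first occurrence of a repeated link:

def remove_duplicate_links(tweet, link, count):
    '''Keep only the first occurrence of a duplicated link (illustrative sketch)'''
    wildcard = 'FEED2TOOTWILDCARD'
    for i in range(count):
        if i == 0:
            # shield the first occurrence behind a placeholder
            tweet = tweet.replace(link, wildcard, 1)
        else:
            # drop every later occurrence
            tweet = tweet.replace(link, '', 1)
    # restore the first occurrence, then collapse doubled spaces
    return tweet.replace(wildcard, link, 1).replace('  ', ' ')

print(remove_duplicate_links('Read https://example.org now https://example.org', 'https://example.org', 2))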