author    Oxbian <got.dacs@slmail.me>  2023-06-25 00:10:55 +0200
committer Oxbian <got.dacs@slmail.me>  2023-06-25 00:10:55 +0200
commit    8fe9382024ea80847206ad7811b2ce2e4dd6d825 (patch)
tree      d0eb49591f81a16256e996b4823b976a286ce316
parent    da3a8b740889d7874c1c30e9d24cfc0455291ccd (diff)
download  blog-generator-8fe9382024ea80847206ad7811b2ce2e4dd6d825.tar.gz
          blog-generator-8fe9382024ea80847206ad7811b2ce2e4dd6d825.zip
Fix broken blog generator
-rw-r--r--  generator.py        | 44
-rw-r--r--  markdown_parser.py  | 18
2 files changed, 32 insertions(+), 30 deletions(-)
diff --git a/generator.py b/generator.py
index de029a2..4c45caf 100644
--- a/generator.py
+++ b/generator.py
@@ -32,7 +32,7 @@ def generatePageXML(data, env_vars):
"""
template = open(env_vars['template_atom_post'], 'r').read()
date = datetime.strptime(data['date'],"%d-%m-%Y").isoformat() + "Z"
- return template.replace("$TITLE", data['title']).replace("$DATE", data['date']).replace("$CONTENT",
+ return template.replace("$TITLE", data['title']).replace("$DATE", date).replace("$CONTENT",
data['content']).replace("$URL", env_vars['website_url'] + data['filepath'])
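This hunk is the date fix for individual feed entries: Atom expects an RFC 3339 / ISO 8601 timestamp, so the value parsed with strptime, rather than the raw "%d-%m-%Y" string, is what now gets substituted for $DATE. A minimal sketch of the same conversion, with an illustrative date value:

```python
from datetime import datetime

# Illustrative value, not taken from the repository.
raw_date = "25-06-2023"

# Same conversion as generatePageXML: parse the blog's day-month-year format,
# then emit an ISO 8601 timestamp with a trailing "Z".
atom_date = datetime.strptime(raw_date, "%d-%m-%Y").isoformat() + "Z"
print(atom_date)  # 2023-06-25T00:00:00Z
```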
@@ -43,16 +43,16 @@ def generateAtomFeed(posts, env_vars):
env_vars: dictionnary of env variables
"""
# Generate RSS / Atom post
- atom_content = []
+ atom_content = ""
for post in posts:
# Checking if there is metadata, if not we don't create a RSS / Atom post
- if post['date'] != '':
- atom_content.append(generatePageXML(post, env_vars))
+ if post['date'] != '01-01-0001':
+ atom_content += generatePageXML(post, env_vars)
# Generate RSS / atom feed
template = open(env_vars['template_atom_feed'], 'r').read()
output = open(env_vars['parent_path'] + '/atom.xml', 'w')
- output.write(template.replace("$CONTENT", atom_content))
+ output.write(template.replace('$CONTENT', atom_content).replace('$DATE', datetime.today().strftime("%d-%m-%Y")))
output.close()
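The accumulator changes from a list to a string here because str.replace() only accepts strings; with the old list, writing the feed raised a TypeError. The hunk also fills the feed-level $DATE with today's date. A small sketch of the type issue (template and entry strings are made up for the example):

```python
template = "<feed>$CONTENT</feed>"
entries = ["<entry>a</entry>", "<entry>b</entry>"]

# template.replace("$CONTENT", entries)  # TypeError: replace() argument 2 must be str, not list

# Concatenating the entries first (the += loop in the patch does the same)
# keeps replace() happy.
feed = template.replace("$CONTENT", "".join(entries))
```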
@@ -63,18 +63,19 @@ def generateIndex(data, env_vars):
env_vars: dictionnary of env variables
"""
# Create the index content
- index_content = "<ul>"
+ index_content = "<ul>\n"
for page in data:
# Checking if there is metadata, if not we don't add the page in the index
- if page['date'] != '':
- index_content += '<li><a href="' + page['filepath'] + '">' + page['title'] + '</a><p>'
- + page['date'] + '</p></li>\n'
- index_content += "</ul>"
+ if page['date'] != '01-01-0001':
+ print(page['date'])
+ index_content += ('\t\t\t\t<li><a href="' + page['filepath'] + '">' + page['title'] + '</a><p>'
+ + page['date'] + '</p></li>\n')
+ index_content += "\t\t\t</ul>"
# Generate main page
template = open(env_vars['template_index'], 'r').read()
output = open(env_vars['parent_path'] + '/index.html', 'w')
- output.write(template.replace("$CONTENT", index_content))
+ output.write(template.replace('$CONTENT', index_content))
output.close()
@@ -100,7 +101,7 @@ if __name__=="__main__":
# Getting env variable
env_vars = { 'parent_path' : os.environ.get('PARENT_PATH'), 'pages_path' : os.environ.get('PAGES_PATH')
, 'markdown_path' : os.environ.get('MARKDOWN_PATH'), 'template_page' : os.environ.get('TEMPLATE_PAGE')
- , 'template_atom_post' : os.environ.get('TEMPLATE_PAGE'), 'template_atom_feed' : os.environ.get('TEMPLATE_ATOM_FEED')
+ , 'template_atom_post' : os.environ.get('TEMPLATE_ATOM_POST'), 'template_atom_feed' : os.environ.get('TEMPLATE_ATOM_FEED')
, 'website_url' : os.environ.get('WEBSITE_URL'), 'template_index' : os.environ.get('TEMPLATE_INDEX') }
# Checking if generate folder exist to remove previouly generated content, if not create it
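The environment-variable hunk fixes a copy-paste slip: template_atom_post was being loaded from TEMPLATE_PAGE, so feed entries were rendered with the HTML page template. A hedged sketch; the explicit guard is an addition for illustration, not part of the script:

```python
import os

# Read the dedicated Atom post template instead of reusing TEMPLATE_PAGE.
template_atom_post = os.environ.get('TEMPLATE_ATOM_POST')

# os.environ.get() returns None when the variable is missing, so a check
# makes a misconfigured environment fail loudly.
if template_atom_post is None:
    raise SystemExit("TEMPLATE_ATOM_POST is not set")
```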
@@ -112,20 +113,21 @@ if __name__=="__main__":
else:
os.mkdir(env_vars['pages_path'])
+ data = [] # A list for data generated by md2html
+
# Generate all markdown file
for file in os.listdir(env_vars['markdown_path']):
# Generating HTML page
print(f"{color['green']}Generating file: {file} {color['end']}")
- data = [] # A list for data generated by md2html
data.append(md2html(file, env_vars))
- sorted_data = sorted(data, key=lambda x:datetime.strptime(x['date'], '%d/%m/%Y'))
+ sorted_data = sorted(data, key=lambda x:datetime.strptime(x['date'], '%d-%m-%Y'))
- # Generating atom feed
- print(f"{color['green']}Generating RSS / Atom feed {color['end']}")
- generateAtomFeed(data, env_vars)
-
- # Generating index
- print(f"{color['green']}Generating main page {color['end']}")
- generateIndex(data, env_vars)
+ # Generating atom feed
+ print(f"{color['green']}Generating RSS / Atom feed {color['end']}")
+ generateAtomFeed(data, env_vars)
+
+ # Generating index
+ print(f"{color['green']}Generating main page {color['end']}")
+ generateIndex(data, env_vars)
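The main-loop hunk carries the core fix: data is created once before the loop instead of being reset for every markdown file, the feed and index are generated once after the loop, and the sort key uses the '%d-%m-%Y' format the pages actually carry. A reduced sketch of that flow, with made-up page dictionaries standing in for the md2html() results:

```python
from datetime import datetime

# Keys mirror the script's data structure; values are invented for the example.
pages = [
    {'title': 'Second post', 'date': '02-01-2023'},
    {'title': 'First post', 'date': '01-01-2023'},
]

# Accumulate across the whole loop, then sort once with the real date format.
sorted_pages = sorted(pages, key=lambda p: datetime.strptime(p['date'], '%d-%m-%Y'))
print([p['title'] for p in sorted_pages])  # ['First post', 'Second post']
```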
diff --git a/markdown_parser.py b/markdown_parser.py
index b521a87..2b9be27 100644
--- a/markdown_parser.py
+++ b/markdown_parser.py
@@ -31,17 +31,17 @@ def parseline(line):
return line
-def parsemd(env_vars, filepath):
+def parsemd(filepath, env_vars):
"""
Parse the markdown file and return the content to put into the template page
env_vars: dictionnary of environment variable
filepath: Filepath of the markdown file
return: a dictionnary containing title, metadata, local path, content for HTML
"""
- content = {'content': '', 'title': '', 'date': '', 'description': '', 'filepath': env_vars['pages_path'].replace(env_vars['parent_path'] + '/', '')
+ content = {'content': '', 'title': '', 'date': '01-01-0001', 'description': '', 'filepath': env_vars['pages_path'].replace(env_vars['parent_path'] + '/', '')
+ '/' + filepath.split('.')[0] + '.html'}
- inmeta, inquote, inpre, inul = False, False, False
+ inmeta, inquote, inpre, inul = False, False, False, False
# Reading the content of the file and transform into html
for line in open(env_vars['markdown_path'] + '/' + filepath, "r"):
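Two fixes sit in this hunk: the default date becomes the sentinel '01-01-0001', which still parses with strptime so sorting pages without metadata cannot crash, and the flag initialisation gains its missing fourth value, since unpacking three values into four names raises a ValueError before any line is read. Both in a short sketch:

```python
from datetime import datetime

# inmeta, inquote, inpre, inul = False, False, False   # ValueError: not enough values to unpack
inmeta, inquote, inpre, inul = False, False, False, False

# The sentinel date used for pages without metadata still parses, so the sort
# key never fails; the feed and index simply skip that value.
datetime.strptime('01-01-0001', '%d-%m-%Y')  # datetime(1, 1, 1, 0, 0)
```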
@@ -59,7 +59,7 @@ def parsemd(env_vars, filepath):
content['date'] = line.split(':')[1].strip()
# Getting the description metadata
- if line.startswith('description:'):
+ if inmeta and line.startswith('description:'):
content['description'] = line.split(':')[1].strip()
# Close quote if not quoting
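The added inmeta guard keeps metadata parsing confined to the front-matter block, so a body sentence that happens to contain "description:" no longer overwrites the real description. A tiny sketch under that assumption about the file format:

```python
inmeta = True                              # set while inside the "---" front matter
line = "description: a short summary"      # illustrative line, not from the repo

description = ''
if inmeta and line.startswith('description:'):
    description = line.split(':')[1].strip()
print(description)  # "a short summary"
```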
@@ -89,12 +89,12 @@ def parsemd(env_vars, filepath):
inquote = True
# Checking if it's a list
- elif line.startswith("-"):
+ elif line.startswith("-") and not line.startswith("---"):
if inul:
content['content'] += "</li>\n"
- content['content'] += "<li>" + parseline(line.lstrip("- "))
+ content['content'] += "\t<li>" + parseline(line.lstrip("- "))
else:
- content['content'] += "<ul><li>" + parseline(line.lstrip("- "))
+ content['content'] += "<ul>\n\t<li>" + parseline(line.lstrip("- "))
inul = True
# Checking if it's a title
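The extra startswith("---") check separates real list items from the front-matter fence, which also begins with "-" and previously opened a stray <li>. A quick sketch:

```python
# "---" marks the metadata block, so it must not be treated as a list item.
for line in ("- first item", "- second item", "---"):
    is_list_item = line.startswith("-") and not line.startswith("---")
    print(repr(line), is_list_item)  # list lines -> True, the fence -> False
```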
@@ -106,7 +106,7 @@ def parsemd(env_vars, filepath):
content['title'] += parseline(line.lstrip("# "))
# else it's a paragraph
- elif line != " " and line != "":
+ elif line != " " and line != "" and not inmeta and not line.startswith("---"):
content['content'] += "<p>" + parseline(line) + "</p>\n"
# Checking all balise are closed
@@ -115,7 +115,7 @@ def parsemd(env_vars, filepath):
inquote = False
if inul:
- content['content'] += "</li></ul>\n"
+ content['content'] += "</li>\n</ul>\n"
inul = False
if inpre: