 'use strict';

-import { createReadStream } from 'node:fs';
+import { readFile } from 'node:fs/promises';
 import { basename, extname, join } from 'node:path';
-import readline from 'node:readline';

 import graymatter from 'gray-matter';

@@ -63,50 +62,28 @@ const generateBlogData = async () => {
     '**/index.md',
   ]);

-  return new Promise(resolve => {
-    const posts = [];
-    const rawFrontmatter = [];
-
-    filenames.forEach(filename => {
-      // We create a stream for reading a file instead of reading the files
-      const _stream = createReadStream(join(blogPath, filename));
-
-      // We create a readline interface to read the file line-by-line
-      const _readLine = readline.createInterface({ input: _stream });
-
-      // Creates an array of the metadata based on the filename
-      // This prevents concurrency issues since the for-loop is synchronous
-      // and these event listeners are not
-      rawFrontmatter[filename] = [0, ''];
-
-      // We read line by line
-      _readLine.on('line', line => {
-        rawFrontmatter[filename][1] += `${line}\n`;
-
-        // We observe the frontmatter separators
-        if (line === '---') {
-          rawFrontmatter[filename][0] += 1;
-        }
-
-        // Once we have two separators we close the readLine and the stream
-        if (rawFrontmatter[filename][0] === 2) {
-          _readLine.close();
-          _stream.close();
-        }
-      });
-
-      // Then we parse gray-matter on the frontmatter
-      // This allows us to only read the frontmatter part of each file
-      // and optimise the read-process as we have thousands of markdown files
-      _readLine.on('close', () => {
-        posts.push(getFrontMatter(filename, rawFrontmatter[filename][1]));
-
-        if (posts.length === filenames.length) {
-          resolve({ categories: [...blogCategories], posts });
-        }
-      });
-    });
-  });
+  const posts = [];
+  // TODO: this should be done via a stream (like it originally was) instead of reading the whole file
+  // (I went with `readFile` just because it's simpler for a POC, streams should work too)
+  for (const filename of filenames) {
+    const fileContents = await readFile(join(blogPath, filename), 'utf-8');
+
+    const frontmatterStart = fileContents.indexOf('---\n') + '---\n'.length;
+
+    // Offset of the closing separator, relative to `frontmatterStart`
+    const frontmatterEnd = fileContents
+      .slice(frontmatterStart)
+      .indexOf('---\n');
+
+    const rawFrontmatter =
+      '---\n' +
+      fileContents.slice(frontmatterStart, frontmatterStart + frontmatterEnd) +
+      '---\n';
+
+    posts.push(getFrontMatter(filename, rawFrontmatter));
+  }
+
+  return { categories: [...blogCategories], posts };
 };

 export default generateBlogData;
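For the follow-up flagged in the TODO above, the stream-based reading can come back without the event-listener bookkeeping of the removed code: a `readline.Interface` is async-iterable, so the loop can stop as soon as the closing separator is seen. A minimal sketch, assuming the same `blogPath` constant and frontmatter layout as in this module (`readFrontmatter` is a hypothetical helper name, not part of the PR):

import { createReadStream } from 'node:fs';
import { join } from 'node:path';
import readline from 'node:readline';

// Sketch only: read a file line-by-line and return just the raw frontmatter
// block (both '---' separators included), without reading the whole file
const readFrontmatter = async (blogPath, filename) => {
  const stream = createReadStream(join(blogPath, filename));
  const reader = readline.createInterface({ input: stream });

  let separators = 0;
  let rawFrontmatter = '';

  for await (const line of reader) {
    rawFrontmatter += `${line}\n`;

    // Count the frontmatter separators as they go by
    if (line === '---') {
      separators += 1;
    }

    // Two separators means the frontmatter block is complete
    if (separators === 2) {
      break;
    }
  }

  // Breaking out of the loop closes the readline interface, but the
  // underlying file stream still needs to be released explicitly
  stream.destroy();

  return rawFrontmatter;
};

With a helper of this shape, the `readFile` call and the index slicing in the loop above would collapse to a single `const rawFrontmatter = await readFrontmatter(blogPath, filename);` before the `getFrontMatter` call, keeping the loop sequential while reading only the head of each of the thousands of markdown files.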