3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
/node_modules
package-lock.json
.env
41 changes: 40 additions & 1 deletion README.md
@@ -50,6 +50,30 @@ Use it in apps, bots, landing pages, Slack integrations, rejection letters, or w

---

## 🧠 Now with LLM-powered excuses! (Optional)

If you enable LLM support and provide an [OpenRouter](https://openrouter.ai/) API key, the excuses will be generated by an AI model for maximum hilarity.

### 🎯 Thematic Excuses

When LLM excuses are enabled, you can pass an optional `theme` query parameter to get a context-aware excuse:

```http
GET /no?theme=starwars
```
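
For example, assuming an instance running locally on the default port (3000):

```bash
curl "http://localhost:3000/no?theme=starwars"
```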

Example response:

```json
{
  "reason": "I can't join your Lightsaber duel club today; I left my lightsaber charging in the Death Star's cathode ray tube overnight and it's still stuck in hyperspace",
  "theme": "starwars",
  "source": "llm"
}
```
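
If the API key is missing or the LLM call fails, the service falls back to the bundled `reasons.json` and tags the response accordingly (the reason below is illustrative):

```json
{
  "reason": "I would love to, but my horoscope explicitly advised against it.",
  "source": "offline"
}
```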

---

## 🛠️ Self-Hosting

Want to run it yourself? It’s lightweight and simple.
@@ -65,7 +89,14 @@ cd no-as-a-service
npm install
```

### 3. Configure environment variables (optional)
Create a `.env` file with your OpenRouter key (it's loaded automatically by `dotenv` at startup):

```env
OPENROUTER_API_KEY=your_openrouter_api_key
```

### 4. Start the server
```bash
npm start
```
@@ -80,6 +111,12 @@ You can also change the port using an environment variable:
PORT=5000 npm start
```

Finally, you can adjust the LLM behavior in `index.js`:
```js
const shouldUseLLM = true; // Toggle LLM use
const model = "meta-llama/llama-3.3-8b-instruct:free"; // Select preferred model
```

---

## 📁 Project Structure
@@ -88,6 +125,7 @@
no-as-a-service/
├── index.js # Express API
├── reasons.json # 1000+ universal rejection reasons
├── .env # (optional) environment file with OpenRouter key
├── package.json
├── .devcontainer.json # VS Code / GitHub devcontainer setup
└── README.md
@@ -111,6 +149,7 @@ For reference, here’s the package config:
"author": "hotheadhacker",
"license": "MIT",
"dependencies": {
"dotenv": "^16.5.0",
"express": "^4.18.2",
"express-rate-limit": "^7.0.0"
}
66 changes: 58 additions & 8 deletions index.js
@@ -1,3 +1,4 @@
require('dotenv').config();
const express = require('express');
const rateLimit = require('express-rate-limit');
const fs = require('fs');
@@ -6,28 +7,77 @@ const app = express();
app.set('trust proxy', true);
const PORT = process.env.PORT || 3000;

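// LLM settings: toggle LLM usage and pick an OpenRouter model; the key is read from .env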
const shouldUseLLM = true;
const model = 'meta-llama/llama-3.3-8b-instruct:free';
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;

// Load reasons from JSON
const reasons = JSON.parse(fs.readFileSync('./reasons.json', 'utf-8'));

// Rate limiter: 120 requests per minute per IP
const limiter = rateLimit({
  windowMs: 60 * 1000, // 1 minute
  max: 120,
  keyGenerator: (req) => req.headers['cf-connecting-ip'] || req.ip, // Fallback if header missing (or for non-CF)
  message: {
    error: 'Too many requests, please try again later. (120 reqs/min/IP)',
  },
});

app.use(limiter);

// Random rejection reason endpoint
app.get('/no', async (req, res) => {
  if (shouldUseLLM && OPENROUTER_API_KEY) {
    const theme = req.query.theme?.toLowerCase();
    const userMessage = theme
      ? `Give me an excuse to say no to something, related to the theme: ${theme}.`
      : 'Give me an excuse to say no to something.';

    try {
      // Global fetch requires Node 18+
      const response = await fetch('https://openrouter.ai/api/v1/chat/completions', {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${OPENROUTER_API_KEY}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model,
          messages: [
            {
              role: 'system',
              content:
                "You are an API that generates a single, creative, humorous, and random excuse to say 'no'. Your response must contain only the excuse — no greetings, explanations, or additional text.",
            },
            {
              role: 'user',
              content: userMessage,
            },
          ],
        }),
      });

      const data = await response.json();

      if (!response.ok) {
        // Log and fall through to the local fallback instead of leaving the request hanging
        console.error('Fetch failed, falling back to local reasons:', data.error?.message ?? response.statusText);
      } else {
        const reason = data.choices?.[0]?.message?.content?.trim();
        if (reason) {
          return res.json({ reason, theme, source: 'llm' });
        }
        console.warn('LLM response was empty, falling back to local reasons.');
      }
    } catch (err) {
      console.error('LLM error, falling back to local reasons:', err.message);
    }
  }

  const reason = reasons[Math.floor(Math.random() * reasons.length)];
  res.json({ reason, source: 'offline' });
});

// Start server
app.listen(PORT, () => {
  console.log(`No-as-a-Service is running on port ${PORT}`);
});
1 change: 1 addition & 0 deletions package.json
@@ -9,6 +9,7 @@
"author": "hotheadhacker",
"license": "MIT",
"dependencies": {
"dotenv": "^16.5.0",
"express": "^4.18.2",
"express-rate-limit": "^7.0.0"
}