#include <iostream>
#include <string>
#include <deque>
#include <sstream>
#include <unordered_map>
#include <cstdlib> // for abs() used in diff()
using namespace std;
/*
We want to protect an API endpoint with an in-memory rate limiter.
The rate limiter should be initialized with a value that represents the maximum requests per
second we want to allow. When a request hits the API endpoint, the rate limiter is asked whether
it has capacity to process this request and will reject or accept the request appropriately.
Assume:
- Time is measured in millisecond resolution.
- API requests come in sequentially.
Design the interface and implement the logic for the rate limiter.
Example at a max of 2 requests / sec:
12:00:01.100 PASS
12:00:01.200 PASS
12:00:01.300 FAIL
12:00:02.100 PASS
12:00:02.150 FAIL
12:00:02.200 PASS
*/
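// One possible shape for the interface asked for above. This is a sketch only: the class and
// method names here are illustrative assumptions, and the rest of this file implements the
// same idea as a free function (isRateLimited) so that main() can drive the example.
class RateLimiter {
public:
    // maxRequestsPerSec: the sustained request rate to allow.
    explicit RateLimiter(int maxRequestsPerSec) : maxRequestsPerSec_(maxRequestsPerSec) {}
    // Returns true if the request arriving at timestampMs may proceed, false if it is
    // rejected. A concrete limiter (token bucket, sliding window, ...) supplies the body.
    bool allowRequest(long timestampMs);
private:
    int maxRequestsPerSec_;
};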
int strToInt(string s) {
    stringstream ss;
    ss << s;
    int value = 0;
    ss >> value;
    return value;
}
// Converts an "HH:MM:SS.mmm" timestamp into milliseconds since midnight.
long timeToMsec(string timeStr) {
    long millisec = 0;
    int count = 0;
    std::string s;
    for (size_t i = 0; i < timeStr.size(); i++) {
        if (timeStr[i] == ':' || timeStr[i] == '.') {
            if (count == 0) {
                millisec += strToInt(s) * 60 * 60 * 1000; // hours
            }
            else if (count == 1) {
                millisec += strToInt(s) * 60 * 1000; // minutes
            }
            else if (count == 2) {
                millisec += strToInt(s) * 1000; // seconds
            }
            count++;
            s.clear();
        }
        else {
            s.push_back(timeStr[i]);
        }
    }
    if (s.size() > 0) {
        millisec += strToInt(s); // trailing digits are the milliseconds
    }
    return millisec;
}
// Absolute difference between two timestamps in milliseconds.
long diff(long time1, long time2) {
    return abs(time1 - time2);
}
// Returns true if the request at curTimestamp is allowed to pass, false if it is rejected.
// clientid is unused in this single-bucket version.
bool isRateLimited(std::string curTimestamp, int clientid = 0) {
    // Token bucket approach: no queue is needed. The bucket is a single counter plus a
    // "last event" timestamp. The bucket gains tokens based on how much time has passed,
    // and every event (API call) subtracts a fixed amount; a request is rate limited when
    // there isn't enough left to subtract. The maximum bucket size tunes how much burst
    // (vs. average) traffic is allowed.
    //
    // Assuming the expected output represents an average of one request per 500 ms, the
    // limit target is N = 2 requests per T = 1000 ms, so the math works out like this:
    // the max bucket size is N * T = 2 * 1000 = 2000; at every event, first take the
    // "ms since the last event", multiply it by N (2), and add that to the bucket
    // (capping the bucket at 2000); an allowed request then costs T = 1000 tokens.
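    // To make the arithmetic concrete, here is how the first three calls in main() play out
    // (refill rate N = 2 tokens/ms, cost T = 1000 tokens per request, cap 2000):
    //   12:00:01.100  first event, so the bucket fills straight to the 2000 cap;
    //                 2000 >= 1000, so the request passes and the bucket drops to 1000
    //   12:00:01.200  100 ms elapsed -> +200 tokens -> 1200; passes, bucket drops to 200
    //   12:00:01.300  100 ms elapsed -> +200 tokens -> 400; 400 < 1000, so it is rejected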
    static long N = 2;                 // allowed requests per window
    static long T = 1000;              // window length in ms (also the token cost per request)
    static long maxBucketSize = N * T; // at most N requests' worth of burst
    static long lastEventMs = 0;       // timestamp of the last event, allowed or not
    static long bucket = 0;
    long curTimestampMs = timeToMsec(curTimestamp);
    // Refill the bucket for the time elapsed since the last event, then cap it so a long
    // idle period can never bank more than maxBucketSize worth of burst.
    long delta = curTimestampMs - lastEventMs;
    bucket += delta * N;
    if (bucket > maxBucketSize) {
        bucket = maxBucketSize;
    }
    lastEventMs = curTimestampMs;
    cout << curTimestamp << "...";
    // Allow the request only if a full request's worth of tokens is available.
    if (bucket >= T) {
        bucket -= T;
        return true;
    }
    return false;
}
int main() {
    // Expected values are the PASS/FAIL results from the problem statement. Note that with
    // 2 requests' worth of burst in the bucket, this token bucket actually passes
    // 12:00:02.150 and rejects 12:00:02.200; the statement's last two expectations match a
    // stricter, sliding-window style reading of "2 per second".
    cout << isRateLimited("12:00:01.100") << endl; // expect true
    cout << isRateLimited("12:00:01.200") << endl; // expect true
    cout << isRateLimited("12:00:01.300") << endl; // expect false
    cout << isRateLimited("12:00:02.100") << endl; // expect true
    cout << isRateLimited("12:00:02.150") << endl; // expect false
    cout << isRateLimited("12:00:02.200") << endl; // expect true
    return 0;
}