author    A.J. Shulman <Shulman.aj@gmail.com>  2024-08-16 14:17:04 -0400
committer A.J. Shulman <Shulman.aj@gmail.com>  2024-08-16 14:17:04 -0400
commit    d97405e0a172b03a759452a1e9a7291974d89248 (patch)
tree      518908b22b093e64cb5004547199087c2cde9e4f /src/server/ApiManagers/AssistantManager.ts
parent    a88190885c5d22ba6a31e3a7379aaaa2440ae0d4 (diff)
Fixed up the prompt by correcting its examples, removed link creation from the search tool, and fixed a dangling promise in AssistantManager.
Diffstat (limited to 'src/server/ApiManagers/AssistantManager.ts')
-rw-r--r--  src/server/ApiManagers/AssistantManager.ts | 69
1 file changed, 32 insertions(+), 37 deletions(-)
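
The "dangling promise" fix named in the commit message follows a standard pattern: start every async operation, collect the promises, and await them together before the handler responds, rather than leaving a write still running after res.send. A minimal sketch of that pattern in TypeScript; the fetchScreenshot helper here is an assumed stand-in for the axios.get + writeFile step in the diff below, not part of the commit:

    // Sketch only: fetchScreenshot is a hypothetical helper, declared so the
    // snippet type-checks; the real work is the axios.get + writeFile in the diff.
    declare function fetchScreenshot(url: string): Promise<Buffer>;

    async function fetchAll(screenshots: Record<string, { url: string }>): Promise<string[]> {
        // Start every download up front...
        const tasks = Object.values(screenshots).map(async shot => {
            const data = await fetchScreenshot(shot.url);
            return data.toString('base64');
        });
        // ...then await them all, so no promise is left dangling when the handler responds.
        return Promise.all(tasks);
    }
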
diff --git a/src/server/ApiManagers/AssistantManager.ts b/src/server/ApiManagers/AssistantManager.ts
index b8f5f4241..cd26ca79b 100644
--- a/src/server/ApiManagers/AssistantManager.ts
+++ b/src/server/ApiManagers/AssistantManager.ts
@@ -140,49 +140,44 @@ export default class AssistantManager extends ApiManager {
secureHandler: async ({ req, res }) => {
const { url } = req.body;
try {
- const url_filename = url.replace(/\./g, '-');
+ const url_filename = url.replace(/\./g, '-').replace(/\//g, '_') + '.jpg';
const scrapedImagesDirectory = pathToDirectory(Directory.scrape_images);
const filePath = serverPathToFile(Directory.scrape_images, url_filename);
+
if (fs.existsSync(filePath)) {
const imageBuffer = await readFileAsync(filePath);
const base64Image = imageBuffer.toString('base64');
- res.send({ website_image_base64: base64Image });
- return;
- } else {
- if (!fs.existsSync(scrapedImagesDirectory)) {
- fs.mkdirSync(scrapedImagesDirectory);
- }
- const result = await scrapflyClient.scrape(
- new ScrapeConfig({
- url: url,
- // enable headless browsers for screenshots
- render_js: true,
- // optional: you can wait for page to load before capturing
- screenshots: {
- // name: what-to-capture
- // fullpage - will capture everything
- // css selector (e.g. #reviews) - will capture just that element
- everything: 'fullpage',
- },
- })
- );
- console.log(result.result.screenshots);
-
- for (let [name, screenshot] of Object.entries(result.result.screenshots)) {
- let response = await axios.get(screenshot.url, {
- // note: don't forget to add your API key parameter:
- params: { key: process.env._CLIENT_SCRAPFLY_API_KEY!, options: 'print_media_format' },
- // this indicates that response is binary data:
- responseType: 'arraybuffer',
- });
- // write to screenshot data to a file in current directory:
- fs.writeFileSync(filePath, response.data);
- const base64String = response.data.toString('base64');
- await fs.promises.writeFile(filePath, response.data);
-
- res.send({ website_image_base64: base64String });
- }
+ console.log('Image already exists');
+ return res.send({ website_image_base64: base64Image });
+ }
+
+ if (!fs.existsSync(scrapedImagesDirectory)) {
+ fs.mkdirSync(scrapedImagesDirectory);
}
+
+ const result = await scrapflyClient.scrape(
+ new ScrapeConfig({
+ url: url,
+ render_js: true,
+ screenshots: { everything: 'fullpage' },
+ })
+ );
+
+ const screenshotPromises = Object.entries(result.result.screenshots).map(async ([name, screenshot]) => {
+ const response = await axios.get(screenshot.url, {
+ params: {
+ key: process.env._CLIENT_SCRAPFLY_API_KEY!,
+ options: 'print_media_format',
+ proxy_pool: 'public_residential_pool',
+ },
+ responseType: 'arraybuffer',
+ });
+ await fs.promises.writeFile(filePath, response.data);
+ return response.data.toString('base64');
+ });
+
+ const base64Screenshots = await Promise.all(screenshotPromises);
+ res.send({ website_image_base64: base64Screenshots[0] });
} catch (error: any) {
console.error('Error scraping website:', error);
res.status(500).send({ error: 'Failed to scrape website', details: error.message });
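
Note that in the new code every iteration of the map callback writes to the same filePath, and only the first element of base64Screenshots is sent. A minimal sketch of a per-screenshot variant, using the same axios and fs calls as above; the downloadScreenshots helper and its filename scheme are illustrative assumptions, not part of this commit:

    import axios from 'axios';
    import * as fs from 'fs';
    import * as path from 'path';

    // Hypothetical helper: give each screenshot its own file, keyed by the
    // screenshot name, so the parallel writes do not all target one path.
    async function downloadScreenshots(
        screenshots: Record<string, { url: string }>,
        directory: string,
        baseFilename: string
    ): Promise<string[]> {
        return Promise.all(
            Object.entries(screenshots).map(async ([name, screenshot]) => {
                const response = await axios.get<Buffer>(screenshot.url, {
                    params: {
                        key: process.env._CLIENT_SCRAPFLY_API_KEY!,
                        options: 'print_media_format',
                        proxy_pool: 'public_residential_pool',
                    },
                    // Binary response: response.data arrives as a Buffer in Node.
                    responseType: 'arraybuffer',
                });
                const filePath = path.join(directory, `${baseFilename}-${name}.jpg`);
                await fs.promises.writeFile(filePath, response.data);
                return Buffer.from(response.data).toString('base64');
            })
        );
    }

A caller could then respond with the first element, as the commit does, or return the full array.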