author  Michael Foiani <sotech117@Michaels-MacBook-Pro-3.local>  2021-04-10 03:26:13 -0400
committer  Michael Foiani <sotech117@Michaels-MacBook-Pro-3.local>  2021-04-10 03:26:13 -0400
commit  b38821ee75b85cfcf1803f88f2092b4c742db2ac (patch)
tree  8bb2cb309d5c0523b0405baa6d41b32d27a8bff5 /src/test/java
parent  6384bebb363ab75e9f1d567ed179e08057828171 (diff)
Have a functional fetch and processing pipeline that uses the official EDGAR API. Need to optimize with parameters in the query and load in more trades (the max is 100 filings per request). This will remove any need to get filings from the frontend, and avoids any issues with paying for an API.
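The paging mentioned above could look roughly like the sketch below. It is a minimal illustration only: it reuses the start/count/type/output query parameters from the browse-edgar URL exercised in FilingTest, and EdgarUrls/buildEdgarUrl are hypothetical names that are not part of this commit.

public final class EdgarUrls {
  private EdgarUrls() { }

  // Build the "latest filings" Atom feed URL with adjustable paging.
  public static String buildEdgarUrl(String formType, int start, int count) {
    return "https://www.sec.gov/cgi-bin/browse-edgar?action=getcurrent"
        + "&type=" + formType   // e.g. "4" for insider-transaction filings
        + "&owner=only"
        + "&start=" + start     // offset into the result list
        + "&count=" + count     // a single request tops out at 100 filings
        + "&output=atom";
  }

  public static void main(String[] args) {
    // Walk the feed 100 filings at a time instead of a single count=10 request.
    for (int start = 0; start < 300; start += 100) {
      System.out.println(buildEdgarUrl("4", start, 100));
    }
  }
}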
Diffstat (limited to 'src/test/java')
-rw-r--r--  src/test/java/edu/brown/cs/student/FilingTest.java  82
1 file changed, 82 insertions, 0 deletions
diff --git a/src/test/java/edu/brown/cs/student/FilingTest.java b/src/test/java/edu/brown/cs/student/FilingTest.java
new file mode 100644
index 0000000..a9b21d3
--- /dev/null
+++ b/src/test/java/edu/brown/cs/student/FilingTest.java
@@ -0,0 +1,82 @@
+package edu.brown.cs.student;
+
+import edu.brown.cs.student.term.parsing.LocalXmlParser;
+import edu.brown.cs.student.term.parsing.Transaction;
+import edu.brown.cs.student.term.parsing.TxtXmlParser;
+import edu.brown.cs.student.term.parsing.UrlXmlParser;
+import edu.brown.cs.student.term.parsing.XmlParser;
+import edu.brown.cs.student.term.trade.Trade;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import static org.junit.Assert.*;
+
+public class FilingTest {
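+  // One parser reads XML straight from a URL; the other is expected to extract the XML from an EDGAR .txt submission file.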
+ private XmlParser _xmlParser, _txtXmlParser;
+
+ @Before
+ public void setUp() {
+ _xmlParser = new UrlXmlParser();
+ _txtXmlParser = new TxtXmlParser();
+ }
+
+ @After
+ public void tearDown() {
+ _xmlParser = null;
+ _txtXmlParser = null;
+ }
+
+ @Test
+  public void seeWorks() {
+
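+    // EDGAR "latest filings" Atom feed: type=4 restricts results to Form 4 (insider transaction)
+    // filings, start/count page through the list, and output=atom returns the feed as XML.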
+ String url = "https://www.sec.gov/cgi-bin/browse-edgar?" +
+ "action=getcurrent" +
+ "&CIK=" +
+ "&type=4" +
+ "&company=" +
+ "&dateb=" +
+ "&owner=only" +
+ "&start=0" +
+ "&count=10" +
+ "&output=atom";
+
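+    // Fetch and parse the feed, then check that every <entry> carries a filing link and an <updated> timestamp.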
+ Document doc = _xmlParser.parse(url);
+ assertNotNull(doc);
+ NodeList entries = doc.getElementsByTagName("entry");
+    assertNotEquals(0, entries.getLength());
+    assertEquals(Node.ELEMENT_NODE, entries.item(0).getNodeType());
+ for (int i = 0; i < entries.getLength(); i++) {
+ Element entry = (Element) entries.item(i);
+ NodeList link = entry.getElementsByTagName("link");
+      assertEquals(1, link.getLength());
+ String linkUrl = link.item(0).getAttributes().getNamedItem("href").getNodeValue();
+ System.out.println(linkUrl);
+
+ NodeList updated = entry.getElementsByTagName("updated");
+      assertEquals(1, updated.getLength());
+ System.out.println(updated.item(0).getTextContent());
+ }
+ }
+
+ @Test
+  public void xmlUrlFromFilingUrl() {
+
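+    // Complete submission .txt file for a single filing; TxtXmlParser is expected to recover the embedded XML document from it.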
+ String url = "https://www.sec.gov/Archives/edgar/data/1597341/000141588921001958/0001415889-21-001958.txt";
+ Document doc = _txtXmlParser.parse(url);
+ assertNotNull(doc);
+ }
+}