diff --git a/WebChatGPT/errors.py b/WebChatGPT/errors.py
index be99a58..dfaacd8 100644
--- a/WebChatGPT/errors.py
+++ b/WebChatGPT/errors.py
@@ -1,7 +1,3 @@
-class WebSocketError(Exception):
-    pass
-
-
 class CookieExpiredError(Exception):
     pass
 
diff --git a/WebChatGPT/main.py b/WebChatGPT/main.py
index dbcf765..2b887b3 100755
--- a/WebChatGPT/main.py
+++ b/WebChatGPT/main.py
@@ -1,69 +1,10 @@
 #!/usr/bin/python
 import requests
 from WebChatGPT import utils
-import logging
 import json
 import re
 from functools import lru_cache
-import websocket
-from base64 import b64decode
-from WebChatGPT.errors import WebSocketError
-from threading import Thread as thr
 from typing import Iterator
-from .errors import MaximumRetrialError
-
-
-class Websocket:
-
-    def __init__(
-        self,
-        data: dict,
-        chatgpt: object,
-        trace: bool = False,
-    ):
-        chatgpt.socket_closed = False
-        chatgpt.loading_chunk = ""
-        self.payload = data.copy()
-        self.url = data.get("wss_url")
-        self.payload.pop("wss_url")
-        self.chatgpt = chatgpt
-        self.last_response_chunk: dict = {}
-        self.last_response_undecoded_chunk: dict = {}
-        websocket.enableTrace(trace)
-
-    def on_message(self, ws, message):
-        response = json.loads(message)
-        self.chatgpt.last_response_undecoded_chunk = response
-        decoded_body = b64decode(response["body"]).decode("utf-8")
-        response["body"] = decoded_body
-        self.chatgpt.last_response_chunk = response
-        self.chatgpt.loading_chunk = decoded_body
-
-    def on_error(self, ws, error):
-        self.on_close("ws")
-        raise WebSocketError(error)
-
-    def on_close(self, ws, *args, **kwargs):
-        self.chatgpt.socket_closed = True
-
-    def on_open(
-        self,
-        ws,
-    ):
-        json_data = json.dumps(self.payload, indent=4)
-        ws.send(json_data)
-
-    def run(
-        self,
-    ):
-        ws = websocket.WebSocketApp(
-            self.url,
-            on_message=self.on_message,
-            on_error=self.on_error,
-            on_close=self.on_close,
-            on_open=self.on_open,
-        )
-        ws.run_forever(origin="https://chat.openai.com")
 
 
 class ChatGPT:
@@ -127,6 +68,9 @@ def __init__(
         self.stop_sharing_conversation_endpoint = (
             "https://chat.openai.com/backend-api/%(share_id)s"
         )
+        self.sentinel_chat_requirements_endpoint: str = (
+            "https://chat.openai.com/backend-api/sentinel/chat-requirements"
+        )
         self.session.headers["User-Agent"] = user_agent
         self.locale = locale
         self.model = model
@@ -139,12 +83,7 @@ def __init__(
         self.__already_init = False
         self.__index = conversation_index
         self.__title_cache = {}
-        self.last_response_undecoded_chunk: str = ""
-        self.last_response_chunk: dict = {}
-        self.loading_chunk: str = ""
-        self.socket_closed: bool = True
-        self.trace = trace
-        self.request_more_times: int = 2
+        self.stream_chunk_size = 64
         # self.register_ws =self.session.post("https://chat.openai.com/backend-api/register-websocket")
         # Websocket(self.register_ws.json(),self).run()
 
@@ -171,6 +110,13 @@ def current_conversation_id(self):
     def get_current_message_id(self):
         return self.last_response_metadata.get(2).get("message_id")
 
+    def update_sentinel_tokens(self):
+        resp = self.session.post(self.sentinel_chat_requirements_endpoint, json={})
+        resp.raise_for_status()
+        self.session.headers.update(
+            {"OpenAI-Sentinel-Chat-Requirements-Token": resp.json()["token"]}
+        )
+
     def ask(
         self,
         prompt: str,
@@ -228,32 +174,28 @@ def ask(
         }
         ```
         """
+        self.update_sentinel_tokens()
         response = self.session.post(
             url=self.conversation_endpoint,
             json=self.__generate_payload(prompt),
             timeout=self.timeout,
-            stream=False,
+            stream=True,
         )
-        response.raise_for_status()
-        ws_payload = dict(response.json())
-        self.__request_more_count: int = 0
-
-        # out = lambda v:print(json.dumps(dict(v), indent=4))
-        # out(response.headers)
-        def for_stream():
-
-            ws = Websocket(ws_payload, self, self.trace)
-            t1 = thr(target=ws.run)
-            t1.start()
-            cached_loading_chunk = self.loading_chunk
-            cached_last_response = self.last_response.copy()
-            while True:
-                if self.loading_chunk != cached_loading_chunk:
-                    # New chunk loaded
+        # response.raise_for_status()
+        if (
+            response.ok
+            and response.headers.get("content-type")
+            == "text/event-stream; charset=utf-8"
+        ):
+
+            def for_stream():
+                for value in response.iter_lines(
+                    decode_unicode=True,
+                    delimiter="data:",
+                    chunk_size=self.stream_chunk_size,
+                ):
                     try:
-                        value = self.loading_chunk
-                        # print(value)
-                        to_dict = json.loads(value[5:])
+                        to_dict = json.loads(value)
                         if "is_completion" in to_dict.keys():
                             # Metadata (response)
                             self.last_response_metadata[
@@ -269,40 +211,35 @@ def for_stream():
                             yield value
                         pass
 
-                    finally:
-                        cached_loading_chunk = self.loading_chunk
-
-                if self.socket_closed:
-                    t1.join()
-                    break
-
-            if (
-                self.last_response == cached_last_response
-                or self.last_response["message"]["status"] != "finished_successfully"
-            ):
-
-                # print(json.dumps(self.last_response, indent=4))
-                # print("Requesting more body")
-                # print('=='*40)
-                t1.join()
-                if self.__request_more_count >= self.request_more_times:
-                    raise MaximumRetrialError(
-                        f"Failed to generate response after {self.request_more_times} attempts"
-                    )
-
-                for value in for_stream():
-                    yield value
-
-                self.__request_more_count += 1
-            # else:
-            #   print(print(json.dumps(self.last_response_chunk, indent=4)))
+            def for_non_stream():
+                response_to_be_returned = {}
+                for value in response.iter_lines(
+                    decode_unicode=True,
+                    delimiter="data:",
+                    chunk_size=self.stream_chunk_size,
+                ):
+                    try:
+                        to_dict = json.loads(value)
+                        if "is_completion" in to_dict.keys():
+                            # Metadata (response)
+                            self.last_response_metadata[
+                                2 if to_dict.get("is_completion") else 1
+                            ] = to_dict
+                            continue
+                        # Only data containing the `feedback body` make it to here
+                        self.last_response.update(to_dict)
+                        response_to_be_returned.update(to_dict)
+                    except json.decoder.JSONDecodeError:
+                        # Caused by either empty string or [DONE]
+                        pass
+                return response_to_be_returned
 
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            return for_stream() if stream else for_non_stream()
 
-        return for_stream() if stream else for_non_stream()
+        else:
+            raise Exception(
+                f"Failed to fetch response - ({response.status_code}, {response.reason} : {response.headers.get('content-type')} : {response.text}"
+            )
 
     def chat(self, prompt: str, stream: bool = False) -> str:
         """Interact with ChatGPT on the fly
diff --git a/WebChatGPT/utils.py b/WebChatGPT/utils.py
index 97c47ee..924f3ff 100644
--- a/WebChatGPT/utils.py
+++ b/WebChatGPT/utils.py
@@ -1,7 +1,7 @@
 from datetime import datetime, timezone
 import json
 import logging
-import os
+import locale
 from uuid import uuid4
 from typing import Any
 from .errors import CookieExpiredError
@@ -27,6 +27,7 @@
     "Sec-Fetch-Mode": "cors",
     "Sec-Fetch-Site": "same-origin",
     "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0",
+    "OAI-Language": locale.getlocale()[0].replace("_", "-"),
 }
 
 response_example = {
@@ -238,7 +239,7 @@ def generate_payload(self: object, prompt: str) -> dict:
         "timezone_offset_min": -180,
         "suggestions": [],
         "history_and_training_disabled": self.disable_history_and_training,
-        "arkose_token": None,
+        # "arkose_token": None,
         "conversation_mode": {"kind": "primary_assistant"},
         "force_paragen": False,
         "force_rate_limit": False,
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 4c2c050..f3da18f 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -159,4 +159,11 @@ More console chat manipulation features.
 
 **What's new?**
 
-- patch: Independencies
\ No newline at end of file
+- patch: Independencies
+
+## v0.3.0
+
+**What's new?**
+
+- patch : Fix failure to generate response. Resolves #15 #14
+- patch : Drop support for websocket.
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
index fd9249c..eab75f2 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -26,9 +26,6 @@
 
 Unlike the [official Openai library](https://github.com/openai/openai-python), this library makes REST-API calls to [ChatGPT](https://chat.openai.com) via the **browser** endpoints. *No API-KEY required*
 
-> [!CAUTION]
-> **Currently** very unreliable!
-
 ```python
 from WebChatGPT import ChatGPT
 bot = ChatGPT(
diff --git a/requirements.txt b/requirements.txt
index 8e36ecd..bb8cdf2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,5 +3,4 @@ python-dotenv==1.0.0
 click==8.1.3
 rich==13.3.4
 clipman==3.1.0
-pyperclip==1.8.2
-websocket-client==1.7.0
\ No newline at end of file
+pyperclip==1.8.2
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 81e666d..9d78e33 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
 
 setup(
     name="webchatgpt",
-    version="0.2.9",
+    version="0.3.0",
     license="GNU v3",
     author="Smartwa",
     maintainer="Smartwa",
@@ -41,7 +41,6 @@
         "rich==13.3.4",
         "clipman==3.1.0",
         "pyperclip==1.8.2",
-        "websocket-client==1.7.0",
     ],
     python_requires=">=3.10",
     keywords=[