diff --git a/.github/workflows/protocol-build-and-push.yml b/.github/workflows/protocol-build-and-push.yml
index ade5341369..fc26dd6e69 100644
--- a/.github/workflows/protocol-build-and-push.yml
+++ b/.github/workflows/protocol-build-and-push.yml
@@ -222,4 +222,4 @@ jobs:
             --platform amd64 \
             -t $ECR_REGISTRY/$ECR_REPOSITORY:$commit_hash \
             -f testing/testnet-staging/Dockerfile .
-          docker push $ECR_REGISTRY/$ECR_REPOSITORY --all-tags
\ No newline at end of file
+          docker push $ECR_REGISTRY/$ECR_REPOSITORY --all-tags
diff --git a/indexer/packages/v4-protos/src/codegen/dydxprotocol/bundle.ts b/indexer/packages/v4-protos/src/codegen/dydxprotocol/bundle.ts
index 51200efb9b..866360b7b5 100644
--- a/indexer/packages/v4-protos/src/codegen/dydxprotocol/bundle.ts
+++ b/indexer/packages/v4-protos/src/codegen/dydxprotocol/bundle.ts
@@ -88,82 +88,81 @@ import * as _91 from "./subaccounts/asset_position";
 import * as _92 from "./subaccounts/genesis";
 import * as _93 from "./subaccounts/perpetual_position";
 import * as _94 from "./subaccounts/query";
-import * as _95 from "./subaccounts/streaming";
-import * as _96 from "./subaccounts/subaccount";
-import * as _97 from "./vault/genesis";
-import * as _98 from "./vault/params";
-import * as _99 from "./vault/query";
-import * as _100 from "./vault/tx";
-import * as _101 from "./vault/vault";
-import * as _102 from "./vest/genesis";
-import * as _103 from "./vest/query";
-import * as _104 from "./vest/tx";
-import * as _105 from "./vest/vest_entry";
-import * as _113 from "./assets/query.lcd";
-import * as _114 from "./blocktime/query.lcd";
-import * as _115 from "./bridge/query.lcd";
-import * as _116 from "./clob/query.lcd";
-import * as _117 from "./delaymsg/query.lcd";
-import * as _118 from "./epochs/query.lcd";
-import * as _119 from "./feetiers/query.lcd";
-import * as _120 from "./perpetuals/query.lcd";
-import * as _121 from "./prices/query.lcd";
-import * as _122 from "./ratelimit/query.lcd";
-import * as _123 from "./rewards/query.lcd";
-import * as _124 from "./stats/query.lcd";
-import * as _125 from "./subaccounts/query.lcd";
-import * as _126 from "./vault/query.lcd";
-import * as _127 from "./vest/query.lcd";
-import * as _128 from "./assets/query.rpc.Query";
-import * as _129 from "./blocktime/query.rpc.Query";
-import * as _130 from "./bridge/query.rpc.Query";
-import * as _131 from "./clob/query.rpc.Query";
-import * as _132 from "./delaymsg/query.rpc.Query";
-import * as _133 from "./epochs/query.rpc.Query";
-import * as _134 from "./feetiers/query.rpc.Query";
-import * as _135 from "./govplus/query.rpc.Query";
-import * as _136 from "./perpetuals/query.rpc.Query";
-import * as _137 from "./prices/query.rpc.Query";
-import * as _138 from "./ratelimit/query.rpc.Query";
-import * as _139 from "./rewards/query.rpc.Query";
-import * as _140 from "./sending/query.rpc.Query";
-import * as _141 from "./stats/query.rpc.Query";
-import * as _142 from "./subaccounts/query.rpc.Query";
-import * as _143 from "./vault/query.rpc.Query";
-import * as _144 from "./vest/query.rpc.Query";
-import * as _145 from "./blocktime/tx.rpc.msg";
-import * as _146 from "./bridge/tx.rpc.msg";
-import * as _147 from "./clob/tx.rpc.msg";
-import * as _148 from "./delaymsg/tx.rpc.msg";
-import * as _149 from "./feetiers/tx.rpc.msg";
-import * as _150 from "./govplus/tx.rpc.msg";
-import * as _151 from "./perpetuals/tx.rpc.msg";
-import * as _152 from "./prices/tx.rpc.msg";
-import * as _153 from "./ratelimit/tx.rpc.msg";
-import * as _154 from "./rewards/tx.rpc.msg";
-import * as _155 from "./sending/tx.rpc.msg";
-import * as _156 from "./stats/tx.rpc.msg";
-import * as _157 from "./vault/tx.rpc.msg";
-import * as _158 from "./vest/tx.rpc.msg";
-import * as _159 from "./lcd";
-import * as _160 from "./rpc.query";
-import * as _161 from "./rpc.tx";
+import * as _95 from "./subaccounts/subaccount";
+import * as _96 from "./vault/genesis";
+import * as _97 from "./vault/params";
+import * as _98 from "./vault/query";
+import * as _99 from "./vault/tx";
+import * as _100 from "./vault/vault";
+import * as _101 from "./vest/genesis";
+import * as _102 from "./vest/query";
+import * as _103 from "./vest/tx";
+import * as _104 from "./vest/vest_entry";
+import * as _112 from "./assets/query.lcd";
+import * as _113 from "./blocktime/query.lcd";
+import * as _114 from "./bridge/query.lcd";
+import * as _115 from "./clob/query.lcd";
+import * as _116 from "./delaymsg/query.lcd";
+import * as _117 from "./epochs/query.lcd";
+import * as _118 from "./feetiers/query.lcd";
+import * as _119 from "./perpetuals/query.lcd";
+import * as _120 from "./prices/query.lcd";
+import * as _121 from "./ratelimit/query.lcd";
+import * as _122 from "./rewards/query.lcd";
+import * as _123 from "./stats/query.lcd";
+import * as _124 from "./subaccounts/query.lcd";
+import * as _125 from "./vault/query.lcd";
+import * as _126 from "./vest/query.lcd";
+import * as _127 from "./assets/query.rpc.Query";
+import * as _128 from "./blocktime/query.rpc.Query";
+import * as _129 from "./bridge/query.rpc.Query";
+import * as _130 from "./clob/query.rpc.Query";
+import * as _131 from "./delaymsg/query.rpc.Query";
+import * as _132 from "./epochs/query.rpc.Query";
+import * as _133 from "./feetiers/query.rpc.Query";
+import * as _134 from "./govplus/query.rpc.Query";
+import * as _135 from "./perpetuals/query.rpc.Query";
+import * as _136 from "./prices/query.rpc.Query";
+import * as _137 from "./ratelimit/query.rpc.Query";
+import * as _138 from "./rewards/query.rpc.Query";
+import * as _139 from "./sending/query.rpc.Query";
+import * as _140 from "./stats/query.rpc.Query";
+import * as _141 from "./subaccounts/query.rpc.Query";
+import * as _142 from "./vault/query.rpc.Query";
+import * as _143 from "./vest/query.rpc.Query";
+import * as _144 from "./blocktime/tx.rpc.msg";
+import * as _145 from "./bridge/tx.rpc.msg";
+import * as _146 from "./clob/tx.rpc.msg";
+import * as _147 from "./delaymsg/tx.rpc.msg";
+import * as _148 from "./feetiers/tx.rpc.msg";
+import * as _149 from "./govplus/tx.rpc.msg";
+import * as _150 from "./perpetuals/tx.rpc.msg";
+import * as _151 from "./prices/tx.rpc.msg";
+import * as _152 from "./ratelimit/tx.rpc.msg";
+import * as _153 from "./rewards/tx.rpc.msg";
+import * as _154 from "./sending/tx.rpc.msg";
+import * as _155 from "./stats/tx.rpc.msg";
+import * as _156 from "./vault/tx.rpc.msg";
+import * as _157 from "./vest/tx.rpc.msg";
+import * as _158 from "./lcd";
+import * as _159 from "./rpc.query";
+import * as _160 from "./rpc.tx";
 export namespace dydxprotocol {
   export const assets = { ..._5,
     ..._6,
     ..._7,
     ..._8,
-    ..._113,
-    ..._128
+    ..._112,
+    ..._127
   };
   export const blocktime = { ..._9,
     ..._10,
     ..._11,
     ..._12,
     ..._13,
-    ..._114,
-    ..._129,
-    ..._145
+    ..._113,
+    ..._128,
+    ..._144
   };
   export const bridge = { ..._14,
     ..._15,
@@ -171,9 +170,9 @@ export namespace dydxprotocol {
     ..._17,
     ..._18,
     ..._19,
-    ..._115,
-    ..._130,
-    ..._146
+    ..._114,
+    ..._129,
+    ..._145
   };
   export const clob = { ..._20,
     ..._21,
@@ -189,9 +188,9 @@ export namespace dydxprotocol {
     ..._31,
     ..._32,
     ..._33,
-    ..._116,
-    ..._131,
-    ..._147
+    ..._115,
+    ..._130,
+    ..._146
   };
   export namespace daemons {
     export const bridge = { ..._34
@@ -206,29 +205,29 @@ export namespace dydxprotocol {
     ..._39,
     ..._40,
     ..._41,
-    ..._117,
-    ..._132,
-    ..._148
+    ..._116,
+    ..._131,
+    ..._147
   };
   export const epochs = { ..._42,
     ..._43,
     ..._44,
-    ..._118,
-    ..._133
+    ..._117,
+    ..._132
   };
   export const feetiers = { ..._45,
     ..._46,
     ..._47,
     ..._48,
-    ..._119,
-    ..._134,
-    ..._149
+    ..._118,
+    ..._133,
+    ..._148
   };
   export const govplus = { ..._49,
     ..._50,
     ..._51,
-    ..._135,
-    ..._150
+    ..._134,
+    ..._149
   };
   export namespace indexer {
     export const events = { ..._52
@@ -255,18 +254,18 @@ export namespace dydxprotocol {
     ..._63,
     ..._64,
     ..._65,
-    ..._120,
-    ..._136,
-    ..._151
+    ..._119,
+    ..._135,
+    ..._150
   };
   export const prices = { ..._66,
     ..._67,
     ..._68,
     ..._69,
     ..._70,
-    ..._121,
-    ..._137,
-    ..._152
+    ..._120,
+    ..._136,
+    ..._151
   };
   export const ratelimit = { ..._71,
     ..._72,
@@ -274,63 +273,62 @@ export namespace dydxprotocol {
     ..._74,
     ..._75,
     ..._76,
-    ..._122,
-    ..._138,
-    ..._153
+    ..._121,
+    ..._137,
+    ..._152
   };
   export const rewards = { ..._77,
     ..._78,
     ..._79,
     ..._80,
     ..._81,
-    ..._123,
-    ..._139,
-    ..._154
+    ..._122,
+    ..._138,
+    ..._153
   };
   export const sending = { ..._82,
     ..._83,
     ..._84,
     ..._85,
-    ..._140,
-    ..._155
+    ..._139,
+    ..._154
   };
   export const stats = { ..._86,
     ..._87,
     ..._88,
     ..._89,
     ..._90,
-    ..._124,
-    ..._141,
-    ..._156
+    ..._123,
+    ..._140,
+    ..._155
   };
   export const subaccounts = { ..._91,
     ..._92,
     ..._93,
     ..._94,
     ..._95,
-    ..._96,
-    ..._125,
-    ..._142
+    ..._124,
+    ..._141
   };
-  export const vault = { ..._97,
+  export const vault = { ..._96,
+    ..._97,
     ..._98,
     ..._99,
     ..._100,
-    ..._101,
-    ..._126,
-    ..._143,
-    ..._157
+    ..._125,
+    ..._142,
+    ..._156
   };
-  export const vest = { ..._102,
+  export const vest = { ..._101,
+    ..._102,
     ..._103,
     ..._104,
-    ..._105,
-    ..._127,
-    ..._144,
-    ..._158
+    ..._126,
+    ..._143,
+    ..._157
   };
-  export const ClientFactory = { ..._159,
-    ..._160,
-    ..._161
+  export const ClientFactory = { ..._158,
+    ..._159,
+    ..._160
   };
 }
\ No newline at end of file
diff --git a/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/order.ts b/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/order.ts
index 855438764a..3c714e5979 100644
--- a/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/order.ts
+++ b/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/order.ts
@@ -1,5 +1,4 @@
 import { SubaccountId, SubaccountIdSDKType } from "../subaccounts/subaccount";
-import { PerpetualLiquidationInfo, PerpetualLiquidationInfoSDKType } from "./liquidations";
 import * as _m0 from "protobufjs/minimal";
 import { DeepPartial, Long } from "../../helpers";
 /**
@@ -703,60 +702,6 @@ export interface TransactionOrderingSDKType {
 
   transaction_index: number;
 }
-/**
- * StreamLiquidationOrder represents an protocol-generated IOC liquidation
- * order. Used in full node streaming.
- */
-
-export interface StreamLiquidationOrder {
-  /** Information about this liquidation order. */
-  liquidationInfo?: PerpetualLiquidationInfo;
-  /**
-   * CLOB pair ID of the CLOB pair the liquidation order will be matched
-   * against.
-   */
-
-  clobPairId: number;
-  /**
-   * True if this is a buy order liquidating a short position, false if vice
-   * versa.
-   */
-
-  isBuy: boolean;
-  /** The number of base quantums for this liquidation order. */
-
-  quantums: Long;
-  /** The subticks this liquidation order will be submitted at. */
-
-  subticks: Long;
-}
-/**
- * StreamLiquidationOrder represents an protocol-generated IOC liquidation
- * order. Used in full node streaming.
- */
-
-export interface StreamLiquidationOrderSDKType {
-  /** Information about this liquidation order. */
-  liquidation_info?: PerpetualLiquidationInfoSDKType;
-  /**
-   * CLOB pair ID of the CLOB pair the liquidation order will be matched
-   * against.
-   */
-
-  clob_pair_id: number;
-  /**
-   * True if this is a buy order liquidating a short position, false if vice
-   * versa.
-   */
-
-  is_buy: boolean;
-  /** The number of base quantums for this liquidation order. */
-
-  quantums: Long;
-  /** The subticks this liquidation order will be submitted at. */
-
-  subticks: Long;
-}
 
 function createBaseOrderId(): OrderId {
   return {
@@ -1341,89 +1286,4 @@ export const TransactionOrdering = {
     return message;
   }
 
-};
-
-function createBaseStreamLiquidationOrder(): StreamLiquidationOrder {
-  return {
-    liquidationInfo: undefined,
-    clobPairId: 0,
-    isBuy: false,
-    quantums: Long.UZERO,
-    subticks: Long.UZERO
-  };
-}
-
-export const StreamLiquidationOrder = {
-  encode(message: StreamLiquidationOrder, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.liquidationInfo !== undefined) {
-      PerpetualLiquidationInfo.encode(message.liquidationInfo, writer.uint32(10).fork()).ldelim();
-    }
-
-    if (message.clobPairId !== 0) {
-      writer.uint32(16).uint32(message.clobPairId);
-    }
-
-    if (message.isBuy === true) {
-      writer.uint32(24).bool(message.isBuy);
-    }
-
-    if (!message.quantums.isZero()) {
-      writer.uint32(32).uint64(message.quantums);
-    }
-
-    if (!message.subticks.isZero()) {
-      writer.uint32(40).uint64(message.subticks);
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): StreamLiquidationOrder {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseStreamLiquidationOrder();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.liquidationInfo = PerpetualLiquidationInfo.decode(reader, reader.uint32());
-          break;
-
-        case 2:
-          message.clobPairId = reader.uint32();
-          break;
-
-        case 3:
-          message.isBuy = reader.bool();
-          break;
-
-        case 4:
-          message.quantums = (reader.uint64() as Long);
-          break;
-
-        case 5:
-          message.subticks = (reader.uint64() as Long);
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<StreamLiquidationOrder>): StreamLiquidationOrder {
-    const message = createBaseStreamLiquidationOrder();
-    message.liquidationInfo = object.liquidationInfo !== undefined && object.liquidationInfo !== null ? PerpetualLiquidationInfo.fromPartial(object.liquidationInfo) : undefined;
-    message.clobPairId = object.clobPairId ?? 0;
-    message.isBuy = object.isBuy ?? false;
-    message.quantums = object.quantums !== undefined && object.quantums !== null ? Long.fromValue(object.quantums) : Long.UZERO;
-    message.subticks = object.subticks !== undefined && object.subticks !== null ? Long.fromValue(object.subticks) : Long.UZERO;
-    return message;
-  }
-
 };
\ No newline at end of file
diff --git a/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/query.ts b/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/query.ts
index 27872be567..e3ebe88ee0 100644
--- a/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/query.ts
+++ b/indexer/packages/v4-protos/src/codegen/dydxprotocol/clob/query.ts
@@ -1,12 +1,10 @@
 import { PageRequest, PageRequestSDKType, PageResponse, PageResponseSDKType } from "../../cosmos/base/query/v1beta1/pagination";
 import { ValidatorMevMatches, ValidatorMevMatchesSDKType, MevNodeToNodeMetrics, MevNodeToNodeMetricsSDKType } from "./mev";
-import { OrderId, OrderIdSDKType, LongTermOrderPlacement, LongTermOrderPlacementSDKType, Order, OrderSDKType, StreamLiquidationOrder, StreamLiquidationOrderSDKType } from "./order";
-import { SubaccountId, SubaccountIdSDKType } from "../subaccounts/subaccount";
+import { OrderId, OrderIdSDKType, LongTermOrderPlacement, LongTermOrderPlacementSDKType, Order, OrderSDKType } from "./order";
 import { ClobPair, ClobPairSDKType } from "./clob_pair";
 import { EquityTierLimitConfiguration, EquityTierLimitConfigurationSDKType } from "./equity_tier_limit_config";
 import { BlockRateLimitConfiguration, BlockRateLimitConfigurationSDKType } from "./block_rate_limit_config";
 import { LiquidationsConfig, LiquidationsConfigSDKType } from "./liquidations_config";
-import { StreamSubaccountUpdate, StreamSubaccountUpdateSDKType } from "../subaccounts/streaming";
 import { OffChainUpdateV1, OffChainUpdateV1SDKType } from "../indexer/off_chain_updates/off_chain_updates";
 import { ClobMatch, ClobMatchSDKType } from "./matches";
 import * as _m0 from "protobufjs/minimal";
@@ -253,9 +251,6 @@ export interface QueryLiquidationsConfigurationResponseSDKType {
 export interface StreamOrderbookUpdatesRequest {
   /** Clob pair ids to stream orderbook updates for. */
   clobPairId: number[];
-  /** Subaccount ids to stream subaccount updates for. */
-
-  subaccountIds: SubaccountId[];
 }
 /**
  * StreamOrderbookUpdatesRequest is a request message for the
@@ -265,9 +260,6 @@ export interface StreamOrderbookUpdatesRequest {
 export interface StreamOrderbookUpdatesRequestSDKType {
   /** Clob pair ids to stream orderbook updates for. */
   clob_pair_id: number[];
-  /** Subaccount ids to stream subaccount updates for. */
-
-  subaccount_ids: SubaccountIdSDKType[];
 }
 /**
  * StreamOrderbookUpdatesResponse is a response message for the
@@ -295,8 +287,6 @@ export interface StreamOrderbookUpdatesResponseSDKType {
 export interface StreamUpdate {
   orderbookUpdate?: StreamOrderbookUpdate;
   orderFill?: StreamOrderbookFill;
-  takerOrder?: StreamTakerOrder;
-  subaccountUpdate?: StreamSubaccountUpdate;
   /** Block height of the update. */
 
   blockHeight: number;
@@ -312,8 +302,6 @@ export interface StreamUpdate {
 export interface StreamUpdateSDKType {
   orderbook_update?: StreamOrderbookUpdateSDKType;
   order_fill?: StreamOrderbookFillSDKType;
-  taker_order?: StreamTakerOrderSDKType;
-  subaccount_update?: StreamSubaccountUpdateSDKType;
   /** Block height of the update. */
 
   block_height: number;
@@ -403,90 +391,6 @@ export interface StreamOrderbookFillSDKType {
 
   fill_amounts: Long[];
 }
-/**
- * StreamTakerOrder provides information on a taker order that was attempted
- * to be matched on the orderbook.
- * It is intended to be used only in full node streaming.
- */
-
-export interface StreamTakerOrder {
-  order?: Order;
-  liquidationOrder?: StreamLiquidationOrder;
-  /**
-   * Information on the taker order after it is matched on the book,
-   * either successfully or unsuccessfully.
-   */
-
-  takerOrderStatus?: StreamTakerOrderStatus;
-}
-/**
- * StreamTakerOrder provides information on a taker order that was attempted
- * to be matched on the orderbook.
- * It is intended to be used only in full node streaming.
- */
-
-export interface StreamTakerOrderSDKType {
-  order?: OrderSDKType;
-  liquidation_order?: StreamLiquidationOrderSDKType;
-  /**
-   * Information on the taker order after it is matched on the book,
-   * either successfully or unsuccessfully.
-   */
-
-  taker_order_status?: StreamTakerOrderStatusSDKType;
-}
-/**
- * StreamTakerOrderStatus is a representation of a taker order
- * after it is attempted to be matched on the orderbook.
- * It is intended to be used only in full node streaming.
- */
-
-export interface StreamTakerOrderStatus {
-  /**
-   * The state of the taker order after attempting to match it against the
-   * orderbook. Possible enum values can be found here:
-   * https://github.com/dydxprotocol/v4-chain/blob/main/protocol/x/clob/types/orderbook.go#L105
-   */
-  orderStatus: number;
-  /** The amount of remaining (non-matched) base quantums of this taker order. */
-
-  remainingQuantums: Long;
-  /**
-   * The amount of base quantums that were *optimistically* filled for this
-   * taker order when the order is matched against the orderbook. Note that if
-   * any quantums of this order were optimistically filled or filled in state
-   * before this invocation of the matching loop, this value will not include
-   * them.
-   */
-
-  optimisticallyFilledQuantums: Long;
-}
-/**
- * StreamTakerOrderStatus is a representation of a taker order
- * after it is attempted to be matched on the orderbook.
- * It is intended to be used only in full node streaming.
- */
-
-export interface StreamTakerOrderStatusSDKType {
-  /**
-   * The state of the taker order after attempting to match it against the
-   * orderbook. Possible enum values can be found here:
-   * https://github.com/dydxprotocol/v4-chain/blob/main/protocol/x/clob/types/orderbook.go#L105
-   */
-  order_status: number;
-  /** The amount of remaining (non-matched) base quantums of this taker order. */
-
-  remaining_quantums: Long;
-  /**
-   * The amount of base quantums that were *optimistically* filled for this
-   * taker order when the order is matched against the orderbook. Note that if
-   * any quantums of this order were optimistically filled or filled in state
-   * before this invocation of the matching loop, this value will not include
-   * them.
-   */
-
-  optimistically_filled_quantums: Long;
-}
 
 function createBaseQueryGetClobPairRequest(): QueryGetClobPairRequest {
   return {
@@ -1192,8 +1096,7 @@ export const QueryLiquidationsConfigurationResponse = {
 
 function createBaseStreamOrderbookUpdatesRequest(): StreamOrderbookUpdatesRequest {
   return {
-    clobPairId: [],
-    subaccountIds: []
+    clobPairId: []
   };
 }
 
@@ -1206,11 +1109,6 @@ export const StreamOrderbookUpdatesRequest = {
     }
 
     writer.ldelim();
-
-    for (const v of message.subaccountIds) {
-      SubaccountId.encode(v!, writer.uint32(18).fork()).ldelim();
-    }
-
     return writer;
   },
 
@@ -1236,10 +1134,6 @@ export const StreamOrderbookUpdatesRequest = {
 
           break;
 
-        case 2:
-          message.subaccountIds.push(SubaccountId.decode(reader, reader.uint32()));
-          break;
-
         default:
           reader.skipType(tag & 7);
           break;
@@ -1252,7 +1146,6 @@ export const StreamOrderbookUpdatesRequest = {
   fromPartial(object: DeepPartial<StreamOrderbookUpdatesRequest>): StreamOrderbookUpdatesRequest {
     const message = createBaseStreamOrderbookUpdatesRequest();
     message.clobPairId = object.clobPairId?.map(e => e) || [];
-    message.subaccountIds = object.subaccountIds?.map(e => SubaccountId.fromPartial(e)) || [];
     return message;
   }
 
@@ -1307,8 +1200,6 @@ function createBaseStreamUpdate(): StreamUpdate {
   return {
     orderbookUpdate: undefined,
     orderFill: undefined,
-    takerOrder: undefined,
-    subaccountUpdate: undefined,
     blockHeight: 0,
     execMode: 0
   };
@@ -1324,20 +1215,12 @@ export const StreamUpdate = {
       StreamOrderbookFill.encode(message.orderFill, writer.uint32(18).fork()).ldelim();
     }
 
-    if (message.takerOrder !== undefined) {
-      StreamTakerOrder.encode(message.takerOrder, writer.uint32(26).fork()).ldelim();
-    }
-
-    if (message.subaccountUpdate !== undefined) {
-      StreamSubaccountUpdate.encode(message.subaccountUpdate, writer.uint32(34).fork()).ldelim();
-    }
-
     if (message.blockHeight !== 0) {
-      writer.uint32(40).uint32(message.blockHeight);
+      writer.uint32(24).uint32(message.blockHeight);
     }
 
     if (message.execMode !== 0) {
-      writer.uint32(48).uint32(message.execMode);
+      writer.uint32(32).uint32(message.execMode);
     }
 
     return writer;
@@ -1361,18 +1244,10 @@ export const StreamUpdate = {
           break;
 
         case 3:
-          message.takerOrder = StreamTakerOrder.decode(reader, reader.uint32());
-          break;
-
-        case 4:
-          message.subaccountUpdate = StreamSubaccountUpdate.decode(reader, reader.uint32());
-          break;
-
-        case 5:
           message.blockHeight = reader.uint32();
           break;
 
-        case 6:
+        case 4:
           message.execMode = reader.uint32();
           break;
 
@@ -1389,8 +1264,6 @@ export const StreamUpdate = {
     const message = createBaseStreamUpdate();
     message.orderbookUpdate = object.orderbookUpdate !== undefined && object.orderbookUpdate !== null ? StreamOrderbookUpdate.fromPartial(object.orderbookUpdate) : undefined;
     message.orderFill = object.orderFill !== undefined && object.orderFill !== null ? StreamOrderbookFill.fromPartial(object.orderFill) : undefined;
-    message.takerOrder = object.takerOrder !== undefined && object.takerOrder !== null ? StreamTakerOrder.fromPartial(object.takerOrder) : undefined;
-    message.subaccountUpdate = object.subaccountUpdate !== undefined && object.subaccountUpdate !== null ? StreamSubaccountUpdate.fromPartial(object.subaccountUpdate) : undefined;
     message.blockHeight = object.blockHeight ?? 0;
     message.execMode = object.execMode ?? 0;
     return message;
@@ -1528,134 +1401,4 @@ export const StreamOrderbookFill = {
     return message;
   }
 
-};
-
-function createBaseStreamTakerOrder(): StreamTakerOrder {
-  return {
-    order: undefined,
-    liquidationOrder: undefined,
-    takerOrderStatus: undefined
-  };
-}
-
-export const StreamTakerOrder = {
-  encode(message: StreamTakerOrder, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.order !== undefined) {
-      Order.encode(message.order, writer.uint32(10).fork()).ldelim();
-    }
-
-    if (message.liquidationOrder !== undefined) {
-      StreamLiquidationOrder.encode(message.liquidationOrder, writer.uint32(18).fork()).ldelim();
-    }
-
-    if (message.takerOrderStatus !== undefined) {
-      StreamTakerOrderStatus.encode(message.takerOrderStatus, writer.uint32(26).fork()).ldelim();
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): StreamTakerOrder {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseStreamTakerOrder();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.order = Order.decode(reader, reader.uint32());
-          break;
-
-        case 2:
-          message.liquidationOrder = StreamLiquidationOrder.decode(reader, reader.uint32());
-          break;
-
-        case 3:
-          message.takerOrderStatus = StreamTakerOrderStatus.decode(reader, reader.uint32());
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<StreamTakerOrder>): StreamTakerOrder {
-    const message = createBaseStreamTakerOrder();
-    message.order = object.order !== undefined && object.order !== null ? Order.fromPartial(object.order) : undefined;
-    message.liquidationOrder = object.liquidationOrder !== undefined && object.liquidationOrder !== null ? StreamLiquidationOrder.fromPartial(object.liquidationOrder) : undefined;
-    message.takerOrderStatus = object.takerOrderStatus !== undefined && object.takerOrderStatus !== null ? StreamTakerOrderStatus.fromPartial(object.takerOrderStatus) : undefined;
-    return message;
-  }
-
-};
-
-function createBaseStreamTakerOrderStatus(): StreamTakerOrderStatus {
-  return {
-    orderStatus: 0,
-    remainingQuantums: Long.UZERO,
-    optimisticallyFilledQuantums: Long.UZERO
-  };
-}
-
-export const StreamTakerOrderStatus = {
-  encode(message: StreamTakerOrderStatus, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.orderStatus !== 0) {
-      writer.uint32(8).uint32(message.orderStatus);
-    }
-
-    if (!message.remainingQuantums.isZero()) {
-      writer.uint32(16).uint64(message.remainingQuantums);
-    }
-
-    if (!message.optimisticallyFilledQuantums.isZero()) {
-      writer.uint32(24).uint64(message.optimisticallyFilledQuantums);
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): StreamTakerOrderStatus {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseStreamTakerOrderStatus();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.orderStatus = reader.uint32();
-          break;
-
-        case 2:
-          message.remainingQuantums = (reader.uint64() as Long);
-          break;
-
-        case 3:
-          message.optimisticallyFilledQuantums = (reader.uint64() as Long);
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<StreamTakerOrderStatus>): StreamTakerOrderStatus {
-    const message = createBaseStreamTakerOrderStatus();
-    message.orderStatus = object.orderStatus ?? 0;
-    message.remainingQuantums = object.remainingQuantums !== undefined && object.remainingQuantums !== null ? Long.fromValue(object.remainingQuantums) : Long.UZERO;
-    message.optimisticallyFilledQuantums = object.optimisticallyFilledQuantums !== undefined && object.optimisticallyFilledQuantums !== null ? Long.fromValue(object.optimisticallyFilledQuantums) : Long.UZERO;
-    return message;
-  }
-
 };
\ No newline at end of file
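The regenerated encoder above writes block_height and exec_mode with key bytes 24 and 32 instead of 40 and 48 because the proto field numbers move from 5/6 to 3/4 (see the query.proto hunk further down). A protobuf key byte is (field_number << 3) | wire_type, with wire type 0 for varints; a minimal Go check of that arithmetic (the helper name is illustrative, not part of the codebase):

```go
package main

import "fmt"

// key returns the protobuf field key byte: (field number << 3) | wire type.
// Wire type 0 covers varint-encoded fields such as uint32.
func key(fieldNumber, wireType uint32) uint32 {
	return fieldNumber<<3 | wireType
}

func main() {
	// Old StreamUpdate numbering: block_height = 5, exec_mode = 6.
	fmt.Println(key(5, 0), key(6, 0)) // 40 48
	// New numbering after this change: block_height = 3, exec_mode = 4.
	fmt.Println(key(3, 0), key(4, 0)) // 24 32
}
```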
diff --git a/indexer/packages/v4-protos/src/codegen/dydxprotocol/subaccounts/streaming.ts b/indexer/packages/v4-protos/src/codegen/dydxprotocol/subaccounts/streaming.ts
deleted file mode 100644
index fd54ef914b..0000000000
--- a/indexer/packages/v4-protos/src/codegen/dydxprotocol/subaccounts/streaming.ts
+++ /dev/null
@@ -1,286 +0,0 @@
-import { SubaccountId, SubaccountIdSDKType } from "./subaccount";
-import * as _m0 from "protobufjs/minimal";
-import { DeepPartial, Long } from "../../helpers";
-/**
- * StreamSubaccountUpdate provides information on a subaccount update. Used in
- * the full node GRPC stream.
- */
-
-export interface StreamSubaccountUpdate {
-  subaccountId?: SubaccountId;
-  /** updated_perpetual_positions will each be for unique perpetuals. */
-
-  updatedPerpetualPositions: SubaccountPerpetualPosition[];
-  /** updated_asset_positions will each be for unique assets. */
-
-  updatedAssetPositions: SubaccountAssetPosition[];
-  /**
-   * Snapshot indicates if the response is from a snapshot of the subaccount.
-   * All updates should be ignored until snapshot is received.
-   * If the snapshot is true, then all previous entries should be
-   * discarded and the subaccount should be resynced.
-   * For a snapshot subaccount update, the `updated_perpetual_positions` and
-   * `updated_asset_positions` fields will contain the full state of the
-   * subaccount.
-   */
-
-  snapshot: boolean;
-}
-/**
- * StreamSubaccountUpdate provides information on a subaccount update. Used in
- * the full node GRPC stream.
- */
-
-export interface StreamSubaccountUpdateSDKType {
-  subaccount_id?: SubaccountIdSDKType;
-  /** updated_perpetual_positions will each be for unique perpetuals. */
-
-  updated_perpetual_positions: SubaccountPerpetualPositionSDKType[];
-  /** updated_asset_positions will each be for unique assets. */
-
-  updated_asset_positions: SubaccountAssetPositionSDKType[];
-  /**
-   * Snapshot indicates if the response is from a snapshot of the subaccount.
-   * All updates should be ignored until snapshot is received.
-   * If the snapshot is true, then all previous entries should be
-   * discarded and the subaccount should be resynced.
-   * For a snapshot subaccount update, the `updated_perpetual_positions` and
-   * `updated_asset_positions` fields will contain the full state of the
-   * subaccount.
-   */
-
-  snapshot: boolean;
-}
-/**
- * SubaccountPerpetualPosition provides information on a subaccount's updated
- * perpetual positions.
- */
-
-export interface SubaccountPerpetualPosition {
-  /** The `Id` of the `Perpetual`. */
-  perpetualId: number;
-  /** The size of the position in base quantums. */
-
-  quantums: Long;
-}
-/**
- * SubaccountPerpetualPosition provides information on a subaccount's updated
- * perpetual positions.
- */
-
-export interface SubaccountPerpetualPositionSDKType {
-  /** The `Id` of the `Perpetual`. */
-  perpetual_id: number;
-  /** The size of the position in base quantums. */
-
-  quantums: Long;
-}
-/**
- * SubaccountAssetPosition provides information on a subaccount's updated asset
- * positions.
- */
-
-export interface SubaccountAssetPosition {
-  /** The `Id` of the `Asset`. */
-  assetId: number;
-  /** The absolute size of the position in base quantums. */
-
-  quantums: Long;
-}
-/**
- * SubaccountAssetPosition provides information on a subaccount's updated asset
- * positions.
- */
-
-export interface SubaccountAssetPositionSDKType {
-  /** The `Id` of the `Asset`. */
-  asset_id: number;
-  /** The absolute size of the position in base quantums. */
-
-  quantums: Long;
-}
-
-function createBaseStreamSubaccountUpdate(): StreamSubaccountUpdate {
-  return {
-    subaccountId: undefined,
-    updatedPerpetualPositions: [],
-    updatedAssetPositions: [],
-    snapshot: false
-  };
-}
-
-export const StreamSubaccountUpdate = {
-  encode(message: StreamSubaccountUpdate, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.subaccountId !== undefined) {
-      SubaccountId.encode(message.subaccountId, writer.uint32(10).fork()).ldelim();
-    }
-
-    for (const v of message.updatedPerpetualPositions) {
-      SubaccountPerpetualPosition.encode(v!, writer.uint32(18).fork()).ldelim();
-    }
-
-    for (const v of message.updatedAssetPositions) {
-      SubaccountAssetPosition.encode(v!, writer.uint32(26).fork()).ldelim();
-    }
-
-    if (message.snapshot === true) {
-      writer.uint32(32).bool(message.snapshot);
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): StreamSubaccountUpdate {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseStreamSubaccountUpdate();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.subaccountId = SubaccountId.decode(reader, reader.uint32());
-          break;
-
-        case 2:
-          message.updatedPerpetualPositions.push(SubaccountPerpetualPosition.decode(reader, reader.uint32()));
-          break;
-
-        case 3:
-          message.updatedAssetPositions.push(SubaccountAssetPosition.decode(reader, reader.uint32()));
-          break;
-
-        case 4:
-          message.snapshot = reader.bool();
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<StreamSubaccountUpdate>): StreamSubaccountUpdate {
-    const message = createBaseStreamSubaccountUpdate();
-    message.subaccountId = object.subaccountId !== undefined && object.subaccountId !== null ? SubaccountId.fromPartial(object.subaccountId) : undefined;
-    message.updatedPerpetualPositions = object.updatedPerpetualPositions?.map(e => SubaccountPerpetualPosition.fromPartial(e)) || [];
-    message.updatedAssetPositions = object.updatedAssetPositions?.map(e => SubaccountAssetPosition.fromPartial(e)) || [];
-    message.snapshot = object.snapshot ?? false;
-    return message;
-  }
-
-};
-
-function createBaseSubaccountPerpetualPosition(): SubaccountPerpetualPosition {
-  return {
-    perpetualId: 0,
-    quantums: Long.UZERO
-  };
-}
-
-export const SubaccountPerpetualPosition = {
-  encode(message: SubaccountPerpetualPosition, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.perpetualId !== 0) {
-      writer.uint32(8).uint32(message.perpetualId);
-    }
-
-    if (!message.quantums.isZero()) {
-      writer.uint32(16).uint64(message.quantums);
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): SubaccountPerpetualPosition {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseSubaccountPerpetualPosition();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.perpetualId = reader.uint32();
-          break;
-
-        case 2:
-          message.quantums = (reader.uint64() as Long);
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<SubaccountPerpetualPosition>): SubaccountPerpetualPosition {
-    const message = createBaseSubaccountPerpetualPosition();
-    message.perpetualId = object.perpetualId ?? 0;
-    message.quantums = object.quantums !== undefined && object.quantums !== null ? Long.fromValue(object.quantums) : Long.UZERO;
-    return message;
-  }
-
-};
-
-function createBaseSubaccountAssetPosition(): SubaccountAssetPosition {
-  return {
-    assetId: 0,
-    quantums: Long.UZERO
-  };
-}
-
-export const SubaccountAssetPosition = {
-  encode(message: SubaccountAssetPosition, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.assetId !== 0) {
-      writer.uint32(8).uint32(message.assetId);
-    }
-
-    if (!message.quantums.isZero()) {
-      writer.uint32(16).uint64(message.quantums);
-    }
-
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): SubaccountAssetPosition {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseSubaccountAssetPosition();
-
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-
-      switch (tag >>> 3) {
-        case 1:
-          message.assetId = reader.uint32();
-          break;
-
-        case 2:
-          message.quantums = (reader.uint64() as Long);
-          break;
-
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-
-    return message;
-  },
-
-  fromPartial(object: DeepPartial<SubaccountAssetPosition>): SubaccountAssetPosition {
-    const message = createBaseSubaccountAssetPosition();
-    message.assetId = object.assetId ?? 0;
-    message.quantums = object.quantums !== undefined && object.quantums !== null ? Long.fromValue(object.quantums) : Long.UZERO;
-    return message;
-  }
-
-};
\ No newline at end of file
diff --git a/indexer/packages/v4-protos/src/codegen/gogoproto/bundle.ts b/indexer/packages/v4-protos/src/codegen/gogoproto/bundle.ts
index c6e19ca25e..f86995801b 100644
--- a/indexer/packages/v4-protos/src/codegen/gogoproto/bundle.ts
+++ b/indexer/packages/v4-protos/src/codegen/gogoproto/bundle.ts
@@ -1,3 +1,3 @@
-import * as _106 from "./gogo";
-export const gogoproto = { ..._106
+import * as _105 from "./gogo";
+export const gogoproto = { ..._105
 };
\ No newline at end of file
diff --git a/indexer/packages/v4-protos/src/codegen/google/bundle.ts b/indexer/packages/v4-protos/src/codegen/google/bundle.ts
index ce09159550..bd8180db33 100644
--- a/indexer/packages/v4-protos/src/codegen/google/bundle.ts
+++ b/indexer/packages/v4-protos/src/codegen/google/bundle.ts
@@ -1,16 +1,16 @@
-import * as _107 from "./api/annotations";
-import * as _108 from "./api/http";
-import * as _109 from "./protobuf/descriptor";
-import * as _110 from "./protobuf/duration";
-import * as _111 from "./protobuf/timestamp";
-import * as _112 from "./protobuf/any";
+import * as _106 from "./api/annotations";
+import * as _107 from "./api/http";
+import * as _108 from "./protobuf/descriptor";
+import * as _109 from "./protobuf/duration";
+import * as _110 from "./protobuf/timestamp";
+import * as _111 from "./protobuf/any";
 export namespace google {
-  export const api = { ..._107,
-    ..._108
+  export const api = { ..._106,
+    ..._107
   };
-  export const protobuf = { ..._109,
+  export const protobuf = { ..._108,
+    ..._109,
     ..._110,
-    ..._111,
-    ..._112
+    ..._111
   };
 }
\ No newline at end of file
diff --git a/proto/dydxprotocol/clob/order.proto b/proto/dydxprotocol/clob/order.proto
index 2cf39b808c..6cd482bd2b 100644
--- a/proto/dydxprotocol/clob/order.proto
+++ b/proto/dydxprotocol/clob/order.proto
@@ -3,7 +3,6 @@ package dydxprotocol.clob;
 
 import "gogoproto/gogo.proto";
 import "dydxprotocol/subaccounts/subaccount.proto";
-import "dydxprotocol/clob/liquidations.proto";
 
 option go_package = "github.com/dydxprotocol/v4-chain/protocol/x/clob/types";
 
@@ -229,24 +228,3 @@ message TransactionOrdering {
   // Within the block, the unique transaction index.
   uint32 transaction_index = 2;
 }
-
-// StreamLiquidationOrder represents an protocol-generated IOC liquidation
-// order. Used in full node streaming.
-message StreamLiquidationOrder {
-  // Information about this liquidation order.
-  PerpetualLiquidationInfo liquidation_info = 1;
-
-  // CLOB pair ID of the CLOB pair the liquidation order will be matched
-  // against.
-  uint32 clob_pair_id = 2;
-
-  // True if this is a buy order liquidating a short position, false if vice
-  // versa.
-  bool is_buy = 3;
-
-  // The number of base quantums for this liquidation order.
-  uint64 quantums = 4;
-
-  // The subticks this liquidation order will be submitted at.
-  uint64 subticks = 5;
-}
\ No newline at end of file
diff --git a/proto/dydxprotocol/clob/query.proto b/proto/dydxprotocol/clob/query.proto
index b0342bc3c6..cca523bcc6 100644
--- a/proto/dydxprotocol/clob/query.proto
+++ b/proto/dydxprotocol/clob/query.proto
@@ -12,8 +12,6 @@ import "dydxprotocol/clob/matches.proto";
 import "dydxprotocol/clob/liquidations_config.proto";
 import "dydxprotocol/clob/mev.proto";
 import "dydxprotocol/indexer/off_chain_updates/off_chain_updates.proto";
-import "dydxprotocol/subaccounts/streaming.proto";
-import "dydxprotocol/subaccounts/subaccount.proto";
 
 option go_package = "github.com/dydxprotocol/v4-chain/protocol/x/clob/types";
 
@@ -167,9 +165,6 @@ message QueryLiquidationsConfigurationResponse {
 message StreamOrderbookUpdatesRequest {
   // Clob pair ids to stream orderbook updates for.
   repeated uint32 clob_pair_id = 1;
-
-  // Subaccount ids to stream subaccount updates for.
-  repeated dydxprotocol.subaccounts.SubaccountId subaccount_ids = 2;
 }
 
 // StreamOrderbookUpdatesResponse is a response message for the
@@ -183,19 +178,17 @@ message StreamOrderbookUpdatesResponse {
 // GRPC stream.
 message StreamUpdate {
   // Contains one of an StreamOrderbookUpdate,
-  // StreamOrderbookFill, StreamTakerOrderStatus.
+  // StreamOrderbookFill.
   oneof update_message {
     StreamOrderbookUpdate orderbook_update = 1;
     StreamOrderbookFill order_fill = 2;
-    StreamTakerOrder taker_order = 3;
-    dydxprotocol.subaccounts.StreamSubaccountUpdate subaccount_update = 4;
   }
 
   // Block height of the update.
-  uint32 block_height = 5;
+  uint32 block_height = 3;
 
   // Exec mode of the update.
-  uint32 exec_mode = 6;
+  uint32 exec_mode = 4;
 }
 
 // StreamOrderbookUpdate provides information on an orderbook update. Used in
@@ -227,39 +220,3 @@ message StreamOrderbookFill {
   // Resulting fill amounts for each order in the orders array.
   repeated uint64 fill_amounts = 3;
 }
-
-// StreamTakerOrder provides information on a taker order that was attempted
-// to be matched on the orderbook.
-// It is intended to be used only in full node streaming.
-message StreamTakerOrder {
-  // The taker order that was matched on the orderbook. Can be a
-  // regular order or a liquidation order.
-  oneof taker_order {
-    Order order = 1;
-    StreamLiquidationOrder liquidation_order = 2;
-  }
-
-  // Information on the taker order after it is matched on the book,
-  // either successfully or unsuccessfully.
-  StreamTakerOrderStatus taker_order_status = 3;
-}
-
-// StreamTakerOrderStatus is a representation of a taker order
-// after it is attempted to be matched on the orderbook.
-// It is intended to be used only in full node streaming.
-message StreamTakerOrderStatus {
-  // The state of the taker order after attempting to match it against the
-  // orderbook. Possible enum values can be found here:
-  // https://github.com/dydxprotocol/v4-chain/blob/main/protocol/x/clob/types/orderbook.go#L105
-  uint32 order_status = 1;
-
-  // The amount of remaining (non-matched) base quantums of this taker order.
-  uint64 remaining_quantums = 2;
-
-  // The amount of base quantums that were *optimistically* filled for this
-  // taker order when the order is matched against the orderbook. Note that if
-  // any quantums of this order were optimistically filled or filled in state
-  // before this invocation of the matching loop, this value will not include
-  // them.
-  uint64 optimistically_filled_quantums = 3;
-}
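With taker_order and subaccount_update removed, the update_message oneof now carries only orderbook updates and fills. A sketch of how a Go consumer of the stream might branch on it, assuming the usual gogoproto oneof wrapper names (StreamUpdate_OrderbookUpdate, StreamUpdate_OrderFill) and field names in the clob types package; those generated identifiers are not shown verbatim in this PR:

```go
package streamconsumer

import (
	"fmt"

	// Assumed import path for the generated clob types.
	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
)

// handleStreamUpdate dispatches on the two remaining oneof variants.
func handleStreamUpdate(update *clobtypes.StreamUpdate) {
	switch msg := update.UpdateMessage.(type) {
	case *clobtypes.StreamUpdate_OrderbookUpdate:
		fmt.Printf("orderbook update at height %d: %T\n", update.BlockHeight, msg.OrderbookUpdate)
	case *clobtypes.StreamUpdate_OrderFill:
		// fill_amounts is the repeated uint64 field on StreamOrderbookFill.
		fmt.Println("order fill with", len(msg.OrderFill.FillAmounts), "fill amounts")
	default:
		// taker_order and subaccount_update variants no longer exist after this change.
		fmt.Println("unrecognized update at height", update.BlockHeight)
	}
}
```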
diff --git a/proto/dydxprotocol/subaccounts/streaming.proto b/proto/dydxprotocol/subaccounts/streaming.proto
deleted file mode 100644
index 13b71ee1ae..0000000000
--- a/proto/dydxprotocol/subaccounts/streaming.proto
+++ /dev/null
@@ -1,42 +0,0 @@
-syntax = "proto3";
-package dydxprotocol.subaccounts;
-
-import "dydxprotocol/subaccounts/subaccount.proto";
-
-option go_package = "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types";
-
-// StreamSubaccountUpdate provides information on a subaccount update. Used in
-// the full node GRPC stream.
-message StreamSubaccountUpdate {
-  SubaccountId subaccount_id = 1;
-  // updated_perpetual_positions will each be for unique perpetuals.
-  repeated SubaccountPerpetualPosition updated_perpetual_positions = 2;
-  // updated_asset_positions will each be for unique assets.
-  repeated SubaccountAssetPosition updated_asset_positions = 3;
-  // Snapshot indicates if the response is from a snapshot of the subaccount.
-  // All updates should be ignored until snapshot is received.
-  // If the snapshot is true, then all previous entries should be
-  // discarded and the subaccount should be resynced.
-  // For a snapshot subaccount update, the `updated_perpetual_positions` and
-  // `updated_asset_positions` fields will contain the full state of the
-  // subaccount.
-  bool snapshot = 4;
-}
-
-// SubaccountPerpetualPosition provides information on a subaccount's updated
-// perpetual positions.
-message SubaccountPerpetualPosition {
-  // The `Id` of the `Perpetual`.
-  uint32 perpetual_id = 1;
-  // The size of the position in base quantums.
-  uint64 quantums = 2;
-}
-
-// SubaccountAssetPosition provides information on a subaccount's updated asset
-// positions.
-message SubaccountAssetPosition {
-  // The `Id` of the `Asset`.
-  uint32 asset_id = 1;
-  // The absolute size of the position in base quantums.
-  uint64 quantums = 2;
-}
diff --git a/protocol/app/app.go b/protocol/app/app.go
index 1263f2949c..5787ff7af3 100644
--- a/protocol/app/app.go
+++ b/protocol/app/app.go
@@ -218,10 +218,9 @@ import (
 	servicemetrics "github.com/skip-mev/slinky/service/metrics"
 	promserver "github.com/skip-mev/slinky/service/servers/prometheus"
 
-	// Full Node Streaming
-	streaming "github.com/dydxprotocol/v4-chain/protocol/streaming"
-	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/types"
-	"github.com/dydxprotocol/v4-chain/protocol/streaming/ws"
+	// Grpc Streaming
+	streaming "github.com/dydxprotocol/v4-chain/protocol/streaming/grpc"
+	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/grpc/types"
 )
 
 var (
@@ -324,11 +323,9 @@ type App struct {
 	// module configurator
 	configurator module.Configurator
 
-	IndexerEventManager      indexer_manager.IndexerEventManager
-	FullNodeStreamingManager streamingtypes.FullNodeStreamingManager
-	WebsocketStreamingServer *ws.WebsocketServer
-
-	Server *daemonserver.Server
+	IndexerEventManager  indexer_manager.IndexerEventManager
+	GrpcStreamingManager streamingtypes.GrpcStreamingManager
+	Server               *daemonserver.Server
 
 	// startDaemons encapsulates the logic that starts all daemons and daemon services. This function contains a
 	// closure of all relevant data structures that are shared with various keepers. Daemon services startup is
@@ -460,11 +457,8 @@ func New(
 			if app.SlinkyClient != nil {
 				app.SlinkyClient.Stop()
 			}
-			if app.FullNodeStreamingManager != nil {
-				app.FullNodeStreamingManager.Stop()
-			}
-			if app.WebsocketStreamingServer != nil {
-				app.WebsocketStreamingServer.Shutdown()
+			if app.GrpcStreamingManager != nil {
+				app.GrpcStreamingManager.Stop()
 			}
 			return nil
 		},
@@ -726,11 +720,7 @@ func New(
 		indexerFlags.SendOffchainData,
 	)
 
-	app.FullNodeStreamingManager, app.WebsocketStreamingServer = getFullNodeStreamingManagerFromOptions(
-		appFlags,
-		appCodec,
-		logger,
-	)
+	app.GrpcStreamingManager = getGrpcStreamingManagerFromOptions(appFlags, logger)
 
 	timeProvider := &timelib.TimeProviderImpl{}
 
@@ -1012,7 +1002,6 @@ func New(
 		app.PerpetualsKeeper,
 		app.BlockTimeKeeper,
 		app.IndexerEventManager,
-		app.FullNodeStreamingManager,
 	)
 	subaccountsModule := subaccountsmodule.NewAppModule(
 		appCodec,
@@ -1023,7 +1012,7 @@ func New(
 	logger.Info("Parsed CLOB flags", "Flags", clobFlags)
 
 	memClob := clobmodulememclob.NewMemClobPriceTimePriority(app.IndexerEventManager.Enabled())
-	memClob.SetGenerateOrderbookUpdates(app.FullNodeStreamingManager.Enabled())
+	memClob.SetGenerateOrderbookUpdates(app.GrpcStreamingManager.Enabled())
 
 	app.ClobKeeper = clobmodulekeeper.NewKeeper(
 		appCodec,
@@ -1046,7 +1035,7 @@ func New(
 		app.StatsKeeper,
 		app.RewardsKeeper,
 		app.IndexerEventManager,
-		app.FullNodeStreamingManager,
+		app.GrpcStreamingManager,
 		txConfig.TxDecoder(),
 		clobFlags,
 		rate_limit.NewPanicRateLimiter[sdk.Msg](),
@@ -1920,41 +1909,20 @@ func getIndexerFromOptions(
 	return indexerMessageSender, indexerFlags
 }
 
-// getFullNodeStreamingManagerFromOptions returns an instance of a streamingtypes.FullNodeStreamingManager
-// from the specified options. This function will default to returning a no-op instance.
-func getFullNodeStreamingManagerFromOptions(
+// getGrpcStreamingManagerFromOptions returns an instance of a streamingtypes.GrpcStreamingManager from the specified
+// options. This function will default to returning a no-op instance.
+func getGrpcStreamingManagerFromOptions(
 	appFlags flags.Flags,
-	cdc codec.Codec,
 	logger log.Logger,
-) (manager streamingtypes.FullNodeStreamingManager, wsServer *ws.WebsocketServer) {
-	logger = logger.With(log.ModuleKey, "full-node-streaming")
+) (manager streamingtypes.GrpcStreamingManager) {
 	if appFlags.GrpcStreamingEnabled {
-		logger.Info("Full node streaming is enabled")
-		if appFlags.FullNodeStreamingSnapshotInterval > 0 {
-			logger.Info("Interval snapshots enabled")
-		}
-		manager := streaming.NewFullNodeStreamingManager(
+		logger.Info("GRPC streaming is enabled")
+		return streaming.NewGrpcStreamingManager(
 			logger,
 			appFlags.GrpcStreamingFlushIntervalMs,
 			appFlags.GrpcStreamingMaxBatchSize,
 			appFlags.GrpcStreamingMaxChannelBufferSize,
-			appFlags.FullNodeStreamingSnapshotInterval,
 		)
-
-		// Start websocket server.
-		if appFlags.WebsocketStreamingEnabled {
-			port := appFlags.WebsocketStreamingPort
-			logger.Info("Websocket full node streaming is enabled")
-			wsServer = ws.NewWebsocketServer(
-				manager,
-				cdc,
-				logger,
-				port,
-			)
-			wsServer.Start()
-		}
-
-		return manager, wsServer
 	}
-	return streaming.NewNoopGrpcStreamingManager(), wsServer
+	return streaming.NewNoopGrpcStreamingManager()
 }
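The restored constructor keeps the earlier control flow: a real streaming manager only when grpc streaming is enabled, and a no-op manager otherwise, so call sites such as memClob.SetGenerateOrderbookUpdates(app.GrpcStreamingManager.Enabled()) never need a nil check. A condensed sketch of that branch, using only the constructors and flags visible in the hunk above (logging elided, function name illustrative):

```go
// Condensed sketch of getGrpcStreamingManagerFromOptions after the revert.
func buildGrpcStreamingManager(
	appFlags flags.Flags,
	logger log.Logger,
) streamingtypes.GrpcStreamingManager {
	if appFlags.GrpcStreamingEnabled {
		return streaming.NewGrpcStreamingManager(
			logger,
			appFlags.GrpcStreamingFlushIntervalMs,
			appFlags.GrpcStreamingMaxBatchSize,
			appFlags.GrpcStreamingMaxChannelBufferSize,
		)
	}
	// Streaming disabled: the no-op manager keeps downstream wiring unconditional.
	return streaming.NewNoopGrpcStreamingManager()
}
```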
diff --git a/protocol/app/app_test.go b/protocol/app/app_test.go
index 1eab0b30e7..9bc21b9d88 100644
--- a/protocol/app/app_test.go
+++ b/protocol/app/app_test.go
@@ -106,7 +106,6 @@ func TestAppIsFullyInitialized(t *testing.T) {
 				"BridgeClient",
 				"SlinkyClient",
 				"oraclePrometheusServer",
-				"WebsocketStreamingServer",
 
 				// Any default constructed type can be considered initialized if the default is what is
 				// expected. getUninitializedStructFields relies on fields being the non-default and
diff --git a/protocol/app/flags/flags.go b/protocol/app/flags/flags.go
index fc14704028..5756b69162 100644
--- a/protocol/app/flags/flags.go
+++ b/protocol/app/flags/flags.go
@@ -20,14 +20,11 @@ type Flags struct {
 	GrpcAddress string
 	GrpcEnable  bool
 
-	// Full Node Streaming
+	// Grpc Streaming
 	GrpcStreamingEnabled              bool
 	GrpcStreamingFlushIntervalMs      uint32
 	GrpcStreamingMaxBatchSize         uint32
 	GrpcStreamingMaxChannelBufferSize uint32
-	WebsocketStreamingEnabled         bool
-	WebsocketStreamingPort            uint16
-	FullNodeStreamingSnapshotInterval uint32
 
 	VEOracleEnabled bool // Slinky Vote Extensions
 }
@@ -48,9 +45,6 @@ const (
 	GrpcStreamingFlushIntervalMs      = "grpc-streaming-flush-interval-ms"
 	GrpcStreamingMaxBatchSize         = "grpc-streaming-max-batch-size"
 	GrpcStreamingMaxChannelBufferSize = "grpc-streaming-max-channel-buffer-size"
-	WebsocketStreamingEnabled         = "websocket-streaming-enabled"
-	WebsocketStreamingPort            = "websocket-streaming-port"
-	FullNodeStreamingSnapshotInterval = "fns-snapshot-interval"
 
 	// Slinky VEs enabled
 	VEOracleEnabled = "slinky-vote-extension-oracle-enabled"
@@ -67,9 +61,6 @@ const (
 	DefaultGrpcStreamingFlushIntervalMs      = 50
 	DefaultGrpcStreamingMaxBatchSize         = 2000
 	DefaultGrpcStreamingMaxChannelBufferSize = 2000
-	DefaultWebsocketStreamingEnabled         = false
-	DefaultWebsocketStreamingPort            = 9092
-	DefaultFullNodeStreamingSnapshotInterval = 0
 
 	DefaultVEOracleEnabled = true
 )
@@ -120,22 +111,6 @@ func AddFlagsToCmd(cmd *cobra.Command) {
 		DefaultGrpcStreamingMaxChannelBufferSize,
 		"Maximum per-subscription channel size before grpc streaming cancels a singular subscription",
 	)
-	cmd.Flags().Uint32(
-		FullNodeStreamingSnapshotInterval,
-		DefaultFullNodeStreamingSnapshotInterval,
-		"If set to positive number, number of blocks between each periodic snapshot will be sent out. "+
-			"Defaults to zero for regular behavior of one initial snapshot.",
-	)
-	cmd.Flags().Bool(
-		WebsocketStreamingEnabled,
-		DefaultWebsocketStreamingEnabled,
-		"Whether to enable websocket full node streaming for full nodes",
-	)
-	cmd.Flags().Uint16(
-		WebsocketStreamingPort,
-		DefaultWebsocketStreamingPort,
-		"Port for websocket full node streaming connections. Defaults to 9092.",
-	)
 	cmd.Flags().Bool(
 		VEOracleEnabled,
 		DefaultVEOracleEnabled,
@@ -156,22 +131,15 @@ func (f *Flags) Validate() error {
 			return fmt.Errorf("grpc.enable must be set to true - grpc streaming requires gRPC server")
 		}
 		if f.GrpcStreamingMaxBatchSize == 0 {
-			return fmt.Errorf("full node streaming batch size must be positive number")
+			return fmt.Errorf("grpc streaming batch size must be positive number")
 		}
 		if f.GrpcStreamingFlushIntervalMs == 0 {
-			return fmt.Errorf("full node streaming flush interval must be positive number")
+			return fmt.Errorf("grpc streaming flush interval must be positive number")
 		}
 		if f.GrpcStreamingMaxChannelBufferSize == 0 {
-			return fmt.Errorf("full node streaming channel size must be positive number")
-		}
-	}
-
-	if f.WebsocketStreamingEnabled {
-		if !f.GrpcStreamingEnabled {
-			return fmt.Errorf("websocket full node streaming requires grpc streaming to be enabled")
+			return fmt.Errorf("grpc streaming channel size must be positive number")
 		}
 	}
-
 	return nil
 }
 
@@ -195,9 +163,6 @@ func GetFlagValuesFromOptions(
 		GrpcStreamingFlushIntervalMs:      DefaultGrpcStreamingFlushIntervalMs,
 		GrpcStreamingMaxBatchSize:         DefaultGrpcStreamingMaxBatchSize,
 		GrpcStreamingMaxChannelBufferSize: DefaultGrpcStreamingMaxChannelBufferSize,
-		WebsocketStreamingEnabled:         DefaultWebsocketStreamingEnabled,
-		WebsocketStreamingPort:            DefaultWebsocketStreamingPort,
-		FullNodeStreamingSnapshotInterval: DefaultFullNodeStreamingSnapshotInterval,
 
 		VEOracleEnabled: true,
 	}
@@ -263,24 +228,6 @@ func GetFlagValuesFromOptions(
 		}
 	}
 
-	if option := appOpts.Get(WebsocketStreamingEnabled); option != nil {
-		if v, err := cast.ToBoolE(option); err == nil {
-			result.WebsocketStreamingEnabled = v
-		}
-	}
-
-	if option := appOpts.Get(WebsocketStreamingPort); option != nil {
-		if v, err := cast.ToUint16E(option); err == nil {
-			result.WebsocketStreamingPort = v
-		}
-	}
-
-	if option := appOpts.Get(FullNodeStreamingSnapshotInterval); option != nil {
-		if v, err := cast.ToUint32E(option); err == nil {
-			result.FullNodeStreamingSnapshotInterval = v
-		}
-	}
-
 	if option := appOpts.Get(VEOracleEnabled); option != nil {
 		if v, err := cast.ToBoolE(option); err == nil {
 			result.VEOracleEnabled = v
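
For reference, here is a minimal, self-contained sketch of the validation invariants that remain after this change. The `streamingFlags` struct and `validate` method are local stand-ins for illustration only, not the repo's `flags.Flags`/`Validate`:

```go
package main

import (
	"errors"
	"fmt"
)

// streamingFlags is a local stand-in mirroring the gRPC streaming fields kept by this diff.
type streamingFlags struct {
	GrpcEnable                        bool
	GrpcStreamingEnabled              bool
	GrpcStreamingFlushIntervalMs      uint32
	GrpcStreamingMaxBatchSize         uint32
	GrpcStreamingMaxChannelBufferSize uint32
}

// validate reproduces the checks left in Flags.Validate once the websocket and
// snapshot-interval flags are removed: streaming requires the gRPC server and
// strictly positive batch, flush-interval, and channel-buffer sizes.
func (f streamingFlags) validate() error {
	if !f.GrpcStreamingEnabled {
		return nil
	}
	if !f.GrpcEnable {
		return errors.New("grpc.enable must be set to true - grpc streaming requires gRPC server")
	}
	if f.GrpcStreamingMaxBatchSize == 0 {
		return errors.New("grpc streaming batch size must be a positive number")
	}
	if f.GrpcStreamingFlushIntervalMs == 0 {
		return errors.New("grpc streaming flush interval must be a positive number")
	}
	if f.GrpcStreamingMaxChannelBufferSize == 0 {
		return errors.New("grpc streaming channel size must be a positive number")
	}
	return nil
}

func main() {
	f := streamingFlags{
		GrpcEnable:                        true,
		GrpcStreamingEnabled:              true,
		GrpcStreamingFlushIntervalMs:      50,
		GrpcStreamingMaxBatchSize:         2000,
		GrpcStreamingMaxChannelBufferSize: 2000,
	}
	fmt.Println(f.validate()) // <nil>
}
```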
diff --git a/protocol/app/flags/flags_test.go b/protocol/app/flags/flags_test.go
index 0def068f40..4b5da76819 100644
--- a/protocol/app/flags/flags_test.go
+++ b/protocol/app/flags/flags_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/cosmos/cosmos-sdk/server/config"
+
 	"github.com/dydxprotocol/v4-chain/protocol/app/flags"
 	"github.com/dydxprotocol/v4-chain/protocol/mocks"
 	"github.com/spf13/cobra"
@@ -37,18 +38,9 @@ func TestAddFlagsToCommand(t *testing.T) {
 		fmt.Sprintf("Has %s flag", flags.GrpcStreamingMaxBatchSize): {
 			flagName: flags.GrpcStreamingMaxBatchSize,
 		},
-		fmt.Sprintf("Has %s flag", flags.FullNodeStreamingSnapshotInterval): {
-			flagName: flags.FullNodeStreamingSnapshotInterval,
-		},
 		fmt.Sprintf("Has %s flag", flags.GrpcStreamingMaxChannelBufferSize): {
 			flagName: flags.GrpcStreamingMaxChannelBufferSize,
 		},
-		fmt.Sprintf("Has %s flag", flags.WebsocketStreamingEnabled): {
-			flagName: flags.WebsocketStreamingEnabled,
-		},
-		fmt.Sprintf("Has %s flag", flags.WebsocketStreamingPort): {
-			flagName: flags.WebsocketStreamingPort,
-		},
 	}
 
 	for name, tc := range tests {
@@ -65,12 +57,11 @@ func TestValidate(t *testing.T) {
 	}{
 		"success (default values)": {
 			flags: flags.Flags{
-				NonValidatingFullNode:             flags.DefaultNonValidatingFullNode,
-				DdAgentHost:                       flags.DefaultDdAgentHost,
-				DdTraceAgentPort:                  flags.DefaultDdTraceAgentPort,
-				GrpcAddress:                       config.DefaultGRPCAddress,
-				GrpcEnable:                        true,
-				FullNodeStreamingSnapshotInterval: flags.DefaultFullNodeStreamingSnapshotInterval,
+				NonValidatingFullNode: flags.DefaultNonValidatingFullNode,
+				DdAgentHost:           flags.DefaultDdAgentHost,
+				DdTraceAgentPort:      flags.DefaultDdTraceAgentPort,
+				GrpcAddress:           config.DefaultGRPCAddress,
+				GrpcEnable:            true,
 			},
 		},
 		"success - full node & gRPC disabled": {
@@ -87,19 +78,6 @@ func TestValidate(t *testing.T) {
 				GrpcStreamingFlushIntervalMs:      100,
 				GrpcStreamingMaxBatchSize:         2000,
 				GrpcStreamingMaxChannelBufferSize: 2000,
-				WebsocketStreamingEnabled:         false,
-			},
-		},
-		"success - both grpc and websocket streaming enabled for validating nodes": {
-			flags: flags.Flags{
-				NonValidatingFullNode:             false,
-				GrpcEnable:                        true,
-				GrpcStreamingEnabled:              true,
-				GrpcStreamingFlushIntervalMs:      100,
-				GrpcStreamingMaxBatchSize:         2000,
-				GrpcStreamingMaxChannelBufferSize: 2000,
-				WebsocketStreamingEnabled:         true,
-				WebsocketStreamingPort:            8989,
 			},
 		},
 		"failure - gRPC disabled": {
@@ -116,30 +94,6 @@ func TestValidate(t *testing.T) {
 			},
 			expectedErr: fmt.Errorf("grpc.enable must be set to true - grpc streaming requires gRPC server"),
 		},
-		"failure - websocket streaming enabled with gRPC streaming disabled": {
-			flags: flags.Flags{
-				NonValidatingFullNode:             true,
-				GrpcEnable:                        true,
-				GrpcStreamingEnabled:              false,
-				WebsocketStreamingEnabled:         true,
-				GrpcStreamingFlushIntervalMs:      100,
-				GrpcStreamingMaxBatchSize:         10000,
-				GrpcStreamingMaxChannelBufferSize: 10000,
-			},
-			expectedErr: fmt.Errorf("websocket full node streaming requires grpc streaming to be enabled"),
-		},
-		"success - websocket streaming enabled with gRPC enabled for validating node": {
-			flags: flags.Flags{
-				NonValidatingFullNode:             true,
-				GrpcEnable:                        true,
-				WebsocketStreamingEnabled:         true,
-				GrpcStreamingEnabled:              true,
-				GrpcStreamingFlushIntervalMs:      100,
-				GrpcStreamingMaxBatchSize:         10000,
-				GrpcStreamingMaxChannelBufferSize: 10000,
-				WebsocketStreamingPort:            8989,
-			},
-		},
 		"failure - gRPC streaming enabled with zero batch size": {
 			flags: flags.Flags{
 				NonValidatingFullNode:        true,
@@ -148,7 +102,7 @@ func TestValidate(t *testing.T) {
 				GrpcStreamingFlushIntervalMs: 100,
 				GrpcStreamingMaxBatchSize:    0,
 			},
-			expectedErr: fmt.Errorf("full node streaming batch size must be positive number"),
+			expectedErr: fmt.Errorf("grpc streaming batch size must be a positive number"),
 		},
 		"failure - gRPC streaming enabled with zero flush interval ms": {
 			flags: flags.Flags{
@@ -158,7 +112,7 @@ func TestValidate(t *testing.T) {
 				GrpcStreamingFlushIntervalMs: 0,
 				GrpcStreamingMaxBatchSize:    2000,
 			},
-			expectedErr: fmt.Errorf("full node streaming flush interval must be positive number"),
+			expectedErr: fmt.Errorf("grpc streaming flush interval must be a positive number"),
 		},
 		"failure - gRPC streaming enabled with zero channel size ms": {
 			flags: flags.Flags{
@@ -169,29 +123,7 @@ func TestValidate(t *testing.T) {
 				GrpcStreamingMaxBatchSize:         2000,
 				GrpcStreamingMaxChannelBufferSize: 0,
 			},
-			expectedErr: fmt.Errorf("full node streaming channel size must be positive number"),
-		},
-		"failure - websocket streaming enabled with zero batch size": {
-			flags: flags.Flags{
-				NonValidatingFullNode:        true,
-				GrpcEnable:                   true,
-				GrpcStreamingEnabled:         true,
-				GrpcStreamingFlushIntervalMs: 100,
-				GrpcStreamingMaxBatchSize:    0,
-				WebsocketStreamingEnabled:    true,
-			},
-			expectedErr: fmt.Errorf("full node streaming batch size must be positive number"),
-		},
-		"success - full node streaming enabled with 20 snapshot interval": {
-			flags: flags.Flags{
-				NonValidatingFullNode:             true,
-				GrpcEnable:                        true,
-				GrpcStreamingEnabled:              true,
-				GrpcStreamingFlushIntervalMs:      100,
-				GrpcStreamingMaxBatchSize:         2000,
-				GrpcStreamingMaxChannelBufferSize: 2000,
-				FullNodeStreamingSnapshotInterval: 20,
-			},
+			expectedErr: fmt.Errorf("grpc streaming channel size must be a positive number"),
 		},
 	}
 	for name, tc := range tests {
@@ -221,9 +153,6 @@ func TestGetFlagValuesFromOptions(t *testing.T) {
 		expectedGrpcStreamingFlushMs              uint32
 		expectedGrpcStreamingBatchSize            uint32
 		expectedGrpcStreamingMaxChannelBufferSize uint32
-		expectedWebsocketEnabled                  bool
-		expectedWebsocketPort                     uint16
-		expectedFullNodeStreamingSnapshotInterval uint32
 	}{
 		"Sets to default if unset": {
 			expectedNonValidatingFullNodeFlag:         false,
@@ -235,9 +164,6 @@ func TestGetFlagValuesFromOptions(t *testing.T) {
 			expectedGrpcStreamingFlushMs:              50,
 			expectedGrpcStreamingBatchSize:            2000,
 			expectedGrpcStreamingMaxChannelBufferSize: 2000,
-			expectedWebsocketEnabled:                  false,
-			expectedWebsocketPort:                     9092,
-			expectedFullNodeStreamingSnapshotInterval: 0,
 		},
 		"Sets values from options": {
 			optsMap: map[string]any{
@@ -245,27 +171,21 @@ func TestGetFlagValuesFromOptions(t *testing.T) {
 				flags.DdAgentHost:                       "agentHostTest",
 				flags.DdTraceAgentPort:                  uint16(777),
 				flags.GrpcEnable:                        false,
-				flags.GrpcAddress:                       "localhost:1234",
+				flags.GrpcAddress:                       "localhost:9091",
 				flags.GrpcStreamingEnabled:              "true",
 				flags.GrpcStreamingFlushIntervalMs:      uint32(408),
 				flags.GrpcStreamingMaxBatchSize:         uint32(650),
 				flags.GrpcStreamingMaxChannelBufferSize: uint32(972),
-				flags.WebsocketStreamingEnabled:         "true",
-				flags.WebsocketStreamingPort:            8989,
-				flags.FullNodeStreamingSnapshotInterval: uint32(123),
 			},
 			expectedNonValidatingFullNodeFlag:         true,
 			expectedDdAgentHost:                       "agentHostTest",
 			expectedDdTraceAgentPort:                  777,
 			expectedGrpcEnable:                        false,
-			expectedGrpcAddress:                       "localhost:1234",
+			expectedGrpcAddress:                       "localhost:9091",
 			expectedGrpcStreamingEnable:               true,
 			expectedGrpcStreamingFlushMs:              408,
 			expectedGrpcStreamingBatchSize:            650,
 			expectedGrpcStreamingMaxChannelBufferSize: 972,
-			expectedWebsocketEnabled:                  true,
-			expectedWebsocketPort:                     8989,
-			expectedFullNodeStreamingSnapshotInterval: 123,
 		},
 	}
 
@@ -318,26 +238,11 @@ func TestGetFlagValuesFromOptions(t *testing.T) {
 				tc.expectedGrpcStreamingBatchSize,
 				flags.GrpcStreamingMaxBatchSize,
 			)
-			require.Equal(
-				t,
-				tc.expectedFullNodeStreamingSnapshotInterval,
-				flags.FullNodeStreamingSnapshotInterval,
-			)
 			require.Equal(
 				t,
 				tc.expectedGrpcStreamingMaxChannelBufferSize,
 				flags.GrpcStreamingMaxChannelBufferSize,
 			)
-			require.Equal(
-				t,
-				tc.expectedWebsocketEnabled,
-				flags.WebsocketStreamingEnabled,
-			)
-			require.Equal(
-				t,
-				tc.expectedWebsocketPort,
-				flags.WebsocketStreamingPort,
-			)
 		})
 	}
 }
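
The removed cases follow the same table-driven pattern the file keeps using. For readers unfamiliar with it, here is a tiny self-contained sketch of that pattern; `validatePositive` is a hypothetical helper, not repo code:

```go
package flagsdemo

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// validatePositive is a hypothetical stand-in for one of the Validate checks above.
func validatePositive(n uint32) error {
	if n == 0 {
		return errors.New("grpc streaming batch size must be a positive number")
	}
	return nil
}

func TestValidatePositive(t *testing.T) {
	// Each named case carries its inputs plus an optional expected error,
	// mirroring the map-of-structs style used in flags_test.go.
	tests := map[string]struct {
		n           uint32
		expectedErr error
	}{
		"success": {n: 2000},
		"failure - zero batch size": {
			n:           0,
			expectedErr: errors.New("grpc streaming batch size must be a positive number"),
		},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			err := validatePositive(tc.n)
			if tc.expectedErr == nil {
				require.NoError(t, err)
			} else {
				require.EqualError(t, err, tc.expectedErr.Error())
			}
		})
	}
}
```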
diff --git a/protocol/daemons/liquidation/client/sub_task_runner.go b/protocol/daemons/liquidation/client/sub_task_runner.go
index 594817034d..8ca0351808 100644
--- a/protocol/daemons/liquidation/client/sub_task_runner.go
+++ b/protocol/daemons/liquidation/client/sub_task_runner.go
@@ -16,7 +16,7 @@ import (
 	perplib "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/lib"
 	perptypes "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/types"
 	pricestypes "github.com/dydxprotocol/v4-chain/protocol/x/prices/types"
-	salib "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/lib"
+	sakeeper "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/keeper"
 	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
 )
 
@@ -337,7 +337,7 @@ func (c *Client) CheckSubaccountCollateralization(
 
 	// Funding payments are lazily settled, so get the settled subaccount
 	// to ensure that the funding payments are included in the net collateral calculation.
-	settledSubaccount, _, err := salib.GetSettledSubaccountWithPerpetuals(
+	settledSubaccount, _, err := sakeeper.GetSettledSubaccountWithPerpetuals(
 		unsettledSubaccount,
 		perpInfos,
 	)
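
The call keeps the same shape; only the package providing `GetSettledSubaccountWithPerpetuals` changes. As a toy illustration of why the daemon settles before the collateralization check, here is a sketch with hypothetical stand-in types (`subaccount`, `perpInfos`, `settle` — not the repo's `satypes`, `perptypes`, or keeper helper). It only shows the "fold in lazily accrued funding, then compute collateral" ordering that the comment above describes:

```go
package main

import "fmt"

// Hypothetical stand-ins; the real types live in x/subaccounts and x/perpetuals.
type subaccount struct {
	Quantums     int64 // settled balance
	UnsettledPnl int64 // funding accrued but not yet settled
}

type perpInfos struct{ FundingIndexDelta int64 }

// settle mimics the shape of GetSettledSubaccountWithPerpetuals: it returns a copy
// of the subaccount with lazily accrued funding folded into the balance.
func settle(sa subaccount, pi perpInfos) (subaccount, error) {
	sa.Quantums += sa.UnsettledPnl + pi.FundingIndexDelta
	sa.UnsettledPnl = 0
	return sa, nil
}

func main() {
	unsettled := subaccount{Quantums: 1_000, UnsettledPnl: -50}
	settled, err := settle(unsettled, perpInfos{FundingIndexDelta: -10})
	if err != nil {
		panic(err)
	}
	// Net collateral checks must use the settled balance, as the daemon does.
	fmt.Println(settled.Quantums) // 940
}
```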
diff --git a/protocol/docker-compose.yml b/protocol/docker-compose.yml
index 53558289c2..79ef5ae428 100644
--- a/protocol/docker-compose.yml
+++ b/protocol/docker-compose.yml
@@ -21,8 +21,6 @@ services:
       - "true"
       - --max-daemon-unhealthy-seconds
       - "4294967295" # Effectively disable the daemon monitor because bridge daemon is flaky in localnet.
-      - --grpc-streaming-enabled
-      - "true"
     environment:
       # See https://docs.datadoghq.com/profiler/enabling/go/ for DD_ specific environment variables
       - DD_ENV=localnet_${USER}
@@ -33,7 +31,6 @@ services:
     ports:
       - "26657:26657"
       - "9090:9090"
-      - "9092:9092" # full node streaming
       - "1317:1317"
 
   dydxprotocold1:
diff --git a/protocol/go.mod b/protocol/go.mod
index 96cd8159b4..731507414e 100644
--- a/protocol/go.mod
+++ b/protocol/go.mod
@@ -61,7 +61,6 @@ require (
 	github.com/deckarep/golang-set/v2 v2.6.0
 	github.com/ethereum/go-ethereum v1.14.5
 	github.com/go-kit/log v0.2.1
-	github.com/gorilla/websocket v1.5.3
 	github.com/hashicorp/go-metrics v0.5.3
 	github.com/ory/dockertest/v3 v3.10.0
 	github.com/pelletier/go-toml v1.9.5
@@ -230,6 +229,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
 	github.com/gordonklaus/ineffassign v0.1.0 // indirect
 	github.com/gorilla/handlers v1.5.2 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
 	github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
diff --git a/protocol/lib/metrics/metric_keys.go b/protocol/lib/metrics/metric_keys.go
index 088e742386..0c549c2dfa 100644
--- a/protocol/lib/metrics/metric_keys.go
+++ b/protocol/lib/metrics/metric_keys.go
@@ -69,9 +69,7 @@ const (
 	FullNodeGrpc                      = "full_node_grpc"
 	GrpcSendOrderbookUpdatesLatency   = "grpc_send_orderbook_updates_latency"
 	GrpcSendOrderbookSnapshotLatency  = "grpc_send_orderbook_snapshot_latency"
-	GrpcSendSubaccountSnapshotLatency = "grpc_send_subaccount_snapshot_latency"
 	GrpcSendOrderbookFillsLatency     = "grpc_send_orderbook_fills_latency"
-	GrpcSendSubaccountUpdatesLatency  = "grpc_send_subaccount_updates_latency"
 	GrpcAddUpdateToBufferCount        = "grpc_add_update_to_buffer_count"
 	GrpcAddToSubscriptionChannelCount = "grpc_add_to_subscription_channel_count"
 	GrpcSendResponseToSubscriberCount = "grpc_send_response_to_subscriber_count"
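
The remaining latency keys are consumed with the defer-at-entry pattern used later in this diff, e.g. `metrics.ModuleMeasureSince(metrics.FullNodeGrpc, metrics.GrpcSendOrderbookUpdatesLatency, time.Now())`. Below is a toy, self-contained sketch of that pattern; `measureSince` is a stand-in that prints instead of emitting to the metrics sink, not the repo's metrics package:

```go
package main

import (
	"fmt"
	"time"
)

// measureSince is a stand-in for metrics.ModuleMeasureSince; it prints the elapsed
// time instead of recording a sample.
func measureSince(module, key string, start time.Time) {
	fmt.Printf("%s.%s took %s\n", module, key, time.Since(start))
}

func sendOrderbookUpdates() {
	// Deferring at function entry with time.Now() captures the start time immediately
	// and records the latency whenever the function returns.
	defer measureSince("full_node_grpc", "grpc_send_orderbook_updates_latency", time.Now())
	time.Sleep(5 * time.Millisecond) // stand-in for real work
}

func main() { sendOrderbookUpdates() }
```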
diff --git a/protocol/mocks/ClobKeeper.go b/protocol/mocks/ClobKeeper.go
index ba47affe1a..0a16ce55b8 100644
--- a/protocol/mocks/ClobKeeper.go
+++ b/protocol/mocks/ClobKeeper.go
@@ -718,8 +718,8 @@ func (_m *ClobKeeper) InitializeEquityTierLimit(ctx types.Context, config clobty
 	return r0
 }
 
-// InitializeNewStreams provides a mock function with given fields: ctx
-func (_m *ClobKeeper) InitializeNewStreams(ctx types.Context) {
+// InitializeNewGrpcStreams provides a mock function with given fields: ctx
+func (_m *ClobKeeper) InitializeNewGrpcStreams(ctx types.Context) {
 	_m.Called(ctx)
 }
 
diff --git a/protocol/mocks/MemClobKeeper.go b/protocol/mocks/MemClobKeeper.go
index 1143e2008e..b7f71ecdb2 100644
--- a/protocol/mocks/MemClobKeeper.go
+++ b/protocol/mocks/MemClobKeeper.go
@@ -425,11 +425,6 @@ func (_m *MemClobKeeper) SendOrderbookUpdates(ctx types.Context, offchainUpdates
 	_m.Called(ctx, offchainUpdates)
 }
 
-// SendTakerOrderStatus provides a mock function with given fields: ctx, takerOrder
-func (_m *MemClobKeeper) SendTakerOrderStatus(ctx types.Context, takerOrder clobtypes.StreamTakerOrder) {
-	_m.Called(ctx, takerOrder)
-}
-
 // SetLongTermOrderPlacement provides a mock function with given fields: ctx, order, blockHeight
 func (_m *MemClobKeeper) SetLongTermOrderPlacement(ctx types.Context, order clobtypes.Order, blockHeight uint32) {
 	_m.Called(ctx, order, blockHeight)
diff --git a/protocol/streaming/full_node_streaming_manager.go b/protocol/streaming/full_node_streaming_manager.go
deleted file mode 100644
index 31a82855ac..0000000000
--- a/protocol/streaming/full_node_streaming_manager.go
+++ /dev/null
@@ -1,716 +0,0 @@
-package streaming
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-
-	"cosmossdk.io/log"
-	sdk "github.com/cosmos/cosmos-sdk/types"
-	"github.com/dydxprotocol/v4-chain/protocol/lib/metrics"
-	"github.com/dydxprotocol/v4-chain/protocol/streaming/types"
-	streaming_util "github.com/dydxprotocol/v4-chain/protocol/streaming/util"
-	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
-)
-
-var _ types.FullNodeStreamingManager = (*FullNodeStreamingManagerImpl)(nil)
-
-// FullNodeStreamingManagerImpl is an implementation for managing streaming subscriptions.
-type FullNodeStreamingManagerImpl struct {
-	sync.Mutex
-
-	logger log.Logger
-
-	// orderbookSubscriptions maps subscription IDs to their respective orderbook subscriptions.
-	orderbookSubscriptions map[uint32]*OrderbookSubscription
-	nextSubscriptionId     uint32
-
-	// stream will batch and flush out messages every 10 ms.
-	ticker *time.Ticker
-	done   chan bool
-
-	// TODO: Consolidate the streamUpdateCache and streamUpdateSubscriptionCache into a single
-	// struct to avoid the need to maintain two separate slices for the same data.
-
-	// list of stream updates.
-	streamUpdateCache []clobtypes.StreamUpdate
-	// list of subscription ids for each stream update.
-	streamUpdateSubscriptionCache [][]uint32
-	// map from clob pair id to subscription ids.
-	clobPairIdToSubscriptionIdMapping map[uint32][]uint32
-	// map from subaccount id to subscription ids.
-	subaccountIdToSubscriptionIdMapping map[satypes.SubaccountId][]uint32
-
-	maxUpdatesInCache          uint32
-	maxSubscriptionChannelSize uint32
-
-	// Block interval in which snapshot info should be sent out in.
-	// Defaults to 0, which means only one snapshot will be sent out.
-	snapshotBlockInterval uint32
-}
-
-// OrderbookSubscription represents a active subscription to the orderbook updates stream.
-type OrderbookSubscription struct {
-	subscriptionId uint32
-
-	// Initialize the subscription with orderbook snapshots.
-	initialize *sync.Once
-
-	// Clob pair ids to subscribe to.
-	clobPairIds []uint32
-
-	// Subaccount ids to subscribe to.
-	subaccountIds []satypes.SubaccountId
-
-	// Stream
-	messageSender types.OutgoingMessageSender
-
-	// Channel to buffer writes before the stream
-	updatesChannel chan []clobtypes.StreamUpdate
-
-	// If interval snapshots are turned on, the next block height at which
-	// a snapshot should be sent out.
-	nextSnapshotBlock uint32
-}
-
-func NewFullNodeStreamingManager(
-	logger log.Logger,
-	flushIntervalMs uint32,
-	maxUpdatesInCache uint32,
-	maxSubscriptionChannelSize uint32,
-	snapshotBlockInterval uint32,
-) *FullNodeStreamingManagerImpl {
-	fullNodeStreamingManager := &FullNodeStreamingManagerImpl{
-		logger:                 logger,
-		orderbookSubscriptions: make(map[uint32]*OrderbookSubscription),
-		nextSubscriptionId:     0,
-
-		ticker:                              time.NewTicker(time.Duration(flushIntervalMs) * time.Millisecond),
-		done:                                make(chan bool),
-		streamUpdateCache:                   make([]clobtypes.StreamUpdate, 0),
-		streamUpdateSubscriptionCache:       make([][]uint32, 0),
-		clobPairIdToSubscriptionIdMapping:   make(map[uint32][]uint32),
-		subaccountIdToSubscriptionIdMapping: make(map[satypes.SubaccountId][]uint32),
-
-		maxUpdatesInCache:          maxUpdatesInCache,
-		maxSubscriptionChannelSize: maxSubscriptionChannelSize,
-		snapshotBlockInterval:      snapshotBlockInterval,
-	}
-
-	// Start the goroutine for pushing order updates through.
-	// Sender goroutine for the subscription channels.
-	go func() {
-		for {
-			select {
-			case <-fullNodeStreamingManager.ticker.C:
-				fullNodeStreamingManager.FlushStreamUpdates()
-			case <-fullNodeStreamingManager.done:
-				fullNodeStreamingManager.logger.Info(
-					"Stream poller goroutine shutting down",
-				)
-				return
-			}
-		}
-	}()
-
-	return fullNodeStreamingManager
-}
-
-func (sm *FullNodeStreamingManagerImpl) Enabled() bool {
-	return true
-}
-
-func (sm *FullNodeStreamingManagerImpl) EmitMetrics() {
-	metrics.SetGauge(
-		metrics.GrpcStreamNumUpdatesBuffered,
-		float32(len(sm.streamUpdateCache)),
-	)
-	metrics.SetGauge(
-		metrics.GrpcStreamSubscriberCount,
-		float32(len(sm.orderbookSubscriptions)),
-	)
-	for _, subscription := range sm.orderbookSubscriptions {
-		metrics.AddSample(
-			metrics.GrpcSubscriptionChannelLength,
-			float32(len(subscription.updatesChannel)),
-		)
-	}
-}
-
-// Subscribe subscribes to the orderbook updates stream.
-func (sm *FullNodeStreamingManagerImpl) Subscribe(
-	clobPairIds []uint32,
-	subaccountIds []*satypes.SubaccountId,
-	messageSender types.OutgoingMessageSender,
-) (
-	err error,
-) {
-	// Perform some basic validation on the request.
-	if len(clobPairIds) == 0 && len(subaccountIds) == 0 {
-		return types.ErrInvalidStreamingRequest
-	}
-
-	sm.Lock()
-	sIds := make([]satypes.SubaccountId, len(subaccountIds))
-	for i, subaccountId := range subaccountIds {
-		sIds[i] = *subaccountId
-	}
-	subscription := &OrderbookSubscription{
-		subscriptionId: sm.nextSubscriptionId,
-		initialize:     &sync.Once{},
-		clobPairIds:    clobPairIds,
-		subaccountIds:  sIds,
-		messageSender:  messageSender,
-		updatesChannel: make(chan []clobtypes.StreamUpdate, sm.maxSubscriptionChannelSize),
-	}
-	for _, clobPairId := range clobPairIds {
-		// if clobPairId exists in the map, append the subscription id to the slice
-		// otherwise, create a new slice with the subscription id
-		if _, ok := sm.clobPairIdToSubscriptionIdMapping[clobPairId]; !ok {
-			sm.clobPairIdToSubscriptionIdMapping[clobPairId] = []uint32{}
-		}
-		sm.clobPairIdToSubscriptionIdMapping[clobPairId] = append(
-			sm.clobPairIdToSubscriptionIdMapping[clobPairId],
-			sm.nextSubscriptionId,
-		)
-	}
-	for _, subaccountId := range sIds {
-		// if subaccountId exists in the map, append the subscription id to the slice
-		// otherwise, create a new slice with the subscription id
-		if _, ok := sm.subaccountIdToSubscriptionIdMapping[subaccountId]; !ok {
-			sm.subaccountIdToSubscriptionIdMapping[subaccountId] = []uint32{}
-		}
-		sm.subaccountIdToSubscriptionIdMapping[subaccountId] = append(
-			sm.subaccountIdToSubscriptionIdMapping[subaccountId],
-			sm.nextSubscriptionId,
-		)
-	}
-
-	sm.logger.Info(
-		fmt.Sprintf(
-			"New subscription id %+v for clob pair ids: %+v and subaccount ids: %+v",
-			subscription.subscriptionId,
-			clobPairIds,
-			subaccountIds,
-		),
-	)
-	sm.orderbookSubscriptions[subscription.subscriptionId] = subscription
-	sm.nextSubscriptionId++
-	sm.EmitMetrics()
-	sm.Unlock()
-
-	// Use current goroutine to consistently poll subscription channel for updates
-	// to send through stream.
-	for updates := range subscription.updatesChannel {
-		metrics.IncrCounter(
-			metrics.GrpcSendResponseToSubscriberCount,
-			1,
-		)
-		err = subscription.messageSender.Send(
-			&clobtypes.StreamOrderbookUpdatesResponse{
-				Updates: updates,
-			},
-		)
-		if err != nil {
-			// On error, remove the subscription from the streaming manager
-			sm.logger.Error(
-				fmt.Sprintf(
-					"Error sending out update for streaming subscription %+v. Dropping subsciption connection.",
-					subscription.subscriptionId,
-				),
-				"err", err,
-			)
-			// Break out of the loop, stopping this goroutine.
-			// The channel will fill up and the main thread will prune the subscription.
-			break
-		}
-	}
-
-	sm.logger.Info(
-		fmt.Sprintf(
-			"Terminating poller for subscription id %+v",
-			subscription.subscriptionId,
-		),
-	)
-	return err
-}
-
-// removeSubscription removes a subscription from the streaming manager.
-// The streaming manager's lock should already be acquired before calling this.
-func (sm *FullNodeStreamingManagerImpl) removeSubscription(
-	subscriptionIdToRemove uint32,
-) {
-	subscription := sm.orderbookSubscriptions[subscriptionIdToRemove]
-	if subscription == nil {
-		return
-	}
-	close(subscription.updatesChannel)
-	delete(sm.orderbookSubscriptions, subscriptionIdToRemove)
-
-	// Iterate over the clobPairIdToSubscriptionIdMapping to remove the subscriptionIdToRemove
-	for pairId, subscriptionIds := range sm.clobPairIdToSubscriptionIdMapping {
-		for i, id := range subscriptionIds {
-			if id == subscriptionIdToRemove {
-				// Remove the subscription ID from the slice
-				sm.clobPairIdToSubscriptionIdMapping[pairId] = append(subscriptionIds[:i], subscriptionIds[i+1:]...)
-				break
-			}
-		}
-		// If the list is empty after removal, delete the key from the map
-		if len(sm.clobPairIdToSubscriptionIdMapping[pairId]) == 0 {
-			delete(sm.clobPairIdToSubscriptionIdMapping, pairId)
-		}
-	}
-
-	// Iterate over the subaccountIdToSubscriptionIdMapping to remove the subscriptionIdToRemove
-	for subaccountId, subscriptionIds := range sm.subaccountIdToSubscriptionIdMapping {
-		for i, id := range subscriptionIds {
-			if id == subscriptionIdToRemove {
-				// Remove the subscription ID from the slice
-				sm.subaccountIdToSubscriptionIdMapping[subaccountId] = append(subscriptionIds[:i], subscriptionIds[i+1:]...)
-				break
-			}
-		}
-		// If the list is empty after removal, delete the key from the map
-		if len(sm.subaccountIdToSubscriptionIdMapping[subaccountId]) == 0 {
-			delete(sm.subaccountIdToSubscriptionIdMapping, subaccountId)
-		}
-	}
-
-	sm.logger.Info(
-		fmt.Sprintf("Removed streaming subscription id %+v", subscriptionIdToRemove),
-	)
-}
-
-func (sm *FullNodeStreamingManagerImpl) Stop() {
-	sm.done <- true
-}
-
-func toOrderbookStreamUpdate(
-	offchainUpdates *clobtypes.OffchainUpdates,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) []clobtypes.StreamUpdate {
-	v1updates, err := streaming_util.GetOffchainUpdatesV1(offchainUpdates)
-	if err != nil {
-		panic(err)
-	}
-	return []clobtypes.StreamUpdate{
-		{
-			UpdateMessage: &clobtypes.StreamUpdate_OrderbookUpdate{
-				OrderbookUpdate: &clobtypes.StreamOrderbookUpdate{
-					Updates:  v1updates,
-					Snapshot: true,
-				},
-			},
-			BlockHeight: blockHeight,
-			ExecMode:    uint32(execMode),
-		},
-	}
-}
-
-func toSubaccountStreamUpdates(
-	saUpdates []*satypes.StreamSubaccountUpdate,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) []clobtypes.StreamUpdate {
-	streamUpdates := make([]clobtypes.StreamUpdate, 0)
-	for _, saUpdate := range saUpdates {
-		streamUpdates = append(streamUpdates, clobtypes.StreamUpdate{
-			UpdateMessage: &clobtypes.StreamUpdate_SubaccountUpdate{
-				SubaccountUpdate: saUpdate,
-			},
-			BlockHeight: blockHeight,
-			ExecMode:    uint32(execMode),
-		})
-	}
-	return streamUpdates
-}
-
-func (sm *FullNodeStreamingManagerImpl) sendStreamUpdates(
-	subscriptionId uint32,
-	streamUpdates []clobtypes.StreamUpdate,
-) {
-	removeSubscription := false
-	subscription, ok := sm.orderbookSubscriptions[subscriptionId]
-	if !ok {
-		sm.logger.Error(
-			fmt.Sprintf(
-				"Streaming subscription id %+v not found. This should not happen.",
-				subscriptionId,
-			),
-		)
-		return
-	}
-
-	select {
-	case subscription.updatesChannel <- streamUpdates:
-	default:
-		sm.logger.Error(
-			fmt.Sprintf(
-				"Streaming subscription id %+v channel full capacity. Dropping subscription connection.",
-				subscriptionId,
-			),
-		)
-		removeSubscription = true
-	}
-
-	if removeSubscription {
-		sm.removeSubscription(subscriptionId)
-	}
-}
-
-// SendCombinedSnapshot sends messages to a particular subscriber without buffering.
-// Note this method requires the lock and assumes that the lock has already been
-// acquired by the caller.
-func (sm *FullNodeStreamingManagerImpl) SendCombinedSnapshot(
-	offchainUpdates *clobtypes.OffchainUpdates,
-	saUpdates []*satypes.StreamSubaccountUpdate,
-	subscriptionId uint32,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) {
-	defer metrics.ModuleMeasureSince(
-		metrics.FullNodeGrpc,
-		metrics.GrpcSendOrderbookSnapshotLatency,
-		time.Now(),
-	)
-
-	var streamUpdates []clobtypes.StreamUpdate
-	streamUpdates = append(streamUpdates, toOrderbookStreamUpdate(offchainUpdates, blockHeight, execMode)...)
-	streamUpdates = append(streamUpdates, toSubaccountStreamUpdates(saUpdates, blockHeight, execMode)...)
-	sm.sendStreamUpdates(subscriptionId, streamUpdates)
-}
-
-// TracksSubaccountId checks if a subaccount id is being tracked by the streaming manager.
-func (sm *FullNodeStreamingManagerImpl) TracksSubaccountId(subaccountId satypes.SubaccountId) bool {
-	sm.Lock()
-	defer sm.Unlock()
-	_, exists := sm.subaccountIdToSubscriptionIdMapping[subaccountId]
-	return exists
-}
-
-// SendOrderbookUpdates groups updates by their clob pair ids and
-// sends messages to the subscribers.
-func (sm *FullNodeStreamingManagerImpl) SendOrderbookUpdates(
-	offchainUpdates *clobtypes.OffchainUpdates,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) {
-	defer metrics.ModuleMeasureSince(
-		metrics.FullNodeGrpc,
-		metrics.GrpcSendOrderbookUpdatesLatency,
-		time.Now(),
-	)
-
-	// Group updates by clob pair id.
-	updates := make(map[uint32]*clobtypes.OffchainUpdates)
-	for _, message := range offchainUpdates.Messages {
-		clobPairId := message.OrderId.ClobPairId
-		if _, ok := updates[clobPairId]; !ok {
-			updates[clobPairId] = clobtypes.NewOffchainUpdates()
-		}
-		updates[clobPairId].Messages = append(updates[clobPairId].Messages, message)
-	}
-
-	// Unmarshal each per-clob pair message to v1 updates.
-	streamUpdates := make([]clobtypes.StreamUpdate, 0)
-	clobPairIds := make([]uint32, 0)
-	for clobPairId, update := range updates {
-		v1updates, err := streaming_util.GetOffchainUpdatesV1(update)
-		if err != nil {
-			panic(err)
-		}
-		streamUpdate := clobtypes.StreamUpdate{
-			UpdateMessage: &clobtypes.StreamUpdate_OrderbookUpdate{
-				OrderbookUpdate: &clobtypes.StreamOrderbookUpdate{
-					Updates:  v1updates,
-					Snapshot: false,
-				},
-			},
-			BlockHeight: blockHeight,
-			ExecMode:    uint32(execMode),
-		}
-		streamUpdates = append(streamUpdates, streamUpdate)
-		clobPairIds = append(clobPairIds, clobPairId)
-	}
-
-	sm.AddOrderUpdatesToCache(streamUpdates, clobPairIds)
-}
-
-// SendOrderbookFillUpdates groups fills by their clob pair ids and
-// sends messages to the subscribers.
-func (sm *FullNodeStreamingManagerImpl) SendOrderbookFillUpdates(
-	orderbookFills []clobtypes.StreamOrderbookFill,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-	perpetualIdToClobPairId map[uint32][]clobtypes.ClobPairId,
-) {
-	defer metrics.ModuleMeasureSince(
-		metrics.FullNodeGrpc,
-		metrics.GrpcSendOrderbookFillsLatency,
-		time.Now(),
-	)
-
-	// Group fills by clob pair id.
-	streamUpdates := make([]clobtypes.StreamUpdate, 0)
-	clobPairIds := make([]uint32, 0)
-	for _, orderbookFill := range orderbookFills {
-		// If this is a deleveraging fill, fetch the clob pair id from the deleveraged
-		// perpetual id.
-		// Otherwise, fetch the clob pair id from the first order in `OrderBookMatchFill`.
-		// We can assume there must be an order, and that all orders share the same
-		// clob pair id.
-		clobPairId := uint32(0)
-		if match := orderbookFill.GetClobMatch().GetMatchPerpetualDeleveraging(); match != nil {
-			clobPairId = uint32(perpetualIdToClobPairId[match.PerpetualId][0])
-		} else {
-			clobPairId = orderbookFill.Orders[0].OrderId.ClobPairId
-		}
-		streamUpdate := clobtypes.StreamUpdate{
-			UpdateMessage: &clobtypes.StreamUpdate_OrderFill{
-				OrderFill: &orderbookFill,
-			},
-			BlockHeight: blockHeight,
-			ExecMode:    uint32(execMode),
-		}
-		streamUpdates = append(streamUpdates, streamUpdate)
-		clobPairIds = append(clobPairIds, clobPairId)
-	}
-
-	sm.AddOrderUpdatesToCache(streamUpdates, clobPairIds)
-}
-
-// SendTakerOrderStatus sends out a taker order and its status to the full node streaming service.
-func (sm *FullNodeStreamingManagerImpl) SendTakerOrderStatus(
-	streamTakerOrder clobtypes.StreamTakerOrder,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) {
-	clobPairId := uint32(0)
-	if liqOrder := streamTakerOrder.GetLiquidationOrder(); liqOrder != nil {
-		clobPairId = liqOrder.ClobPairId
-	}
-	if takerOrder := streamTakerOrder.GetOrder(); takerOrder != nil {
-		clobPairId = takerOrder.OrderId.ClobPairId
-	}
-
-	sm.AddOrderUpdatesToCache(
-		[]clobtypes.StreamUpdate{
-			{
-				UpdateMessage: &clobtypes.StreamUpdate_TakerOrder{
-					TakerOrder: &streamTakerOrder,
-				},
-				BlockHeight: blockHeight,
-				ExecMode:    uint32(execMode),
-			},
-		},
-		[]uint32{clobPairId},
-	)
-}
-
-// SendSubaccountUpdates groups subaccount updates by their subaccount ids and
-// sends messages to the subscribers.
-func (sm *FullNodeStreamingManagerImpl) SendSubaccountUpdates(
-	subaccountUpdates []satypes.StreamSubaccountUpdate,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) {
-	defer metrics.ModuleMeasureSince(
-		metrics.FullNodeGrpc,
-		metrics.GrpcSendSubaccountUpdatesLatency,
-		time.Now(),
-	)
-
-	// Group subaccount updates by subaccount id.
-	streamUpdates := make([]clobtypes.StreamUpdate, 0)
-	subaccountIds := make([]*satypes.SubaccountId, 0)
-	for _, subaccountUpdate := range subaccountUpdates {
-		streamUpdate := clobtypes.StreamUpdate{
-			UpdateMessage: &clobtypes.StreamUpdate_SubaccountUpdate{
-				SubaccountUpdate: &subaccountUpdate,
-			},
-			BlockHeight: blockHeight,
-			ExecMode:    uint32(execMode),
-		}
-		streamUpdates = append(streamUpdates, streamUpdate)
-		subaccountIds = append(subaccountIds, subaccountUpdate.SubaccountId)
-	}
-
-	sm.AddSubaccountUpdatesToCache(streamUpdates, subaccountIds)
-}
-
-// AddOrderUpdatesToCache adds a series of updates to the full node streaming cache.
-// Clob pair ids are the clob pair id each update is relevant to.
-func (sm *FullNodeStreamingManagerImpl) AddOrderUpdatesToCache(
-	updates []clobtypes.StreamUpdate,
-	clobPairIds []uint32,
-) {
-	sm.Lock()
-	defer sm.Unlock()
-
-	metrics.IncrCounter(
-		metrics.GrpcAddUpdateToBufferCount,
-		float32(len(updates)),
-	)
-
-	sm.streamUpdateCache = append(sm.streamUpdateCache, updates...)
-	for _, clobPairId := range clobPairIds {
-		sm.streamUpdateSubscriptionCache = append(
-			sm.streamUpdateSubscriptionCache,
-			sm.clobPairIdToSubscriptionIdMapping[clobPairId],
-		)
-	}
-
-	// Remove all subscriptions and wipe the buffer if buffer overflows.
-	sm.RemoveSubscriptionsAndClearBufferIfFull()
-	sm.EmitMetrics()
-}
-
-// AddSubaccountUpdatesToCache adds a series of updates to the full node streaming cache.
-// Subaccount ids are the subaccount id each update is relevant to.
-func (sm *FullNodeStreamingManagerImpl) AddSubaccountUpdatesToCache(
-	updates []clobtypes.StreamUpdate,
-	subaccountIds []*satypes.SubaccountId,
-) {
-	sm.Lock()
-	defer sm.Unlock()
-
-	metrics.IncrCounter(
-		metrics.GrpcAddUpdateToBufferCount,
-		float32(len(updates)),
-	)
-
-	sm.streamUpdateCache = append(sm.streamUpdateCache, updates...)
-	for _, subaccountId := range subaccountIds {
-		sm.streamUpdateSubscriptionCache = append(
-			sm.streamUpdateSubscriptionCache,
-			sm.subaccountIdToSubscriptionIdMapping[*subaccountId],
-		)
-	}
-	sm.RemoveSubscriptionsAndClearBufferIfFull()
-	sm.EmitMetrics()
-}
-
-// RemoveSubscriptionsAndClearBufferIfFull removes all subscriptions and wipes the buffer if buffer overflows.
-// Note this method requires the lock and assumes that the lock has already been
-// acquired by the caller.
-func (sm *FullNodeStreamingManagerImpl) RemoveSubscriptionsAndClearBufferIfFull() {
-	// Remove all subscriptions and wipe the buffer if buffer overflows.
-	if len(sm.streamUpdateCache) > int(sm.maxUpdatesInCache) {
-		sm.logger.Error("Streaming buffer full capacity. Dropping messages and all subscriptions. " +
-			"Disconnect all clients and increase buffer size via the grpc-stream-buffer-size flag.")
-		for id := range sm.orderbookSubscriptions {
-			sm.removeSubscription(id)
-		}
-		sm.streamUpdateCache = nil
-		sm.streamUpdateSubscriptionCache = nil
-	}
-}
-
-func (sm *FullNodeStreamingManagerImpl) FlushStreamUpdates() {
-	sm.Lock()
-	defer sm.Unlock()
-	sm.FlushStreamUpdatesWithLock()
-}
-
-// FlushStreamUpdatesWithLock takes in a list of stream updates and their corresponding subscription IDs,
-// and emits them to subscribers. Note this method requires the lock and assumes that the lock has already been
-// acquired by the caller.
-func (sm *FullNodeStreamingManagerImpl) FlushStreamUpdatesWithLock() {
-	defer metrics.ModuleMeasureSince(
-		metrics.FullNodeGrpc,
-		metrics.GrpcFlushUpdatesLatency,
-		time.Now(),
-	)
-
-	// Map to collect updates for each subscription.
-	subscriptionUpdates := make(map[uint32][]clobtypes.StreamUpdate)
-	idsToRemove := make([]uint32, 0)
-
-	// Collect updates for each subscription.
-	for i, update := range sm.streamUpdateCache {
-		subscriptionIds := sm.streamUpdateSubscriptionCache[i]
-		for _, id := range subscriptionIds {
-			subscriptionUpdates[id] = append(subscriptionUpdates[id], update)
-		}
-	}
-
-	// Non-blocking send updates through subscriber's buffered channel.
-	// If the buffer is full, drop the subscription.
-	for id, updates := range subscriptionUpdates {
-		if subscription, ok := sm.orderbookSubscriptions[id]; ok {
-			metrics.IncrCounter(
-				metrics.GrpcAddToSubscriptionChannelCount,
-				1,
-			)
-			select {
-			case subscription.updatesChannel <- updates:
-			default:
-				idsToRemove = append(idsToRemove, id)
-			}
-		}
-	}
-
-	sm.streamUpdateCache = nil
-	sm.streamUpdateSubscriptionCache = nil
-
-	for _, id := range idsToRemove {
-		sm.logger.Error(
-			fmt.Sprintf(
-				"Streaming subscription id %+v channel full capacity. Dropping subscription connection.",
-				id,
-			),
-		)
-		sm.removeSubscription(id)
-	}
-
-	sm.EmitMetrics()
-}
-
-func (sm *FullNodeStreamingManagerImpl) InitializeNewStreams(
-	getOrderbookSnapshot func(clobPairId clobtypes.ClobPairId) *clobtypes.OffchainUpdates,
-	getSubaccountSnapshot func(subaccountId satypes.SubaccountId) *satypes.StreamSubaccountUpdate,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-) {
-	sm.Lock()
-	defer sm.Unlock()
-
-	// Flush any pending updates before sending the snapshot to avoid
-	// race conditions with the snapshot.
-	sm.FlushStreamUpdatesWithLock()
-
-	updatesByClobPairId := make(map[uint32]*clobtypes.OffchainUpdates)
-
-	for subscriptionId, subscription := range sm.orderbookSubscriptions {
-		// If the snapshot block interval is enabled, reset the sync.Once in order to
-		// re-send snapshots out.
-		if sm.snapshotBlockInterval > 0 &&
-			blockHeight == subscription.nextSnapshotBlock {
-			subscription.initialize = &sync.Once{}
-		}
-
-		subscription.initialize.Do(
-			func() {
-				allUpdates := clobtypes.NewOffchainUpdates()
-				for _, clobPairId := range subscription.clobPairIds {
-					if _, ok := updatesByClobPairId[clobPairId]; !ok {
-						updatesByClobPairId[clobPairId] = getOrderbookSnapshot(clobtypes.ClobPairId(clobPairId))
-					}
-					allUpdates.Append(updatesByClobPairId[clobPairId])
-				}
-				saUpdates := []*satypes.StreamSubaccountUpdate{}
-				for _, subaccountId := range subscription.subaccountIds {
-					saUpdates = append(saUpdates, getSubaccountSnapshot(subaccountId))
-				}
-				sm.SendCombinedSnapshot(allUpdates, saUpdates, subscriptionId, blockHeight, execMode)
-				if sm.snapshotBlockInterval != 0 {
-					subscription.nextSnapshotBlock = blockHeight + sm.snapshotBlockInterval
-				}
-			},
-		)
-	}
-}
diff --git a/protocol/streaming/grpc/grpc_streaming_manager.go b/protocol/streaming/grpc/grpc_streaming_manager.go
new file mode 100644
index 0000000000..54037813f9
--- /dev/null
+++ b/protocol/streaming/grpc/grpc_streaming_manager.go
@@ -0,0 +1,488 @@
+package grpc
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"cosmossdk.io/log"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/gogoproto/proto"
+	ocutypes "github.com/dydxprotocol/v4-chain/protocol/indexer/off_chain_updates/types"
+	"github.com/dydxprotocol/v4-chain/protocol/lib/metrics"
+	"github.com/dydxprotocol/v4-chain/protocol/streaming/grpc/types"
+	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
+)
+
+var _ types.GrpcStreamingManager = (*GrpcStreamingManagerImpl)(nil)
+
+// GrpcStreamingManagerImpl is an implementation for managing gRPC streaming subscriptions.
+type GrpcStreamingManagerImpl struct {
+	sync.Mutex
+
+	logger log.Logger
+
+	// orderbookSubscriptions maps subscription IDs to their respective orderbook subscriptions.
+	orderbookSubscriptions map[uint32]*OrderbookSubscription
+	nextSubscriptionId     uint32
+
+	// The gRPC stream batches updates and flushes them out at the configured flush interval.
+	ticker *time.Ticker
+	done   chan bool
+	// map of clob pair id to stream updates.
+	streamUpdateCache map[uint32][]clobtypes.StreamUpdate
+	numUpdatesInCache uint32
+
+	maxUpdatesInCache          uint32
+	maxSubscriptionChannelSize uint32
+}
+
+// OrderbookSubscription represents an active subscription to the orderbook updates stream.
+type OrderbookSubscription struct {
+	subscriptionId uint32
+
+	// Initialize the subscription with orderbook snapshots.
+	initialize sync.Once
+
+	// Clob pair ids to subscribe to.
+	clobPairIds []uint32
+
+	// Stream
+	srv clobtypes.Query_StreamOrderbookUpdatesServer
+
+	// Channel to buffer writes before the stream
+	updatesChannel chan []clobtypes.StreamUpdate
+}
+
+func NewGrpcStreamingManager(
+	logger log.Logger,
+	flushIntervalMs uint32,
+	maxUpdatesInCache uint32,
+	maxSubscriptionChannelSize uint32,
+) *GrpcStreamingManagerImpl {
+	logger = logger.With(log.ModuleKey, "grpc-streaming")
+	grpcStreamingManager := &GrpcStreamingManagerImpl{
+		logger:                 logger,
+		orderbookSubscriptions: make(map[uint32]*OrderbookSubscription),
+		nextSubscriptionId:     0,
+
+		ticker:            time.NewTicker(time.Duration(flushIntervalMs) * time.Millisecond),
+		done:              make(chan bool),
+		streamUpdateCache: make(map[uint32][]clobtypes.StreamUpdate),
+		numUpdatesInCache: 0,
+
+		maxUpdatesInCache:          maxUpdatesInCache,
+		maxSubscriptionChannelSize: maxSubscriptionChannelSize,
+	}
+
+	// Start the goroutine for pushing order updates through.
+	// Sender goroutine for the subscription channels.
+	go func() {
+		for {
+			select {
+			case <-grpcStreamingManager.ticker.C:
+				grpcStreamingManager.FlushStreamUpdates()
+			case <-grpcStreamingManager.done:
+				grpcStreamingManager.logger.Info(
+					"GRPC Stream poller goroutine shutting down",
+				)
+				return
+			}
+		}
+	}()
+
+	return grpcStreamingManager
+}
+
+func (sm *GrpcStreamingManagerImpl) Enabled() bool {
+	return true
+}
+
+func (sm *GrpcStreamingManagerImpl) EmitMetrics() {
+	metrics.SetGauge(
+		metrics.GrpcStreamNumUpdatesBuffered,
+		float32(sm.numUpdatesInCache),
+	)
+	metrics.SetGauge(
+		metrics.GrpcStreamSubscriberCount,
+		float32(len(sm.orderbookSubscriptions)),
+	)
+	for _, subscription := range sm.orderbookSubscriptions {
+		metrics.AddSample(
+			metrics.GrpcSubscriptionChannelLength,
+			float32(len(subscription.updatesChannel)),
+		)
+	}
+}
+
+// Subscribe subscribes to the orderbook updates stream.
+func (sm *GrpcStreamingManagerImpl) Subscribe(
+	req clobtypes.StreamOrderbookUpdatesRequest,
+	srv clobtypes.Query_StreamOrderbookUpdatesServer,
+) (
+	err error,
+) {
+	clobPairIds := req.GetClobPairId()
+
+	// Perform some basic validation on the request.
+	if len(clobPairIds) == 0 {
+		return clobtypes.ErrInvalidGrpcStreamingRequest
+	}
+
+	sm.Lock()
+	subscription := &OrderbookSubscription{
+		subscriptionId: sm.nextSubscriptionId,
+		clobPairIds:    clobPairIds,
+		srv:            srv,
+		updatesChannel: make(chan []clobtypes.StreamUpdate, sm.maxSubscriptionChannelSize),
+	}
+
+	sm.logger.Info(
+		fmt.Sprintf(
+			"New subscription id %+v for clob pair ids: %+v",
+			subscription.subscriptionId,
+			clobPairIds,
+		),
+	)
+	sm.orderbookSubscriptions[subscription.subscriptionId] = subscription
+	sm.nextSubscriptionId++
+	sm.EmitMetrics()
+	sm.Unlock()
+
+	// Use current goroutine to consistently poll subscription channel for updates
+	// to send through stream.
+	for updates := range subscription.updatesChannel {
+		metrics.IncrCounter(
+			metrics.GrpcSendResponseToSubscriberCount,
+			1,
+		)
+		err = subscription.srv.Send(
+			&clobtypes.StreamOrderbookUpdatesResponse{
+				Updates: updates,
+			},
+		)
+		if err != nil {
+			// On error, remove the subscription from the streaming manager
+			sm.logger.Error(
+				fmt.Sprintf(
+					"Error sending out update for grpc streaming subscription %+v. Dropping subscription connection.",
+					subscription.subscriptionId,
+				),
+				"err", err,
+			)
+			// Break out of the loop, stopping this goroutine.
+			// The channel will fill up and the main thread will prune the subscription.
+			break
+		}
+	}
+
+	sm.logger.Info(
+		fmt.Sprintf(
+			"Terminating poller for subscription id %+v",
+			subscription.subscriptionId,
+		),
+	)
+	return err
+}
+
+// removeSubscription removes a subscription from the grpc streaming manager.
+// The streaming manager's lock should already be acquired before calling this.
+func (sm *GrpcStreamingManagerImpl) removeSubscription(
+	subscriptionIdToRemove uint32,
+) {
+	subscription := sm.orderbookSubscriptions[subscriptionIdToRemove]
+	if subscription == nil {
+		return
+	}
+	close(subscription.updatesChannel)
+	delete(sm.orderbookSubscriptions, subscriptionIdToRemove)
+	sm.logger.Info(
+		fmt.Sprintf("Removed grpc streaming subscription id %+v", subscriptionIdToRemove),
+	)
+}
+
+func (sm *GrpcStreamingManagerImpl) Stop() {
+	sm.done <- true
+}
+
+// SendSnapshot sends messages to a particular subscriber without buffering.
+// Note this method requires the lock and assumes that the lock has already been
+// acquired by the caller.
+func (sm *GrpcStreamingManagerImpl) SendSnapshot(
+	offchainUpdates *clobtypes.OffchainUpdates,
+	subscriptionId uint32,
+	blockHeight uint32,
+	execMode sdk.ExecMode,
+) {
+	defer metrics.ModuleMeasureSince(
+		metrics.FullNodeGrpc,
+		metrics.GrpcSendOrderbookSnapshotLatency,
+		time.Now(),
+	)
+
+	v1updates, err := GetOffchainUpdatesV1(offchainUpdates)
+	if err != nil {
+		panic(err)
+	}
+
+	removeSubscription := false
+	if len(v1updates) > 0 {
+		subscription, ok := sm.orderbookSubscriptions[subscriptionId]
+		if !ok {
+			sm.logger.Error(
+				fmt.Sprintf(
+					"GRPC Streaming subscription id %+v not found. This should not happen.",
+					subscriptionId,
+				),
+			)
+			return
+		}
+		streamUpdates := []clobtypes.StreamUpdate{
+			{
+				UpdateMessage: &clobtypes.StreamUpdate_OrderbookUpdate{
+					OrderbookUpdate: &clobtypes.StreamOrderbookUpdate{
+						Updates:  v1updates,
+						Snapshot: true,
+					},
+				},
+				BlockHeight: blockHeight,
+				ExecMode:    uint32(execMode),
+			},
+		}
+		metrics.IncrCounter(
+			metrics.GrpcAddToSubscriptionChannelCount,
+			1,
+		)
+		select {
+		case subscription.updatesChannel <- streamUpdates:
+		default:
+			sm.logger.Error(
+				fmt.Sprintf(
+					"GRPC Streaming subscription id %+v channel full capacity. Dropping subscription connection.",
+					subscriptionId,
+				),
+			)
+			removeSubscription = true
+		}
+	}
+
+	// Clean up the subscription if its buffered channel was full.
+	// The subscription is dropped rather than blocking the caller on a slow consumer.
+	if removeSubscription {
+		sm.removeSubscription(subscriptionId)
+	}
+}
+
+// SendOrderbookUpdates groups updates by their clob pair ids and
+// sends messages to the subscribers.
+func (sm *GrpcStreamingManagerImpl) SendOrderbookUpdates(
+	offchainUpdates *clobtypes.OffchainUpdates,
+	blockHeight uint32,
+	execMode sdk.ExecMode,
+) {
+	defer metrics.ModuleMeasureSince(
+		metrics.FullNodeGrpc,
+		metrics.GrpcSendOrderbookUpdatesLatency,
+		time.Now(),
+	)
+
+	// Group updates by clob pair id.
+	updates := make(map[uint32]*clobtypes.OffchainUpdates)
+	for _, message := range offchainUpdates.Messages {
+		clobPairId := message.OrderId.ClobPairId
+		if _, ok := updates[clobPairId]; !ok {
+			updates[clobPairId] = clobtypes.NewOffchainUpdates()
+		}
+		updates[clobPairId].Messages = append(updates[clobPairId].Messages, message)
+	}
+
+	// Unmarshal each per-clob pair message to v1 updates.
+	updatesByClobPairId := make(map[uint32][]clobtypes.StreamUpdate)
+	for clobPairId, update := range updates {
+		v1updates, err := GetOffchainUpdatesV1(update)
+		if err != nil {
+			panic(err)
+		}
+		updatesByClobPairId[clobPairId] = []clobtypes.StreamUpdate{
+			{
+				UpdateMessage: &clobtypes.StreamUpdate_OrderbookUpdate{
+					OrderbookUpdate: &clobtypes.StreamOrderbookUpdate{
+						Updates:  v1updates,
+						Snapshot: false,
+					},
+				},
+				BlockHeight: blockHeight,
+				ExecMode:    uint32(execMode),
+			},
+		}
+	}
+
+	sm.AddUpdatesToCache(updatesByClobPairId, uint32(len(updates)))
+}
+
+// SendOrderbookFillUpdates groups fills by their clob pair ids and
+// sends messages to the subscribers.
+func (sm *GrpcStreamingManagerImpl) SendOrderbookFillUpdates(
+	ctx sdk.Context,
+	orderbookFills []clobtypes.StreamOrderbookFill,
+	blockHeight uint32,
+	execMode sdk.ExecMode,
+) {
+	defer metrics.ModuleMeasureSince(
+		metrics.FullNodeGrpc,
+		metrics.GrpcSendOrderbookFillsLatency,
+		time.Now(),
+	)
+
+	// Group fills by clob pair id.
+	updatesByClobPairId := make(map[uint32][]clobtypes.StreamUpdate)
+	for _, orderbookFill := range orderbookFills {
+		// Fetch the clob pair id from the first order in `OrderBookMatchFill`.
+		// We can assume there must be an order, and that all orders share the same
+		// clob pair id.
+		clobPairId := orderbookFill.Orders[0].OrderId.ClobPairId
+		if _, ok := updatesByClobPairId[clobPairId]; !ok {
+			updatesByClobPairId[clobPairId] = []clobtypes.StreamUpdate{}
+		}
+		streamUpdate := clobtypes.StreamUpdate{
+			UpdateMessage: &clobtypes.StreamUpdate_OrderFill{
+				OrderFill: &orderbookFill,
+			},
+			BlockHeight: blockHeight,
+			ExecMode:    uint32(execMode),
+		}
+		updatesByClobPairId[clobPairId] = append(updatesByClobPairId[clobPairId], streamUpdate)
+	}
+
+	sm.AddUpdatesToCache(updatesByClobPairId, uint32(len(orderbookFills)))
+}
+
+func (sm *GrpcStreamingManagerImpl) AddUpdatesToCache(
+	updatesByClobPairId map[uint32][]clobtypes.StreamUpdate,
+	numUpdatesToAdd uint32,
+) {
+	sm.Lock()
+	defer sm.Unlock()
+
+	metrics.IncrCounter(
+		metrics.GrpcAddUpdateToBufferCount,
+		1,
+	)
+
+	for clobPairId, streamUpdates := range updatesByClobPairId {
+		sm.streamUpdateCache[clobPairId] = append(sm.streamUpdateCache[clobPairId], streamUpdates...)
+	}
+	sm.numUpdatesInCache += numUpdatesToAdd
+
+	// Remove all subscriptions and wipe the buffer if buffer overflows.
+	if sm.numUpdatesInCache > sm.maxUpdatesInCache {
+		sm.logger.Error("GRPC Streaming buffer full capacity. Dropping messages and all subscriptions. " +
+			"Disconnect all clients and increase buffer size via the grpc-stream-buffer-size flag.")
+		for id := range sm.orderbookSubscriptions {
+			sm.removeSubscription(id)
+		}
+		clear(sm.streamUpdateCache)
+		sm.numUpdatesInCache = 0
+	}
+	sm.EmitMetrics()
+}
+
+func (sm *GrpcStreamingManagerImpl) FlushStreamUpdates() {
+	sm.Lock()
+	defer sm.Unlock()
+	sm.FlushStreamUpdatesWithLock()
+}
+
+// FlushStreamUpdatesWithLock reads the cached stream updates, grouped by clob pair id, and emits them to subscribers.
+// Note this method requires the lock and assumes that the lock has already been
+// acquired by the caller.
+func (sm *GrpcStreamingManagerImpl) FlushStreamUpdatesWithLock() {
+	defer metrics.ModuleMeasureSince(
+		metrics.FullNodeGrpc,
+		metrics.GrpcFlushUpdatesLatency,
+		time.Now(),
+	)
+
+	// Non-blocking send updates through subscriber's buffered channel.
+	// If the buffer is full, drop the subscription.
+	idsToRemove := make([]uint32, 0)
+	for id, subscription := range sm.orderbookSubscriptions {
+		streamUpdatesForSubscription := make([]clobtypes.StreamUpdate, 0)
+		for _, clobPairId := range subscription.clobPairIds {
+			if update, ok := sm.streamUpdateCache[clobPairId]; ok {
+				streamUpdatesForSubscription = append(streamUpdatesForSubscription, update...)
+			}
+		}
+
+		if len(streamUpdatesForSubscription) > 0 {
+			metrics.IncrCounter(
+				metrics.GrpcAddToSubscriptionChannelCount,
+				1,
+			)
+			select {
+			case subscription.updatesChannel <- streamUpdatesForSubscription:
+			default:
+				idsToRemove = append(idsToRemove, id)
+			}
+		}
+	}
+
+	clear(sm.streamUpdateCache)
+	sm.numUpdatesInCache = 0
+
+	for _, id := range idsToRemove {
+		sm.logger.Error(
+			fmt.Sprintf(
+				"GRPC Streaming subscription id %+v channel full capacity. Dropping subscription connection.",
+				id,
+			),
+		)
+		sm.removeSubscription(id)
+	}
+
+	sm.EmitMetrics()
+}
+
+func (sm *GrpcStreamingManagerImpl) InitializeNewGrpcStreams(
+	getOrderbookSnapshot func(clobPairId clobtypes.ClobPairId) *clobtypes.OffchainUpdates,
+	blockHeight uint32,
+	execMode sdk.ExecMode,
+) {
+	sm.Lock()
+	defer sm.Unlock()
+
+	// Flush any pending updates before sending the snapshot to avoid
+	// race conditions with the snapshot.
+	sm.FlushStreamUpdatesWithLock()
+
+	updatesByClobPairId := make(map[uint32]*clobtypes.OffchainUpdates)
+	for subscriptionId, subscription := range sm.orderbookSubscriptions {
+		subscription.initialize.Do(
+			func() {
+				allUpdates := clobtypes.NewOffchainUpdates()
+				for _, clobPairId := range subscription.clobPairIds {
+					if _, ok := updatesByClobPairId[clobPairId]; !ok {
+						updatesByClobPairId[clobPairId] = getOrderbookSnapshot(clobtypes.ClobPairId(clobPairId))
+					}
+					allUpdates.Append(updatesByClobPairId[clobPairId])
+				}
+
+				sm.SendSnapshot(allUpdates, subscriptionId, blockHeight, execMode)
+			},
+		)
+	}
+}
+
+// GetOffchainUpdatesV1 unmarshals messages in offchain updates to OffchainUpdateV1.
+func GetOffchainUpdatesV1(offchainUpdates *clobtypes.OffchainUpdates) ([]ocutypes.OffChainUpdateV1, error) {
+	v1updates := make([]ocutypes.OffChainUpdateV1, 0)
+	for _, message := range offchainUpdates.Messages {
+		var update ocutypes.OffChainUpdateV1
+		err := proto.Unmarshal(message.Message.Value, &update)
+		if err != nil {
+			return nil, err
+		}
+		v1updates = append(v1updates, update)
+	}
+	return v1updates, nil
+}
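
A self-contained toy reduction of the manager's batching/flush design — buffered per-subscription channels, non-blocking sends, and pruning of slow consumers — using local stand-in types rather than the repo's `OrderbookSubscription` and `StreamUpdate`:

```go
package main

import (
	"fmt"
	"time"
)

// subscriber is a toy stand-in for OrderbookSubscription: just a buffered updates channel.
type subscriber struct {
	updates chan []string
}

// flush mirrors the core idea of FlushStreamUpdatesWithLock: non-blocking sends into
// each subscriber's buffered channel, dropping subscribers whose channel is full.
func flush(subs map[uint32]*subscriber, batch []string) {
	var toRemove []uint32
	for id, s := range subs {
		select {
		case s.updates <- batch: // fast consumer: enqueue the whole batch
		default: // slow consumer: channel full, prune the subscription
			toRemove = append(toRemove, id)
		}
	}
	for _, id := range toRemove {
		close(subs[id].updates)
		delete(subs, id)
		fmt.Println("dropped slow subscriber", id)
	}
}

func main() {
	subs := map[uint32]*subscriber{
		0: {updates: make(chan []string, 2)},
		1: {updates: make(chan []string, 2)},
	}
	fastCh := subs[0].updates
	go func() { // only subscriber 0 drains its channel
		for range fastCh {
		}
	}()
	for i := 0; i < 5; i++ {
		flush(subs, []string{fmt.Sprintf("update-%d", i)})
		time.Sleep(10 * time.Millisecond) // stand-in for the ticker-driven flush interval
	}
}
```

Subscriber 1 never drains its channel, so after its two-slot buffer fills it is dropped, which is the same back-pressure policy the manager applies instead of blocking block processing on a slow gRPC client.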
diff --git a/protocol/streaming/noop_streaming_manager.go b/protocol/streaming/grpc/noop_streaming_manager.go
similarity index 51%
rename from protocol/streaming/noop_streaming_manager.go
rename to protocol/streaming/grpc/noop_streaming_manager.go
index 24810fefe2..f5c61f0713 100644
--- a/protocol/streaming/noop_streaming_manager.go
+++ b/protocol/streaming/grpc/noop_streaming_manager.go
@@ -1,13 +1,12 @@
-package streaming
+package grpc
 
 import (
 	sdk "github.com/cosmos/cosmos-sdk/types"
-	"github.com/dydxprotocol/v4-chain/protocol/streaming/types"
+	"github.com/dydxprotocol/v4-chain/protocol/streaming/grpc/types"
 	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
-	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
 )
 
-var _ types.FullNodeStreamingManager = (*NoopGrpcStreamingManager)(nil)
+var _ types.GrpcStreamingManager = (*NoopGrpcStreamingManager)(nil)
 
 type NoopGrpcStreamingManager struct{}
 
@@ -20,51 +19,39 @@ func (sm *NoopGrpcStreamingManager) Enabled() bool {
 }
 
 func (sm *NoopGrpcStreamingManager) Subscribe(
-	_ []uint32,
-	_ []*satypes.SubaccountId,
-	_ types.OutgoingMessageSender,
+	req clobtypes.StreamOrderbookUpdatesRequest,
+	srv clobtypes.Query_StreamOrderbookUpdatesServer,
 ) (
 	err error,
 ) {
-	return types.ErrNotImplemented
+	return clobtypes.ErrGrpcStreamingManagerNotEnabled
 }
 
-func (sm *NoopGrpcStreamingManager) SendOrderbookUpdates(
+func (sm *NoopGrpcStreamingManager) SendSnapshot(
 	updates *clobtypes.OffchainUpdates,
+	subscriptionId uint32,
 	blockHeight uint32,
 	execMode sdk.ExecMode,
 ) {
 }
 
-func (sm *NoopGrpcStreamingManager) SendOrderbookFillUpdates(
-	orderbookFills []clobtypes.StreamOrderbookFill,
-	blockHeight uint32,
-	execMode sdk.ExecMode,
-	perpetualIdToClobPairId map[uint32][]clobtypes.ClobPairId,
-) {
-}
-
-func (sm *NoopGrpcStreamingManager) SendTakerOrderStatus(
-	takerOrder clobtypes.StreamTakerOrder,
+func (sm *NoopGrpcStreamingManager) SendOrderbookUpdates(
+	updates *clobtypes.OffchainUpdates,
 	blockHeight uint32,
 	execMode sdk.ExecMode,
 ) {
 }
 
-func (sm *NoopGrpcStreamingManager) SendSubaccountUpdates(
-	subaccountUpdates []satypes.StreamSubaccountUpdate,
+func (sm *NoopGrpcStreamingManager) SendOrderbookFillUpdates(
+	ctx sdk.Context,
+	orderbookFills []clobtypes.StreamOrderbookFill,
 	blockHeight uint32,
 	execMode sdk.ExecMode,
 ) {
 }
 
-func (sm *NoopGrpcStreamingManager) TracksSubaccountId(id satypes.SubaccountId) bool {
-	return false
-}
-
-func (sm *NoopGrpcStreamingManager) InitializeNewStreams(
+func (sm *NoopGrpcStreamingManager) InitializeNewGrpcStreams(
 	getOrderbookSnapshot func(clobPairId clobtypes.ClobPairId) *clobtypes.OffchainUpdates,
-	getSubaccountSnapshot func(subaccountId satypes.SubaccountId) *satypes.StreamSubaccountUpdate,
 	blockHeight uint32,
 	execMode sdk.ExecMode,
 ) {
diff --git a/protocol/streaming/grpc/types/manager.go b/protocol/streaming/grpc/types/manager.go
new file mode 100644
index 0000000000..74b145985c
--- /dev/null
+++ b/protocol/streaming/grpc/types/manager.go
@@ -0,0 +1,40 @@
+package types
+
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
+)
+
+type GrpcStreamingManager interface {
+	Enabled() bool
+	Stop()
+	// L3+ Orderbook updates.
+	Subscribe(
+		req clobtypes.StreamOrderbookUpdatesRequest,
+		srv clobtypes.Query_StreamOrderbookUpdatesServer,
+	) (
+		err error,
+	)
+	InitializeNewGrpcStreams(
+		getOrderbookSnapshot func(clobPairId clobtypes.ClobPairId) *clobtypes.OffchainUpdates,
+		blockHeight uint32,
+		execMode sdk.ExecMode,
+	)
+	SendSnapshot(
+		offchainUpdates *clobtypes.OffchainUpdates,
+		subscriptionId uint32,
+		blockHeight uint32,
+		execMode sdk.ExecMode,
+	)
+	SendOrderbookUpdates(
+		offchainUpdates *clobtypes.OffchainUpdates,
+		blockHeight uint32,
+		execMode sdk.ExecMode,
+	)
+	SendOrderbookFillUpdates(
+		ctx sdk.Context,
+		orderbookFills []clobtypes.StreamOrderbookFill,
+		blockHeight uint32,
+		execMode sdk.ExecMode,
+	)
+}
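
Callers are expected to hold this interface rather than a concrete manager, which is what makes the noop implementation below a drop-in replacement when streaming is disabled. Here is a trimmed, hypothetical stand-in (`streamingManager`, `noop`, `logging` are illustration-only names, not the real interface) sketching that wiring:

```go
package main

import "fmt"

// streamingManager is a trimmed stand-in for the GrpcStreamingManager interface above.
type streamingManager interface {
	Enabled() bool
	SendOrderbookUpdates(updates []string, blockHeight uint32)
}

// noop mirrors NoopGrpcStreamingManager: every method is a no-op.
type noop struct{}

func (noop) Enabled() bool                             { return false }
func (noop) SendOrderbookUpdates(_ []string, _ uint32) {}

// logging is a stand-in for a real manager that actually emits updates.
type logging struct{}

func (logging) Enabled() bool { return true }
func (logging) SendOrderbookUpdates(updates []string, blockHeight uint32) {
	fmt.Println("streaming", len(updates), "updates at height", blockHeight)
}

// emit is a keeper-style caller: it never branches on a concrete type, only on the
// interface, so swapping in the noop manager is enough to disable streaming.
func emit(sm streamingManager, updates []string, height uint32) {
	if !sm.Enabled() {
		return
	}
	sm.SendOrderbookUpdates(updates, height)
}

func main() {
	emit(noop{}, []string{"a"}, 10)    // no output
	emit(logging{}, []string{"a"}, 10) // streaming 1 updates at height 10
}
```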
diff --git a/protocol/streaming/types/errors.go b/protocol/streaming/types/errors.go
deleted file mode 100644
index af28954670..0000000000
--- a/protocol/streaming/types/errors.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package types
-
-import errorsmod "cosmossdk.io/errors"
-
-const (
-	ModuleName = "full_node_streaming"
-)
-
-var (
-	ErrNotImplemented          = errorsmod.Register(ModuleName, 1, "Not implemented")
-	ErrInvalidStreamingRequest = errorsmod.Register(
-		ModuleName,
-		2,
-		"Invalid full node streaming request",
-	)
-)
diff --git a/protocol/streaming/types/interface.go b/protocol/streaming/types/interface.go
deleted file mode 100644
index 7930853be6..0000000000
--- a/protocol/streaming/types/interface.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package types
-
-import (
-	sdk "github.com/cosmos/cosmos-sdk/types"
-	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
-	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-)
-
-type FullNodeStreamingManager interface {
-	Enabled() bool
-	Stop()
-
-	// Subscribe to streams
-	Subscribe(
-		clobPairIds []uint32,
-		subaccountIds []*satypes.SubaccountId,
-		srv OutgoingMessageSender,
-	) (
-		err error,
-	)
-
-	// L3+ Orderbook updates.
-	InitializeNewStreams(
-		getOrderbookSnapshot func(clobPairId clobtypes.ClobPairId) *clobtypes.OffchainUpdates,
-		getSubaccountSnapshot func(subaccountId satypes.SubaccountId) *satypes.StreamSubaccountUpdate,
-		blockHeight uint32,
-		execMode sdk.ExecMode,
-	)
-	SendOrderbookUpdates(
-		offchainUpdates *clobtypes.OffchainUpdates,
-		blockHeight uint32,
-		execMode sdk.ExecMode,
-	)
-	SendOrderbookFillUpdates(
-		orderbookFills []clobtypes.StreamOrderbookFill,
-		blockHeight uint32,
-		execMode sdk.ExecMode,
-		perpetualIdToClobPairId map[uint32][]clobtypes.ClobPairId,
-	)
-	SendTakerOrderStatus(
-		takerOrder clobtypes.StreamTakerOrder,
-		blockHeight uint32,
-		execMode sdk.ExecMode,
-	)
-	SendSubaccountUpdates(
-		subaccountUpdates []satypes.StreamSubaccountUpdate,
-		blockHeight uint32,
-		execMode sdk.ExecMode,
-	)
-	TracksSubaccountId(id satypes.SubaccountId) bool
-}
-
-type OutgoingMessageSender interface {
-	Send(*clobtypes.StreamOrderbookUpdatesResponse) error
-}
diff --git a/protocol/streaming/util/util.go b/protocol/streaming/util/util.go
deleted file mode 100644
index 985a29ef33..0000000000
--- a/protocol/streaming/util/util.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package util
-
-import (
-	"github.com/cosmos/gogoproto/proto"
-	ocutypes "github.com/dydxprotocol/v4-chain/protocol/indexer/off_chain_updates/types"
-	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
-)
-
-// GetOffchainUpdatesV1 unmarshals messages in offchain updates to OffchainUpdateV1.
-func GetOffchainUpdatesV1(offchainUpdates *clobtypes.OffchainUpdates) ([]ocutypes.OffChainUpdateV1, error) {
-	v1updates := make([]ocutypes.OffChainUpdateV1, 0)
-	for _, message := range offchainUpdates.Messages {
-		var update ocutypes.OffChainUpdateV1
-		err := proto.Unmarshal(message.Message.Value, &update)
-		if err != nil {
-			return nil, err
-		}
-		v1updates = append(v1updates, update)
-	}
-	return v1updates, nil
-}
diff --git a/protocol/streaming/ws/websocket_message_sender.go b/protocol/streaming/ws/websocket_message_sender.go
deleted file mode 100644
index 7a502b098b..0000000000
--- a/protocol/streaming/ws/websocket_message_sender.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package ws
-
-import (
-	"github.com/gorilla/websocket"
-
-	"github.com/cosmos/cosmos-sdk/codec"
-	"github.com/dydxprotocol/v4-chain/protocol/streaming/types"
-	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
-)
-
-var _ types.OutgoingMessageSender = (*WebsocketMessageSender)(nil)
-
-type WebsocketMessageSender struct {
-	cdc codec.JSONCodec
-
-	conn *websocket.Conn
-}
-
-func (wms *WebsocketMessageSender) Send(
-	response *clobtypes.StreamOrderbookUpdatesResponse,
-) (err error) {
-	responseJson, err := wms.cdc.MarshalJSON(response)
-	if err != nil {
-		return err
-	}
-	return wms.conn.WriteMessage(websocket.TextMessage, responseJson)
-}
diff --git a/protocol/streaming/ws/websocket_server.go b/protocol/streaming/ws/websocket_server.go
deleted file mode 100644
index 0b66804595..0000000000
--- a/protocol/streaming/ws/websocket_server.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package ws
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"strconv"
-	"strings"
-	"time"
-
-	"cosmossdk.io/log"
-	"github.com/cosmos/cosmos-sdk/codec"
-	"github.com/dydxprotocol/v4-chain/protocol/streaming/types"
-	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-	"github.com/gorilla/websocket"
-)
-
-var upgrader = websocket.Upgrader{
-	ReadBufferSize:  1024,
-	WriteBufferSize: 1024,
-	CheckOrigin: func(r *http.Request) bool {
-		return true // Allow all connections by default
-	},
-}
-
-type WebsocketServer struct {
-	streamingManager types.FullNodeStreamingManager
-	cdc              codec.JSONCodec
-	logger           log.Logger
-	port             uint16
-	server           *http.Server
-}
-
-func NewWebsocketServer(
-	streamingManager types.FullNodeStreamingManager,
-	cdc codec.JSONCodec,
-	logger log.Logger,
-	port uint16,
-) *WebsocketServer {
-	return &WebsocketServer{
-		streamingManager: streamingManager,
-		cdc:              cdc,
-		logger:           logger.With(log.ModuleKey, "full-node-streaming"),
-		port:             port,
-	}
-}
-
-func (ws *WebsocketServer) Handler(w http.ResponseWriter, r *http.Request) {
-	conn, err := upgrader.Upgrade(w, r, nil)
-	if err != nil {
-		ws.logger.Error(
-			"Error upgrading websocket connection",
-			"error", err,
-		)
-		return
-	}
-	defer conn.Close()
-
-	// Parse clobPairIds from query parameters
-	clobPairIds, err := parseClobPairIds(r)
-	if err != nil {
-		ws.logger.Error(
-			"Error parsing clobPairIds",
-			"err", err,
-		)
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-	// Parse subaccountIds from query parameters
-	subaccountIds, err := parseSubaccountIds(r)
-	if err != nil {
-		ws.logger.Error(
-			"Error parsing subaccountIds",
-			"err", err,
-		)
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-
-	websocketMessageSender := &WebsocketMessageSender{
-		cdc:  ws.cdc,
-		conn: conn,
-	}
-
-	ws.logger.Info(
-		fmt.Sprintf("Received websocket streaming request for clob pair ids: %+v", clobPairIds),
-	)
-
-	err = ws.streamingManager.Subscribe(
-		clobPairIds,
-		subaccountIds,
-		websocketMessageSender,
-	)
-	if err != nil {
-		ws.logger.Error(
-			"Ending handler for websocket connection",
-			"err", err,
-		)
-		return
-	}
-}
-
-// parseSubaccountIds is a helper function to parse the subaccountIds from the query parameters.
-func parseSubaccountIds(r *http.Request) ([]*satypes.SubaccountId, error) {
-	subaccountIdsParam := r.URL.Query().Get("subaccountIds")
-	if subaccountIdsParam == "" {
-		return []*satypes.SubaccountId{}, nil
-	}
-	idStrs := strings.Split(subaccountIdsParam, ",")
-	subaccountIds := make([]*satypes.SubaccountId, 0)
-	for _, idStr := range idStrs {
-		parts := strings.Split(idStr, "/")
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("invalid subaccountId format: %s, expected subaccount_id format: owner/number", idStr)
-		}
-
-		number, err := strconv.Atoi(parts[1])
-		if err != nil {
-			return nil, fmt.Errorf("invalid subaccount number: %s, expected subaccount_id format: owner/number", parts[1])
-		}
-
-		subaccountIds = append(subaccountIds, &satypes.SubaccountId{
-			Owner:  parts[0],
-			Number: uint32(number),
-		})
-	}
-
-	return subaccountIds, nil
-}
-
-// parseClobPairIds is a helper function to parse the clobPairIds from the query parameters.
-func parseClobPairIds(r *http.Request) ([]uint32, error) {
-	clobPairIdsParam := r.URL.Query().Get("clobPairIds")
-	if clobPairIdsParam == "" {
-		return []uint32{}, nil
-	}
-	idStrs := strings.Split(clobPairIdsParam, ",")
-	clobPairIds := make([]uint32, 0)
-	for _, idStr := range idStrs {
-		id, err := strconv.Atoi(idStr)
-		if err != nil {
-			return nil, fmt.Errorf("invalid clobPairId: %s", idStr)
-		}
-		clobPairIds = append(clobPairIds, uint32(id))
-	}
-
-	return clobPairIds, nil
-}
-
-// Start the websocket server in a separate goroutine.
-func (ws *WebsocketServer) Start() {
-	go func() {
-		http.HandleFunc("/ws", ws.Handler)
-		addr := fmt.Sprintf(":%d", ws.port)
-		ws.logger.Info("Starting websocket server on address " + addr)
-
-		server := &http.Server{Addr: addr}
-		ws.server = server
-		err := server.ListenAndServe()
-		if err != nil {
-			ws.logger.Error(
-				"Http websocket server error",
-				"err", err,
-			)
-		}
-		ws.logger.Info("Shutting down websocket server")
-	}()
-}
-
-func (ws *WebsocketServer) Shutdown() {
-	shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 5*time.Second)
-	defer shutdownRelease()
-	err := ws.server.Shutdown(shutdownCtx)
-	if err != nil {
-		ws.logger.Error("Failed to shutdown websocket server", "err", err)
-	}
-}
diff --git a/protocol/testutil/keeper/clob.go b/protocol/testutil/keeper/clob.go
index f5d71bdbaa..2abb897859 100644
--- a/protocol/testutil/keeper/clob.go
+++ b/protocol/testutil/keeper/clob.go
@@ -14,7 +14,7 @@ import (
 	"github.com/dydxprotocol/v4-chain/protocol/indexer/indexer_manager"
 	"github.com/dydxprotocol/v4-chain/protocol/lib"
 	"github.com/dydxprotocol/v4-chain/protocol/mocks"
-	streaming "github.com/dydxprotocol/v4-chain/protocol/streaming"
+	streaming "github.com/dydxprotocol/v4-chain/protocol/streaming/grpc"
 	clobtest "github.com/dydxprotocol/v4-chain/protocol/testutil/clob"
 	"github.com/dydxprotocol/v4-chain/protocol/testutil/constants"
 	asskeeper "github.com/dydxprotocol/v4-chain/protocol/x/assets/keeper"
diff --git a/protocol/testutil/keeper/subaccounts.go b/protocol/testutil/keeper/subaccounts.go
index 786aaa2040..07b33984b0 100644
--- a/protocol/testutil/keeper/subaccounts.go
+++ b/protocol/testutil/keeper/subaccounts.go
@@ -1,13 +1,9 @@
 package keeper
 
 import (
-	"testing"
-
-	"github.com/dydxprotocol/v4-chain/protocol/streaming"
-
-	"math/big"
-
 	"github.com/cosmos/gogoproto/proto"
+	"math/big"
+	"testing"
 
 	dbm "github.com/cosmos/cosmos-db"
 
@@ -113,7 +109,6 @@ func createSubaccountsKeeper(
 		pk,
 		btk,
 		mockIndexerEventsManager,
-		streaming.NewNoopGrpcStreamingManager(),
 	)
 
 	return k, storeKey
diff --git a/protocol/testutil/memclob/keeper.go b/protocol/testutil/memclob/keeper.go
index cc08220789..9ca05d4a96 100644
--- a/protocol/testutil/memclob/keeper.go
+++ b/protocol/testutil/memclob/keeper.go
@@ -512,18 +512,3 @@ func (f *FakeMemClobKeeper) SendOrderbookFillUpdates(
 	orderbookFills []types.StreamOrderbookFill,
 ) {
 }
-
-func (f *FakeMemClobKeeper) SendTakerOrderStatus(
-	ctx sdk.Context,
-	takerOrder types.StreamTakerOrder,
-) {
-}
-
-// Placeholder to satisfy interface implementation of types.MemClobKeeper
-func (f *FakeMemClobKeeper) AddOrderToOrderbookSubaccountUpdatesCheck(
-	ctx sdk.Context,
-	subaccountId satypes.SubaccountId,
-	order types.PendingOpenOrder,
-) satypes.UpdateResult {
-	return satypes.Success
-}
diff --git a/protocol/x/clob/abci.go b/protocol/x/clob/abci.go
index 8aff6110bb..a8c5d34f1b 100644
--- a/protocol/x/clob/abci.go
+++ b/protocol/x/clob/abci.go
@@ -258,8 +258,8 @@ func PrepareCheckState(
 		types.GetInternalOperationsQueueTextString(newLocalValidatorOperationsQueue),
 	)
 
-	// Initialize new streams with orderbook snapshots, if any.
-	keeper.InitializeNewStreams(ctx)
+	// Initialize new gRPC streams with orderbook snapshots, if any.
+	keeper.InitializeNewGrpcStreams(ctx)
 
 	// Set per-orderbook gauges.
 	keeper.MemClob.SetMemclobGauges(ctx)
diff --git a/protocol/x/clob/keeper/grpc_stream_orderbook.go b/protocol/x/clob/keeper/grpc_stream_orderbook.go
index caca5fbfbe..710a6ceec6 100644
--- a/protocol/x/clob/keeper/grpc_stream_orderbook.go
+++ b/protocol/x/clob/keeper/grpc_stream_orderbook.go
@@ -8,11 +8,7 @@ func (k Keeper) StreamOrderbookUpdates(
 	req *types.StreamOrderbookUpdatesRequest,
 	stream types.Query_StreamOrderbookUpdatesServer,
 ) error {
-	err := k.GetFullNodeStreamingManager().Subscribe(
-		req.GetClobPairId(),
-		req.GetSubaccountIds(),
-		stream,
-	)
+	err := k.GetGrpcStreamingManager().Subscribe(*req, stream)
 	if err != nil {
 		return err
 	}
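
For reference (illustration only, not part of this change): a hypothetical client consuming this endpoint once streaming is enabled on the node. The address "localhost:9090" and the clob pair id 0 are placeholders; the generated QueryClient, StreamOrderbookUpdatesRequest, and response accessors come from x/clob/types.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
)

func main() {
	// Dial the node's gRPC endpoint (9090 is the conventional Cosmos SDK port).
	conn, err := grpc.Dial(
		"localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := clobtypes.NewQueryClient(conn)
	stream, err := client.StreamOrderbookUpdates(
		context.Background(),
		&clobtypes.StreamOrderbookUpdatesRequest{ClobPairId: []uint32{0}},
	)
	if err != nil {
		log.Fatal(err)
	}

	// Each response batches one or more StreamUpdate messages.
	for {
		resp, err := stream.Recv()
		if err != nil {
			// Subscribe errors (e.g. streaming disabled on the node) surface here.
			log.Fatal(err)
		}
		log.Printf("received %d stream updates", len(resp.GetUpdates()))
	}
}
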
diff --git a/protocol/x/clob/keeper/keeper.go b/protocol/x/clob/keeper/keeper.go
index 8e2e3e240f..4ea1edee3a 100644
--- a/protocol/x/clob/keeper/keeper.go
+++ b/protocol/x/clob/keeper/keeper.go
@@ -3,7 +3,6 @@ package keeper
 import (
 	"errors"
 	"fmt"
-	satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
 	"sync/atomic"
 
 	"cosmossdk.io/log"
@@ -15,7 +14,7 @@ import (
 	"github.com/dydxprotocol/v4-chain/protocol/indexer/indexer_manager"
 	"github.com/dydxprotocol/v4-chain/protocol/lib"
 	"github.com/dydxprotocol/v4-chain/protocol/lib/metrics"
-	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/types"
+	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/grpc/types"
 	flags "github.com/dydxprotocol/v4-chain/protocol/x/clob/flags"
 	"github.com/dydxprotocol/v4-chain/protocol/x/clob/rate_limit"
 	"github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
@@ -44,7 +43,7 @@ type (
 		rewardsKeeper     types.RewardsKeeper
 
 		indexerEventManager indexer_manager.IndexerEventManager
-		streamingManager    streamingtypes.FullNodeStreamingManager
+		streamingManager    streamingtypes.GrpcStreamingManager
 
 		initialized         *atomic.Bool
 		memStoreInitialized *atomic.Bool
@@ -86,7 +85,7 @@ func NewKeeper(
 	statsKeeper types.StatsKeeper,
 	rewardsKeeper types.RewardsKeeper,
 	indexerEventManager indexer_manager.IndexerEventManager,
-	streamingManager streamingtypes.FullNodeStreamingManager,
+	grpcStreamingManager streamingtypes.GrpcStreamingManager,
 	txDecoder sdk.TxDecoder,
 	clobFlags flags.ClobFlags,
 	placeCancelOrderRateLimiter rate_limit.RateLimiter[sdk.Msg],
@@ -111,7 +110,7 @@ func NewKeeper(
 		statsKeeper:                  statsKeeper,
 		rewardsKeeper:                rewardsKeeper,
 		indexerEventManager:          indexerEventManager,
-		streamingManager:             streamingManager,
+		streamingManager:             grpcStreamingManager,
 		memStoreInitialized:          &atomic.Bool{}, // False by default.
 		initialized:                  &atomic.Bool{}, // False by default.
 		txDecoder:                    txDecoder,
@@ -141,7 +140,7 @@ func (k Keeper) GetIndexerEventManager() indexer_manager.IndexerEventManager {
 	return k.indexerEventManager
 }
 
-func (k Keeper) GetFullNodeStreamingManager() streamingtypes.FullNodeStreamingManager {
+func (k Keeper) GetGrpcStreamingManager() streamingtypes.GrpcStreamingManager {
 	return k.streamingManager
 }
 
@@ -256,32 +255,24 @@ func (k *Keeper) SetAnteHandler(anteHandler sdk.AnteHandler) {
 	k.antehandler = anteHandler
 }
 
-// InitializeNewStreams initializes new streams for all uninitialized clob pairs
+// InitializeNewGrpcStreams initializes new gRPC streams for all uninitialized clob pairs
 // by sending the corresponding orderbook snapshots.
-func (k Keeper) InitializeNewStreams(ctx sdk.Context) {
-	streamingManager := k.GetFullNodeStreamingManager()
+func (k Keeper) InitializeNewGrpcStreams(ctx sdk.Context) {
+	streamingManager := k.GetGrpcStreamingManager()
 
-	streamingManager.InitializeNewStreams(
+	streamingManager.InitializeNewGrpcStreams(
 		func(clobPairId types.ClobPairId) *types.OffchainUpdates {
 			return k.MemClob.GetOffchainUpdatesForOrderbookSnapshot(
 				ctx,
 				clobPairId,
 			)
 		},
-		func(subaccountId satypes.SubaccountId) *satypes.StreamSubaccountUpdate {
-			subaccountUpdate := k.subaccountsKeeper.GetStreamSubaccountUpdate(
-				ctx,
-				subaccountId,
-				true,
-			)
-			return &subaccountUpdate
-		},
 		lib.MustConvertIntegerToUint32(ctx.BlockHeight()),
 		ctx.ExecMode(),
 	)
 }
 
-// SendOrderbookUpdates sends the offchain updates to the Full Node streaming manager.
+// SendOrderbookUpdates sends the offchain updates to the gRPC streaming manager.
 func (k Keeper) SendOrderbookUpdates(
 	ctx sdk.Context,
 	offchainUpdates *types.OffchainUpdates,
@@ -290,14 +281,14 @@ func (k Keeper) SendOrderbookUpdates(
 		return
 	}
 
-	k.GetFullNodeStreamingManager().SendOrderbookUpdates(
+	k.GetGrpcStreamingManager().SendOrderbookUpdates(
 		offchainUpdates,
 		lib.MustConvertIntegerToUint32(ctx.BlockHeight()),
 		ctx.ExecMode(),
 	)
 }
 
-// SendOrderbookFillUpdates sends the orderbook fills to the Full Node streaming manager.
+// SendOrderbookFillUpdates sends the orderbook fills to the gRPC streaming manager.
 func (k Keeper) SendOrderbookFillUpdates(
 	ctx sdk.Context,
 	orderbookFills []types.StreamOrderbookFill,
@@ -305,22 +296,10 @@ func (k Keeper) SendOrderbookFillUpdates(
 	if len(orderbookFills) == 0 {
 		return
 	}
-	k.GetFullNodeStreamingManager().SendOrderbookFillUpdates(
+	k.GetGrpcStreamingManager().SendOrderbookFillUpdates(
+		ctx,
 		orderbookFills,
 		lib.MustConvertIntegerToUint32(ctx.BlockHeight()),
 		ctx.ExecMode(),
-		k.PerpetualIdToClobPairId,
-	)
-}
-
-// SendTakerOrderStatus sends the taker order with its status to the Full Node streaming manager.
-func (k Keeper) SendTakerOrderStatus(
-	ctx sdk.Context,
-	takerOrder types.StreamTakerOrder,
-) {
-	k.GetFullNodeStreamingManager().SendTakerOrderStatus(
-		takerOrder,
-		lib.MustConvertIntegerToUint32(ctx.BlockHeight()),
-		ctx.ExecMode(),
 	)
 }
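
As an illustration of the call pattern above (not part of this change): a finalized match can be routed to the stream by guarding on Enabled(), wrapping the ClobMatch in a StreamOrderbookFill, and handing it to SendOrderbookFillUpdates. The emitFill helper and the ClobMatch-only fill are assumptions for this sketch; the real PersistMatch* paths build the fill via MemClob.GenerateStreamOrderbookFill.

package keeper

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
)

// emitFill is a hypothetical helper showing how a match reaches the gRPC stream.
func emitFill(
	ctx sdk.Context,
	k Keeper,
	clobMatch types.ClobMatch,
) {
	// Skip all work when gRPC streaming is disabled (the no-op manager case).
	if !k.GetGrpcStreamingManager().Enabled() {
		return
	}
	k.SendOrderbookFillUpdates(
		ctx,
		[]types.StreamOrderbookFill{
			{ClobMatch: &clobMatch},
		},
	)
}
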
diff --git a/protocol/x/clob/keeper/order_state.go b/protocol/x/clob/keeper/order_state.go
index 4fdd7a2987..df6909323d 100644
--- a/protocol/x/clob/keeper/order_state.go
+++ b/protocol/x/clob/keeper/order_state.go
@@ -258,7 +258,7 @@ func (k Keeper) RemoveOrderFillAmount(ctx sdk.Context, orderId types.OrderId) {
 	orderAmountFilledStore.Delete(orderId.ToStateKey())
 
 	// If grpc stream is on, zero out the fill amount.
-	if k.GetFullNodeStreamingManager().Enabled() {
+	if k.GetGrpcStreamingManager().Enabled() {
 		allUpdates := types.NewOffchainUpdates()
 		if message, success := off_chain_updates.CreateOrderUpdateMessage(
 			ctx,
diff --git a/protocol/x/clob/keeper/process_operations.go b/protocol/x/clob/keeper/process_operations.go
index 021cdd5094..2f5a38e2fd 100644
--- a/protocol/x/clob/keeper/process_operations.go
+++ b/protocol/x/clob/keeper/process_operations.go
@@ -38,21 +38,6 @@ func fetchOrdersInvolvedInOpQueue(
 	return orderIdSet
 }
 
-// fetchSubaccountIdsInvolvedInOpQueue fetches all SubaccountIds involved in an operations
-// queue's matches and returns them as a set.
-func fetchSubaccountIdsInvolvedInOpQueue(
-	operations []types.InternalOperation,
-) (subaccountIdSet map[satypes.SubaccountId]struct{}) {
-	subaccountIdSet = make(map[satypes.SubaccountId]struct{})
-	for _, operation := range operations {
-		if clobMatch := operation.GetMatch(); clobMatch != nil {
-			subaccountIdSetForClobMatch := clobMatch.GetAllSubaccountIds()
-			subaccountIdSet = lib.MergeMaps(subaccountIdSet, subaccountIdSetForClobMatch)
-		}
-	}
-	return subaccountIdSet
-}
-
 // ProcessProposerOperations updates on-chain state given an []OperationRaw operations queue
 // representing matches that occurred in the previous block. It performs validation on an operations
 // queue. If all validation passes, the operations queue is written to state.
@@ -73,12 +58,8 @@ func (k Keeper) ProcessProposerOperations(
 	}
 
 	// If grpc streams are on, send absolute fill amounts from local + proposed opqueue to the grpc stream.
-	// Also send subaccount snapshots for impacted subaccounts.
-	// An impacted subaccount is defined as:
-	// - A subaccount that was involved in any match in the local opqueue.
-	//   Only matches generate subaccount updates.
 	// This must be sent out to account for checkState being discarded and deliverState being used.
-	if streamingManager := k.GetFullNodeStreamingManager(); streamingManager.Enabled() {
+	if streamingManager := k.GetGrpcStreamingManager(); streamingManager.Enabled() {
 		localValidatorOperationsQueue, _ := k.MemClob.GetOperationsToReplay(ctx)
 		orderIdsFromProposed := fetchOrdersInvolvedInOpQueue(
 			operations,
@@ -94,21 +75,6 @@ func (k Keeper) ProcessProposerOperations(
 			allUpdates.Append(orderbookUpdate)
 		}
 		k.SendOrderbookUpdates(ctx, allUpdates)
-
-		subaccountIdsFromProposed := fetchSubaccountIdsInvolvedInOpQueue(
-			operations,
-		)
-
-		subaccountIdsFromLocal := fetchSubaccountIdsInvolvedInOpQueue(
-			localValidatorOperationsQueue,
-		)
-		subaccountIdsToUpdate := lib.MergeMaps(subaccountIdsFromLocal, subaccountIdsFromProposed)
-		allSubaccountUpdates := make([]satypes.StreamSubaccountUpdate, 0)
-		for subaccountId := range subaccountIdsToUpdate {
-			subaccountUpdate := k.subaccountsKeeper.GetStreamSubaccountUpdate(ctx, subaccountId, false)
-			allSubaccountUpdates = append(allSubaccountUpdates, subaccountUpdate)
-		}
-		k.subaccountsKeeper.SendSubaccountUpdates(ctx, allSubaccountUpdates)
 	}
 
 	log.DebugLog(ctx, "Processing operations queue",
@@ -583,7 +549,7 @@ func (k Keeper) PersistMatchOrdersToState(
 	}
 
 	// if GRPC streaming is on, emit a generated clob match to stream.
-	if streamingManager := k.GetFullNodeStreamingManager(); streamingManager.Enabled() {
+	if streamingManager := k.GetGrpcStreamingManager(); streamingManager.Enabled() {
 		streamOrderbookFill := k.MemClob.GenerateStreamOrderbookFill(
 			ctx,
 			types.ClobMatch{
@@ -692,7 +658,7 @@ func (k Keeper) PersistMatchLiquidationToState(
 	)
 
 	// if GRPC streaming is on, emit a generated clob match to stream.
-	if streamingManager := k.GetFullNodeStreamingManager(); streamingManager.Enabled() {
+	if streamingManager := k.GetGrpcStreamingManager(); streamingManager.Enabled() {
 		streamOrderbookFill := k.MemClob.GenerateStreamOrderbookFill(
 			ctx,
 			types.ClobMatch{
@@ -869,22 +835,6 @@ func (k Keeper) PersistMatchDeleveragingToState(
 				),
 			),
 		)
-		// if GRPC streaming is on, emit a generated clob match to stream.
-		if streamingManager := k.GetFullNodeStreamingManager(); streamingManager.Enabled() {
-			streamOrderbookFill := types.StreamOrderbookFill{
-				ClobMatch: &types.ClobMatch{
-					Match: &types.ClobMatch_MatchPerpetualDeleveraging{
-						MatchPerpetualDeleveraging: matchDeleveraging,
-					},
-				},
-			}
-			k.SendOrderbookFillUpdates(
-				ctx,
-				[]types.StreamOrderbookFill{
-					streamOrderbookFill,
-				},
-			)
-		}
 	}
 
 	return nil
diff --git a/protocol/x/clob/memclob/memclob.go b/protocol/x/clob/memclob/memclob.go
index 3b5d1066d8..5a4a88ce7d 100644
--- a/protocol/x/clob/memclob/memclob.go
+++ b/protocol/x/clob/memclob/memclob.go
@@ -768,18 +768,6 @@ func (m *MemClobPriceTimePriority) matchOrder(
 		order,
 	)
 
-	// If full node streaming is on, emit the taker order and its resulting status.
-	if m.generateOrderbookUpdates {
-		streamTakerOrder := m.GenerateStreamTakerOrder(
-			order,
-			takerOrderStatus,
-		)
-		m.clobKeeper.SendTakerOrderStatus(
-			ctx,
-			streamTakerOrder,
-		)
-	}
-
 	// If this is a replacement order, then ensure we remove the existing order from the orderbook.
 	if !order.IsLiquidation() {
 		orderId := order.MustGetOrder().OrderId
@@ -841,9 +829,11 @@ func (m *MemClobPriceTimePriority) matchOrder(
 	}
 
 	// If the order is post only and it's not the rewind step, then it cannot be filled.
-	// If the order is post only and crosses the book,
 	// Set the matching error so that the order is canceled.
-	if !order.IsLiquidation() && takerOrderStatus.OrderStatus == types.PostOnlyWouldCrossMakerOrder {
+	// TODO(DEC-998): Determine if allowing post-only orders to match in rewind step is valid.
+	if len(newMakerFills) > 0 &&
+		!order.IsLiquidation() &&
+		order.MustGetOrder().TimeInForce == types.Order_TIME_IN_FORCE_POST_ONLY {
 		matchingErr = types.ErrPostOnlyWouldCrossMakerOrder
 	}
 
@@ -1766,16 +1756,6 @@ func (m *MemClobPriceTimePriority) mustPerformTakerOrderMatching(
 			continue
 		}
 
-		// If a valid match has been generated but the taker order is a post only order,
-		// end the matching loop. Because of this, post-only orders can cause
-		// undercollateralized maker orders to be removed from the book up to the first valid match.
-		if takerOrderCrossesMakerOrder &&
-			!newTakerOrder.IsLiquidation() &&
-			newTakerOrder.MustGetOrder().TimeInForce == types.Order_TIME_IN_FORCE_POST_ONLY {
-			takerOrderStatus.OrderStatus = types.PostOnlyWouldCrossMakerOrder
-			break
-		}
-
 		// The orders have matched successfully, and the state has been updated.
 		// To mark the orders as matched, perform the following actions:
 		// 1. Deduct `matchedAmount` from the taker order's remaining quantums, and add the matched
diff --git a/protocol/x/clob/memclob/memclob_grpc_streaming.go b/protocol/x/clob/memclob/memclob_grpc_streaming.go
index 8cd57cef98..3b2dc19a2e 100644
--- a/protocol/x/clob/memclob/memclob_grpc_streaming.go
+++ b/protocol/x/clob/memclob/memclob_grpc_streaming.go
@@ -157,28 +157,3 @@ func (m *MemClobPriceTimePriority) GetOrderbookUpdatesForOrderUpdate(
 	}
 	return offchainUpdates
 }
-
-// GenerateStreamTakerOrder returns a `StreamTakerOrder` object used in full node
-// streaming from a matchableOrder and a taker order status.
-func (m *MemClobPriceTimePriority) GenerateStreamTakerOrder(
-	takerOrder types.MatchableOrder,
-	takerOrderStatus types.TakerOrderStatus,
-) types.StreamTakerOrder {
-	if takerOrder.IsLiquidation() {
-		liquidationOrder := takerOrder.MustGetLiquidationOrder()
-		streamLiquidationOrder := liquidationOrder.ToStreamLiquidationOrder()
-		return types.StreamTakerOrder{
-			TakerOrder: &types.StreamTakerOrder_LiquidationOrder{
-				LiquidationOrder: streamLiquidationOrder,
-			},
-			TakerOrderStatus: takerOrderStatus.ToStreamingTakerOrderStatus(),
-		}
-	}
-	order := takerOrder.MustGetOrder()
-	return types.StreamTakerOrder{
-		TakerOrder: &types.StreamTakerOrder_Order{
-			Order: &order,
-		},
-		TakerOrderStatus: takerOrderStatus.ToStreamingTakerOrderStatus(),
-	}
-}
diff --git a/protocol/x/clob/memclob/memclob_place_order_test.go b/protocol/x/clob/memclob/memclob_place_order_test.go
index e2f921ab6f..cb6957a64b 100644
--- a/protocol/x/clob/memclob/memclob_place_order_test.go
+++ b/protocol/x/clob/memclob/memclob_place_order_test.go
@@ -2927,14 +2927,17 @@ func TestPlaceOrder_PostOnly(t *testing.T) {
 				},
 			},
 			expectedRemainingAsks: []OrderWithRemainingSize{},
-			// Second order is not collat check'd since the first order generates a valid
-			// match, so the matching loop ends.
 			expectedCollatCheck: []expectedMatch{
 				{
 					makerOrder:      &constants.Order_Bob_Num0_Id11_Clob1_Buy5_Price40_GTB32,
 					takerOrder:      &constants.Order_Alice_Num1_Id1_Clob1_Sell10_Price15_GTB20_PO,
 					matchedQuantums: 5,
 				},
+				{
+					makerOrder:      &constants.Order_Bob_Num0_Id4_Clob1_Buy20_Price35_GTB22,
+					takerOrder:      &constants.Order_Alice_Num1_Id1_Clob1_Sell10_Price15_GTB20_PO,
+					matchedQuantums: 5,
+				},
 			},
 			expectedExistingMatches:    []expectedMatch{},
 			expectedOperations:         []types.Operation{},
diff --git a/protocol/x/clob/types/clob_keeper.go b/protocol/x/clob/types/clob_keeper.go
index 4f2875e117..61f1675e2c 100644
--- a/protocol/x/clob/types/clob_keeper.go
+++ b/protocol/x/clob/types/clob_keeper.go
@@ -140,8 +140,8 @@ type ClobKeeper interface {
 		clobPair ClobPair,
 	) error
 	UpdateLiquidationsConfig(ctx sdk.Context, config LiquidationsConfig) error
-	// full node streaming
-	InitializeNewStreams(ctx sdk.Context)
+	// gRPC streaming
+	InitializeNewGrpcStreams(ctx sdk.Context)
 	SendOrderbookUpdates(
 		ctx sdk.Context,
 		offchainUpdates *OffchainUpdates,
diff --git a/protocol/x/clob/types/errors.go b/protocol/x/clob/types/errors.go
index c904cbcdc3..cc7ac4f4b3 100644
--- a/protocol/x/clob/types/errors.go
+++ b/protocol/x/clob/types/errors.go
@@ -533,4 +533,16 @@ var (
 		10001,
 		"Subaccount cannot open more orders due to equity tier limit.",
 	)
+
+	// GrpcStreamingManager errors.
+	ErrGrpcStreamingManagerNotEnabled = errorsmod.Register(
+		ModuleName,
+		11000,
+		"GrpcStreamingManager is not enabled",
+	)
+	ErrInvalidGrpcStreamingRequest = errorsmod.Register(
+		ModuleName,
+		11001,
+		"Invalid gRPC streaming request",
+	)
 )
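
A short sketch (illustration only, not part of this change) of how server-side code that calls Subscribe might branch on these errors; handleSubscribeErr is hypothetical and exists only for illustration.

package types

import "errors"

// handleSubscribeErr maps subscription failures to operator-facing messages.
func handleSubscribeErr(err error) string {
	switch {
	case err == nil:
		return "subscribed"
	case errors.Is(err, ErrGrpcStreamingManagerNotEnabled):
		return "node was not started with gRPC streaming enabled"
	case errors.Is(err, ErrInvalidGrpcStreamingRequest):
		return "invalid gRPC streaming request"
	default:
		return "unexpected subscription error: " + err.Error()
	}
}
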
diff --git a/protocol/x/clob/types/expected_keepers.go b/protocol/x/clob/types/expected_keepers.go
index ee7cb07107..755aa4fd20 100644
--- a/protocol/x/clob/types/expected_keepers.go
+++ b/protocol/x/clob/types/expected_keepers.go
@@ -38,13 +38,6 @@ type SubaccountsKeeper interface {
 	) (
 		val satypes.Subaccount,
 	)
-	GetStreamSubaccountUpdate(
-		ctx sdk.Context,
-		id satypes.SubaccountId,
-		snapshot bool,
-	) (
-		val satypes.StreamSubaccountUpdate,
-	)
 	GetAllSubaccount(
 		ctx sdk.Context,
 	) (
@@ -81,10 +74,6 @@ type SubaccountsKeeper interface {
 		ctx sdk.Context,
 		perpetualId uint32,
 	) (sdk.AccAddress, error)
-	SendSubaccountUpdates(
-		ctx sdk.Context,
-		subaccountUpdates []satypes.StreamSubaccountUpdate,
-	)
 }
 
 type AssetsKeeper interface {
diff --git a/protocol/x/clob/types/liquidation_order.go b/protocol/x/clob/types/liquidation_order.go
index 5804d0f9f0..182c0e84f8 100644
--- a/protocol/x/clob/types/liquidation_order.go
+++ b/protocol/x/clob/types/liquidation_order.go
@@ -51,18 +51,6 @@ func NewLiquidationOrder(
 	}
 }
 
-// ToStreamLiquidationOrder converts the LiquidationOrder to a StreamLiquidationOrder
-// to be emitted by full node streaming.
-func (lo *LiquidationOrder) ToStreamLiquidationOrder() *StreamLiquidationOrder {
-	return &StreamLiquidationOrder{
-		LiquidationInfo: &lo.perpetualLiquidationInfo,
-		ClobPairId:      uint32(lo.clobPairId),
-		IsBuy:           lo.isBuy,
-		Quantums:        lo.quantums.ToUint64(),
-		Subticks:        lo.subticks.ToUint64(),
-	}
-}
-
 // IsBuy returns true if this is a buy order, false if not.
 // This function is necessary for the `LiquidationOrder` type to implement the `MatchableOrder` interface.
 func (lo *LiquidationOrder) IsBuy() bool {
@@ -115,12 +103,6 @@ func (lo *LiquidationOrder) MustGetOrder() Order {
 	panic("MustGetOrder: No underlying order on a LiquidationOrder type.")
 }
 
-// MustGetLiquidationOrder returns the underlying `LiquidationOrder` type.
-// This function is necessary for the `LiquidationOrder` type to implement the `MatchableOrder` interface.
-func (lo *LiquidationOrder) MustGetLiquidationOrder() LiquidationOrder {
-	return *lo
-}
-
 // MustGetLiquidatedPerpetualId returns the perpetual ID that this perpetual order is liquidating.
 // This function is necessary for the `LiquidationOrder` type to implement the `MatchableOrder` interface.
 func (lo *LiquidationOrder) MustGetLiquidatedPerpetualId() uint32 {
diff --git a/protocol/x/clob/types/mem_clob_keeper.go b/protocol/x/clob/types/mem_clob_keeper.go
index 9804b33d48..c8670d694a 100644
--- a/protocol/x/clob/types/mem_clob_keeper.go
+++ b/protocol/x/clob/types/mem_clob_keeper.go
@@ -109,8 +109,4 @@ type MemClobKeeper interface {
 		ctx sdk.Context,
 		orderbookFills []StreamOrderbookFill,
 	)
-	SendTakerOrderStatus(
-		ctx sdk.Context,
-		takerOrder StreamTakerOrder,
-	)
 }
diff --git a/protocol/x/clob/types/message_clob_match.go b/protocol/x/clob/types/message_clob_match.go
index c2b2f0a665..47593e05ee 100644
--- a/protocol/x/clob/types/message_clob_match.go
+++ b/protocol/x/clob/types/message_clob_match.go
@@ -1,7 +1,5 @@
 package types
 
-import satypes "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-
 // NewClobMatchFromMatchOrders creates a `ClobMatch` from the provided `MatchOrders`.
 func NewClobMatchFromMatchOrders(
 	msgMatchOrders *MatchOrders,
@@ -42,27 +40,3 @@ func (clobMatch *ClobMatch) GetAllOrderIds() (orderIds map[OrderId]struct{}) {
 	}
 	return orderIds
 }
-
-// GetAllSubaccountIds returns a set of subaccountIds involved in a ClobMatch.
-func (clobMatch *ClobMatch) GetAllSubaccountIds() (subaccountIds map[satypes.SubaccountId]struct{}) {
-	subaccountIds = make(map[satypes.SubaccountId]struct{})
-	if matchOrders := clobMatch.GetMatchOrders(); matchOrders != nil {
-		subaccountIds[matchOrders.GetTakerOrderId().SubaccountId] = struct{}{}
-		for _, makerFill := range matchOrders.GetFills() {
-			subaccountIds[makerFill.GetMakerOrderId().SubaccountId] = struct{}{}
-		}
-	}
-	if matchOrders := clobMatch.GetMatchPerpetualLiquidation(); matchOrders != nil {
-		subaccountIds[matchOrders.GetLiquidated()] = struct{}{}
-		for _, makerFill := range matchOrders.GetFills() {
-			subaccountIds[makerFill.GetMakerOrderId().SubaccountId] = struct{}{}
-		}
-	}
-	if matchOrders := clobMatch.GetMatchPerpetualDeleveraging(); matchOrders != nil {
-		subaccountIds[matchOrders.GetLiquidated()] = struct{}{}
-		for _, makerFill := range matchOrders.GetFills() {
-			subaccountIds[makerFill.GetOffsettingSubaccountId()] = struct{}{}
-		}
-	}
-	return subaccountIds
-}
diff --git a/protocol/x/clob/types/order.go b/protocol/x/clob/types/order.go
index deac6716e1..9a4e559391 100644
--- a/protocol/x/clob/types/order.go
+++ b/protocol/x/clob/types/order.go
@@ -132,12 +132,6 @@ func (o *Order) MustGetOrder() Order {
 	return *o
 }
 
-// MustGetLiquidationOrder always panics since Order is not a Liquidation Order.
-// This function is necessary for the `Order` type to implement the `MatchableOrder` interface.
-func (o *Order) MustGetLiquidationOrder() LiquidationOrder {
-	panic("MustGetLiquidationOrder: Order is not a liquidation order")
-}
-
 // MustGetLiquidatedPerpetualId always panics since there is no underlying perpetual ID for a `Order`.
 // This function is necessary for the `Order` type to implement the `MatchableOrder` interface.
 func (o *Order) MustGetLiquidatedPerpetualId() uint32 {
diff --git a/protocol/x/clob/types/order.pb.go b/protocol/x/clob/types/order.pb.go
index e781abc639..0cd485c984 100644
--- a/protocol/x/clob/types/order.pb.go
+++ b/protocol/x/clob/types/order.pb.go
@@ -808,91 +808,6 @@ func (m *TransactionOrdering) GetTransactionIndex() uint32 {
 	return 0
 }
 
-// StreamLiquidationOrder represents an protocol-generated IOC liquidation
-// order. Used in full node streaming.
-type StreamLiquidationOrder struct {
-	// Information about this liquidation order.
-	LiquidationInfo *PerpetualLiquidationInfo `protobuf:"bytes,1,opt,name=liquidation_info,json=liquidationInfo,proto3" json:"liquidation_info,omitempty"`
-	// CLOB pair ID of the CLOB pair the liquidation order will be matched
-	// against.
-	ClobPairId uint32 `protobuf:"varint,2,opt,name=clob_pair_id,json=clobPairId,proto3" json:"clob_pair_id,omitempty"`
-	// True if this is a buy order liquidating a short position, false if vice
-	// versa.
-	IsBuy bool `protobuf:"varint,3,opt,name=is_buy,json=isBuy,proto3" json:"is_buy,omitempty"`
-	// The number of base quantums for this liquidation order.
-	Quantums uint64 `protobuf:"varint,4,opt,name=quantums,proto3" json:"quantums,omitempty"`
-	// The subticks this liquidation order will be submitted at.
-	Subticks uint64 `protobuf:"varint,5,opt,name=subticks,proto3" json:"subticks,omitempty"`
-}
-
-func (m *StreamLiquidationOrder) Reset()         { *m = StreamLiquidationOrder{} }
-func (m *StreamLiquidationOrder) String() string { return proto.CompactTextString(m) }
-func (*StreamLiquidationOrder) ProtoMessage()    {}
-func (*StreamLiquidationOrder) Descriptor() ([]byte, []int) {
-	return fileDescriptor_673c6f4faa93736b, []int{9}
-}
-func (m *StreamLiquidationOrder) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StreamLiquidationOrder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StreamLiquidationOrder.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StreamLiquidationOrder) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StreamLiquidationOrder.Merge(m, src)
-}
-func (m *StreamLiquidationOrder) XXX_Size() int {
-	return m.Size()
-}
-func (m *StreamLiquidationOrder) XXX_DiscardUnknown() {
-	xxx_messageInfo_StreamLiquidationOrder.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamLiquidationOrder proto.InternalMessageInfo
-
-func (m *StreamLiquidationOrder) GetLiquidationInfo() *PerpetualLiquidationInfo {
-	if m != nil {
-		return m.LiquidationInfo
-	}
-	return nil
-}
-
-func (m *StreamLiquidationOrder) GetClobPairId() uint32 {
-	if m != nil {
-		return m.ClobPairId
-	}
-	return 0
-}
-
-func (m *StreamLiquidationOrder) GetIsBuy() bool {
-	if m != nil {
-		return m.IsBuy
-	}
-	return false
-}
-
-func (m *StreamLiquidationOrder) GetQuantums() uint64 {
-	if m != nil {
-		return m.Quantums
-	}
-	return 0
-}
-
-func (m *StreamLiquidationOrder) GetSubticks() uint64 {
-	if m != nil {
-		return m.Subticks
-	}
-	return 0
-}
-
 func init() {
 	proto.RegisterEnum("dydxprotocol.clob.Order_Side", Order_Side_name, Order_Side_value)
 	proto.RegisterEnum("dydxprotocol.clob.Order_TimeInForce", Order_TimeInForce_name, Order_TimeInForce_value)
@@ -906,81 +821,74 @@ func init() {
 	proto.RegisterType((*ConditionalOrderPlacement)(nil), "dydxprotocol.clob.ConditionalOrderPlacement")
 	proto.RegisterType((*Order)(nil), "dydxprotocol.clob.Order")
 	proto.RegisterType((*TransactionOrdering)(nil), "dydxprotocol.clob.TransactionOrdering")
-	proto.RegisterType((*StreamLiquidationOrder)(nil), "dydxprotocol.clob.StreamLiquidationOrder")
 }
 
 func init() { proto.RegisterFile("dydxprotocol/clob/order.proto", fileDescriptor_673c6f4faa93736b) }
 
 var fileDescriptor_673c6f4faa93736b = []byte{
-	// 1086 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xcf, 0x6e, 0xdb, 0xc6,
-	0x13, 0x16, 0x6d, 0xd9, 0x96, 0x47, 0x7f, 0xc2, 0xac, 0xe3, 0xfc, 0x64, 0xfb, 0x67, 0x59, 0x15,
-	0x02, 0xd7, 0x45, 0x50, 0x09, 0x75, 0x83, 0x02, 0x45, 0xd1, 0x43, 0x64, 0x4b, 0x30, 0x61, 0xd9,
-	0x54, 0x49, 0x3a, 0x80, 0x83, 0xa2, 0x0b, 0x8a, 0x5c, 0xc9, 0x8b, 0xac, 0x48, 0x85, 0x5c, 0x16,
-	0xd6, 0xad, 0x8f, 0xd0, 0x97, 0xe8, 0x5b, 0xf4, 0x01, 0x72, 0xcc, 0xb1, 0xa7, 0xa2, 0xb5, 0x9f,
-	0xa1, 0xe8, 0xb5, 0xd8, 0x25, 0x2d, 0x91, 0xfe, 0xd3, 0xa2, 0xc8, 0xa5, 0x37, 0xee, 0x37, 0xdf,
-	0x7e, 0x3b, 0x33, 0x3b, 0x33, 0x4b, 0xd8, 0x76, 0xa7, 0xee, 0xe5, 0x24, 0xf0, 0xb9, 0xef, 0xf8,
-	0xac, 0xe5, 0x30, 0x7f, 0xd0, 0xf2, 0x03, 0x97, 0x04, 0x4d, 0x89, 0xa1, 0xc7, 0x69, 0x73, 0x53,
-	0x98, 0x37, 0x9f, 0x8c, 0xfc, 0x91, 0x2f, 0xa1, 0x96, 0xf8, 0x8a, 0x89, 0x9b, 0x9f, 0x64, 0x74,
-	0xc2, 0x68, 0x60, 0x3b, 0x8e, 0x1f, 0x79, 0x3c, 0x4c, 0x7d, 0x27, 0xd4, 0x67, 0x77, 0x8f, 0x64,
-	0xf4, 0x6d, 0x44, 0x5d, 0x9b, 0x53, 0xdf, 0x0b, 0x63, 0x56, 0xe3, 0x67, 0x05, 0x56, 0x74, 0xe1,
-	0x89, 0xe6, 0xa2, 0x6f, 0xa0, 0x3c, 0x57, 0xc1, 0xd4, 0xad, 0x2a, 0x75, 0x65, 0xaf, 0xb8, 0xbf,
-	0xdb, 0xcc, 0x78, 0x97, 0x3a, 0xb4, 0x69, 0xce, 0xbe, 0x35, 0xb7, 0x9d, 0x7f, 0xf7, 0xeb, 0x4e,
-	0xce, 0x28, 0x85, 0x29, 0x0c, 0x6d, 0xc1, 0xaa, 0xc3, 0x28, 0x89, 0xe5, 0x16, 0xea, 0xca, 0xde,
-	0x8a, 0x51, 0x88, 0x01, 0xcd, 0x45, 0x3b, 0x50, 0x94, 0x49, 0xc0, 0x43, 0x66, 0x8f, 0xc2, 0xea,
-	0x62, 0x5d, 0xd9, 0x2b, 0x1b, 0x20, 0xa1, 0xae, 0x40, 0x50, 0x1d, 0x4a, 0xc2, 0x6f, 0x3c, 0xb1,
-	0x69, 0x20, 0x04, 0xf2, 0x31, 0x43, 0x60, 0x7d, 0x9b, 0x06, 0x9a, 0xdb, 0xf8, 0x0e, 0xb6, 0xa5,
-	0xf7, 0x61, 0x97, 0x32, 0x46, 0xdc, 0xc3, 0x28, 0xa0, 0xde, 0xa8, 0x67, 0x73, 0x12, 0xf2, 0x36,
-	0xf3, 0x9d, 0x37, 0xe8, 0x6b, 0x58, 0x8d, 0xcf, 0xa0, 0x6e, 0x58, 0x55, 0xea, 0x8b, 0x7b, 0xc5,
-	0xfd, 0xcd, 0xe6, 0x9d, 0x6c, 0x37, 0x93, 0x14, 0x24, 0x31, 0x14, 0xfc, 0x78, 0x19, 0x36, 0x5e,
-	0xc3, 0x46, 0xdf, 0xe7, 0xc4, 0xe3, 0xd4, 0x66, 0x6c, 0xda, 0x0f, 0x22, 0xcf, 0x1e, 0x30, 0x12,
-	0x1f, 0xf9, 0xa1, 0xda, 0x04, 0x2a, 0xd2, 0x24, 0x5c, 0x37, 0xb9, 0xcd, 0x89, 0x48, 0xc8, 0x90,
-	0x32, 0x86, 0xed, 0xb1, 0x48, 0x9f, 0x4c, 0x7f, 0xde, 0x00, 0x01, 0xbd, 0x94, 0x08, 0xda, 0x87,
-	0xf5, 0x49, 0xe2, 0x03, 0x1e, 0x88, 0xf8, 0xf0, 0x05, 0xa1, 0xa3, 0x0b, 0x2e, 0x53, 0x5b, 0x36,
-	0xd6, 0x6e, 0x8c, 0x32, 0xf6, 0x23, 0x69, 0x6a, 0x7c, 0x0b, 0x5b, 0x52, 0x7d, 0x18, 0x31, 0x79,
-	0x9c, 0x45, 0xc7, 0xc4, 0x64, 0xd4, 0x21, 0xaf, 0x6c, 0x16, 0x91, 0x0f, 0x0d, 0xe2, 0x27, 0x05,
-	0x9e, 0xf6, 0x7c, 0x6f, 0x64, 0x91, 0x60, 0x2c, 0x39, 0x7d, 0x66, 0x3b, 0x64, 0x4c, 0x3c, 0x8e,
-	0x5e, 0xc0, 0x92, 0xa4, 0x25, 0x65, 0x54, 0x7d, 0x48, 0x35, 0xd1, 0x8c, 0xc9, 0xe8, 0x0c, 0x1e,
-	0x4d, 0x6e, 0x24, 0x30, 0xf5, 0x5c, 0x72, 0x29, 0x83, 0xbb, 0x53, 0x86, 0x72, 0xbf, 0x15, 0xd8,
-	0x5e, 0x68, 0x3b, 0xa2, 0xa0, 0xa5, 0x14, 0xf5, 0x46, 0x89, 0x5a, 0x65, 0x26, 0xa2, 0x09, 0x8d,
-	0xc6, 0x1f, 0x0a, 0x6c, 0x1c, 0xf8, 0x9e, 0x4b, 0x05, 0xd7, 0x66, 0xff, 0x61, 0x57, 0xd1, 0x31,
-	0x94, 0x79, 0x40, 0x47, 0x23, 0x71, 0x27, 0x52, 0x74, 0xf1, 0xdf, 0x88, 0x1a, 0xa5, 0x64, 0x73,
-	0x1c, 0xf7, 0x9f, 0xcb, 0xb0, 0x24, 0x4d, 0xe8, 0x2b, 0x28, 0xdc, 0x5c, 0x74, 0x12, 0xe6, 0x3f,
-	0xdf, 0xf3, 0x4a, 0x72, 0xcf, 0xe8, 0x33, 0xc8, 0x87, 0xd4, 0x25, 0x32, 0xbe, 0xca, 0xfe, 0xf6,
-	0x43, 0x1b, 0x9b, 0x26, 0x75, 0x89, 0x21, 0xa9, 0x68, 0x13, 0x0a, 0x6f, 0x23, 0xdb, 0xe3, 0xd1,
-	0x38, 0x6e, 0xed, 0xbc, 0x31, 0x5b, 0x0b, 0x5b, 0x18, 0x0d, 0x38, 0x75, 0xde, 0x84, 0xb2, 0xa9,
-	0xf3, 0xc6, 0x6c, 0x8d, 0x76, 0xa1, 0x32, 0xf2, 0x7d, 0x17, 0x73, 0xca, 0xe2, 0x1a, 0xaf, 0x2e,
-	0x89, 0xe2, 0x3e, 0xca, 0x19, 0x25, 0x81, 0x5b, 0x94, 0xc5, 0x9d, 0xdd, 0x82, 0xb5, 0x2c, 0x0f,
-	0x73, 0x3a, 0x26, 0xd5, 0x65, 0x31, 0x64, 0x8e, 0x72, 0x86, 0x9a, 0x26, 0x8b, 0x9a, 0x47, 0x47,
-	0x50, 0x16, 0x0c, 0x4c, 0x3d, 0x3c, 0xf4, 0x03, 0x87, 0x54, 0x57, 0x64, 0x30, 0xcf, 0x1e, 0x0c,
-	0x46, 0xec, 0xd2, 0xbc, 0xae, 0xe0, 0x1a, 0x45, 0x3e, 0x5f, 0x88, 0x3e, 0x0d, 0x88, 0x1b, 0x39,
-	0x04, 0xfb, 0x1e, 0x9b, 0x56, 0x0b, 0x75, 0x65, 0xaf, 0x60, 0x40, 0x0c, 0xe9, 0x1e, 0x9b, 0xa2,
-	0x8f, 0xe1, 0x51, 0x32, 0xf6, 0xc6, 0x84, 0xdb, 0xae, 0xcd, 0xed, 0xea, 0xaa, 0xec, 0xd0, 0x4a,
-	0x0c, 0x9f, 0x24, 0x28, 0x3a, 0x81, 0x8a, 0x73, 0x53, 0x95, 0x98, 0x4f, 0x27, 0xa4, 0x0a, 0xd2,
-	0xa9, 0xdd, 0x07, 0x9d, 0x9a, 0x15, 0xb1, 0x35, 0x9d, 0x10, 0xa3, 0xec, 0xa4, 0x97, 0xe8, 0x18,
-	0x1a, 0xce, 0xbc, 0xc8, 0x71, 0x7c, 0xdf, 0x37, 0xc5, 0x34, 0xcb, 0x78, 0x51, 0x66, 0x7c, 0xc7,
-	0xb9, 0xd5, 0x0e, 0x56, 0xcc, 0x33, 0x13, 0x5a, 0xe3, 0x4b, 0xc8, 0x8b, 0xeb, 0x44, 0x4f, 0x40,
-	0x35, 0xb5, 0xc3, 0x0e, 0x3e, 0x3b, 0x35, 0xfb, 0x9d, 0x03, 0xad, 0xab, 0x75, 0x0e, 0xd5, 0x1c,
-	0x2a, 0x41, 0x41, 0xa2, 0xed, 0xb3, 0x73, 0x55, 0x41, 0x65, 0x58, 0x95, 0x2b, 0xb3, 0xd3, 0xeb,
-	0xa9, 0x0b, 0x8d, 0x1f, 0x14, 0x28, 0xa6, 0xb2, 0x87, 0xb6, 0x61, 0xc3, 0xd2, 0x4e, 0x3a, 0x58,
-	0x3b, 0xc5, 0x5d, 0xdd, 0x38, 0xb8, 0xad, 0xb5, 0x0e, 0x8f, 0xb3, 0x66, 0x4d, 0x3f, 0x50, 0x15,
-	0xb4, 0x05, 0xff, 0xcb, 0xc2, 0x7d, 0xdd, 0xb4, 0xb0, 0x7e, 0xda, 0x3b, 0x57, 0x17, 0x50, 0x0d,
-	0x36, 0xb3, 0xc6, 0xae, 0xd6, 0xeb, 0x61, 0xdd, 0xc0, 0xc7, 0x5a, 0xaf, 0xa7, 0x2e, 0x36, 0xc6,
-	0x50, 0xce, 0xa4, 0x4a, 0x6c, 0x38, 0xd0, 0x4f, 0x0f, 0x35, 0x4b, 0xd3, 0x4f, 0xb1, 0x75, 0xde,
-	0xbf, 0xed, 0xc4, 0xff, 0xa1, 0x7a, 0xcb, 0x6e, 0x5a, 0x7a, 0x1f, 0xf7, 0x74, 0xd3, 0x54, 0x95,
-	0x7b, 0x76, 0x5b, 0x2f, 0x8f, 0x3b, 0xb8, 0x6f, 0xe8, 0x5d, 0xcd, 0x52, 0x17, 0xda, 0x6a, 0xaa,
-	0x6a, 0x7d, 0x8f, 0xf8, 0xc3, 0x06, 0x81, 0xb5, 0x7b, 0xda, 0x13, 0x7d, 0x04, 0xa5, 0xcc, 0xe4,
-	0x56, 0x64, 0x5d, 0x14, 0x07, 0xf3, 0x89, 0x8d, 0x9e, 0xc3, 0x63, 0x3e, 0xdf, 0x99, 0x9a, 0x2c,
-	0x65, 0x43, 0x4d, 0x19, 0xe2, 0x06, 0xff, 0x5d, 0x81, 0xa7, 0x26, 0x0f, 0x88, 0x3d, 0xee, 0xcd,
-	0x5f, 0xf7, 0xb8, 0xe3, 0x5f, 0x81, 0x9a, 0x7a, 0xf1, 0x31, 0xf5, 0x86, 0x7e, 0xd2, 0xf9, 0xcf,
-	0xef, 0x29, 0xaf, 0x3e, 0x09, 0x26, 0x84, 0x47, 0x36, 0x4b, 0xe9, 0x68, 0xde, 0xd0, 0x37, 0x1e,
-	0xb1, 0x2c, 0x70, 0xe7, 0x59, 0x5e, 0xb8, 0xfd, 0x2c, 0xa3, 0x75, 0x58, 0xa6, 0x21, 0x1e, 0x44,
-	0x53, 0xd9, 0xf9, 0x05, 0x63, 0x89, 0x86, 0xed, 0x68, 0x9a, 0x19, 0x09, 0xf9, 0xbf, 0x19, 0x09,
-	0x4b, 0xd9, 0x91, 0xd0, 0xee, 0xbf, 0xbb, 0xaa, 0x29, 0xef, 0xaf, 0x6a, 0xca, 0x6f, 0x57, 0x35,
-	0xe5, 0xc7, 0xeb, 0x5a, 0xee, 0xfd, 0x75, 0x2d, 0xf7, 0xcb, 0x75, 0x2d, 0xf7, 0xfa, 0x8b, 0x11,
-	0xe5, 0x17, 0xd1, 0xa0, 0xe9, 0xf8, 0xe3, 0x56, 0xe6, 0x7f, 0xe7, 0xfb, 0x17, 0x9f, 0x3a, 0x17,
-	0x36, 0xf5, 0x5a, 0x33, 0xe4, 0x32, 0xfe, 0x07, 0x12, 0x4d, 0x16, 0x0e, 0x96, 0x25, 0xfc, 0xf9,
-	0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x6d, 0x46, 0xe3, 0x98, 0x09, 0x00, 0x00,
+	// 989 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xdd, 0x6e, 0xe3, 0x44,
+	0x14, 0x8e, 0xdb, 0x6c, 0x9b, 0x9e, 0xfc, 0xac, 0x3b, 0xdd, 0x05, 0xb7, 0xa5, 0x69, 0x88, 0x50,
+	0x29, 0x42, 0x24, 0xa2, 0xac, 0x90, 0x10, 0xe2, 0x62, 0xdb, 0x26, 0xaa, 0xd5, 0xb4, 0x0e, 0xb6,
+	0x8b, 0xd4, 0x15, 0x62, 0xe4, 0xd8, 0x53, 0x77, 0xb4, 0x13, 0x4f, 0xb0, 0xc7, 0xa8, 0xb9, 0xe3,
+	0x11, 0x78, 0x09, 0xde, 0x82, 0x07, 0xd8, 0xcb, 0xbd, 0xe4, 0x0a, 0xa1, 0xf6, 0x19, 0x10, 0xb7,
+	0x68, 0xc6, 0x6e, 0xea, 0x74, 0xb7, 0x42, 0x68, 0x6f, 0xb8, 0xf3, 0x7c, 0xe7, 0x3b, 0xdf, 0x9c,
+	0xdf, 0x91, 0x61, 0x2b, 0x98, 0x06, 0x57, 0x93, 0x98, 0x0b, 0xee, 0x73, 0xd6, 0xf5, 0x19, 0x1f,
+	0x75, 0x79, 0x1c, 0x90, 0xb8, 0xa3, 0x30, 0xb4, 0x5a, 0x34, 0x77, 0xa4, 0x79, 0xe3, 0x49, 0xc8,
+	0x43, 0xae, 0xa0, 0xae, 0xfc, 0xca, 0x88, 0x1b, 0x9f, 0xcc, 0xe9, 0x24, 0xe9, 0xc8, 0xf3, 0x7d,
+	0x9e, 0x46, 0x22, 0x29, 0x7c, 0x67, 0xd4, 0xf6, 0x6f, 0x1a, 0x2c, 0x5b, 0xf2, 0x0e, 0x33, 0x40,
+	0xdf, 0x42, 0xfd, 0xce, 0x8e, 0x69, 0x60, 0x68, 0x2d, 0x6d, 0xb7, 0xba, 0xb7, 0xd3, 0x99, 0xbb,
+	0xb7, 0x20, 0xd7, 0x71, 0x66, 0xdf, 0x66, 0xb0, 0x5f, 0x7e, 0xf5, 0xc7, 0x76, 0xc9, 0xae, 0x25,
+	0x05, 0x0c, 0x6d, 0xc2, 0x8a, 0xcf, 0x28, 0xc9, 0xe4, 0x16, 0x5a, 0xda, 0xee, 0xb2, 0x5d, 0xc9,
+	0x00, 0x33, 0x40, 0xdb, 0x50, 0x55, 0xe9, 0xe1, 0x0b, 0xe6, 0x85, 0x89, 0xb1, 0xd8, 0xd2, 0x76,
+	0xeb, 0x36, 0x28, 0xa8, 0x2f, 0x11, 0xd4, 0x82, 0x9a, 0xcc, 0x12, 0x4f, 0x3c, 0x1a, 0x4b, 0x81,
+	0x72, 0xc6, 0x90, 0xd8, 0xd0, 0xa3, 0xb1, 0x19, 0xb4, 0x7f, 0x80, 0x2d, 0x15, 0x7d, 0xd2, 0xa7,
+	0x8c, 0x91, 0xe0, 0x30, 0x8d, 0x69, 0x14, 0x0e, 0x3c, 0x41, 0x12, 0xb1, 0xcf, 0xb8, 0xff, 0x12,
+	0x7d, 0x03, 0x2b, 0xd9, 0x1d, 0x34, 0x48, 0x0c, 0xad, 0xb5, 0xb8, 0x5b, 0xdd, 0xdb, 0xe8, 0xbc,
+	0x51, 0xc7, 0x4e, 0x5e, 0x82, 0x3c, 0x87, 0x0a, 0xcf, 0x8e, 0x49, 0xfb, 0x05, 0xac, 0x0f, 0xb9,
+	0x20, 0x91, 0xa0, 0x1e, 0x63, 0xd3, 0x61, 0x9c, 0x46, 0xde, 0x88, 0x91, 0xec, 0xca, 0x77, 0xd5,
+	0x26, 0xd0, 0x50, 0x26, 0x19, 0xba, 0x23, 0x3c, 0x41, 0x64, 0x41, 0x2e, 0x28, 0x63, 0xd8, 0x1b,
+	0xcb, 0xf2, 0xa9, 0xf2, 0x97, 0x6d, 0x90, 0xd0, 0x73, 0x85, 0xa0, 0x3d, 0x78, 0x3a, 0xc9, 0x63,
+	0xc0, 0x23, 0x99, 0x1f, 0xbe, 0x24, 0x34, 0xbc, 0x14, 0xaa, 0xb4, 0x75, 0x7b, 0xed, 0xd6, 0xa8,
+	0x72, 0x3f, 0x52, 0xa6, 0xf6, 0xf7, 0xb0, 0xa9, 0xd4, 0x2f, 0x52, 0xa6, 0xae, 0x73, 0xe9, 0x98,
+	0x38, 0x8c, 0xfa, 0xe4, 0x3b, 0x8f, 0xa5, 0xe4, 0x5d, 0x93, 0xf8, 0x55, 0x83, 0xf7, 0x06, 0x3c,
+	0x0a, 0x5d, 0x12, 0x8f, 0x15, 0x67, 0xc8, 0x3c, 0x9f, 0x8c, 0x49, 0x24, 0xd0, 0x33, 0x78, 0xa4,
+	0x68, 0xf9, 0x18, 0x19, 0x0f, 0xa9, 0xe6, 0x9a, 0x19, 0x19, 0x9d, 0xc1, 0xe3, 0xc9, 0xad, 0x04,
+	0xa6, 0x51, 0x40, 0xae, 0x54, 0x72, 0x6f, 0x8c, 0xa1, 0xf2, 0x77, 0x63, 0x2f, 0x4a, 0x3c, 0x5f,
+	0x50, 0x1e, 0x29, 0x29, 0x1a, 0x85, 0xb9, 0x5a, 0x63, 0x26, 0x62, 0x4a, 0x8d, 0xf6, 0x5f, 0x1a,
+	0xac, 0x1f, 0xf0, 0x28, 0xa0, 0x92, 0xeb, 0xb1, 0xff, 0x71, 0xa8, 0xe8, 0x18, 0xea, 0x22, 0xa6,
+	0x61, 0x28, 0x7b, 0xa2, 0x44, 0x17, 0xff, 0x8b, 0xa8, 0x5d, 0xcb, 0x9d, 0xb3, 0xbc, 0xff, 0x5e,
+	0x82, 0x47, 0xca, 0x84, 0xbe, 0x86, 0xca, 0x6d, 0xa3, 0xf3, 0x34, 0xff, 0xbd, 0xcf, 0xcb, 0x79,
+	0x9f, 0xd1, 0xe7, 0x50, 0x4e, 0x68, 0x40, 0x54, 0x7e, 0x8d, 0xbd, 0xad, 0x87, 0x1c, 0x3b, 0x0e,
+	0x0d, 0x88, 0xad, 0xa8, 0x68, 0x03, 0x2a, 0x3f, 0xa6, 0x5e, 0x24, 0xd2, 0x71, 0xb6, 0xda, 0x65,
+	0x7b, 0x76, 0x96, 0xb6, 0x24, 0x1d, 0x09, 0xea, 0xbf, 0x4c, 0xd4, 0x52, 0x97, 0xed, 0xd9, 0x19,
+	0xed, 0x40, 0x23, 0xe4, 0x3c, 0xc0, 0x82, 0xb2, 0x6c, 0xc6, 0x8d, 0x47, 0x72, 0xb8, 0x8f, 0x4a,
+	0x76, 0x4d, 0xe2, 0x2e, 0x65, 0xd9, 0x66, 0x77, 0x61, 0x6d, 0x9e, 0x87, 0x05, 0x1d, 0x13, 0x63,
+	0x49, 0x3e, 0x32, 0x47, 0x25, 0x5b, 0x2f, 0x92, 0xe5, 0xcc, 0xa3, 0x23, 0xa8, 0x4b, 0x06, 0xa6,
+	0x11, 0xbe, 0xe0, 0xb1, 0x4f, 0x8c, 0x65, 0x95, 0xcc, 0x47, 0x0f, 0x26, 0x23, 0xbd, 0xcc, 0xa8,
+	0x2f, 0xb9, 0x76, 0x55, 0xdc, 0x1d, 0xe4, 0x9e, 0xc6, 0x24, 0x48, 0x7d, 0x82, 0x79, 0xc4, 0xa6,
+	0x46, 0xa5, 0xa5, 0xed, 0x56, 0x6c, 0xc8, 0x20, 0x2b, 0x62, 0x53, 0xf4, 0x31, 0x3c, 0xce, 0x9f,
+	0xbd, 0x31, 0x11, 0x5e, 0xe0, 0x09, 0xcf, 0x58, 0x51, 0x1b, 0xda, 0xc8, 0xe0, 0x93, 0x1c, 0x45,
+	0x27, 0xd0, 0xf0, 0x6f, 0xa7, 0x12, 0x8b, 0xe9, 0x84, 0x18, 0xa0, 0x82, 0xda, 0x79, 0x30, 0xa8,
+	0xd9, 0x10, 0xbb, 0xd3, 0x09, 0xb1, 0xeb, 0x7e, 0xf1, 0x88, 0x8e, 0xa1, 0xed, 0xdf, 0x0d, 0x39,
+	0xce, 0xfa, 0x7d, 0x3b, 0x4c, 0xb3, 0x8a, 0x57, 0x55, 0xc5, 0xb7, 0xfd, 0x7b, 0xeb, 0xe0, 0x66,
+	0x3c, 0x27, 0xa7, 0xb5, 0xbf, 0x82, 0xb2, 0x6c, 0x27, 0x7a, 0x02, 0xba, 0x63, 0x1e, 0xf6, 0xf0,
+	0xd9, 0xa9, 0x33, 0xec, 0x1d, 0x98, 0x7d, 0xb3, 0x77, 0xa8, 0x97, 0x50, 0x0d, 0x2a, 0x0a, 0xdd,
+	0x3f, 0x3b, 0xd7, 0x35, 0x54, 0x87, 0x15, 0x75, 0x72, 0x7a, 0x83, 0x81, 0xbe, 0xd0, 0xfe, 0x59,
+	0x83, 0x6a, 0xa1, 0x7a, 0x68, 0x0b, 0xd6, 0x5d, 0xf3, 0xa4, 0x87, 0xcd, 0x53, 0xdc, 0xb7, 0xec,
+	0x83, 0xfb, 0x5a, 0x4f, 0x61, 0x75, 0xde, 0x6c, 0x5a, 0x07, 0xba, 0x86, 0x36, 0xe1, 0xfd, 0x79,
+	0x78, 0x68, 0x39, 0x2e, 0xb6, 0x4e, 0x07, 0xe7, 0xfa, 0x02, 0x6a, 0xc2, 0xc6, 0xbc, 0xb1, 0x6f,
+	0x0e, 0x06, 0xd8, 0xb2, 0xf1, 0xb1, 0x39, 0x18, 0xe8, 0x8b, 0xed, 0x31, 0xd4, 0xe7, 0x4a, 0x25,
+	0x1d, 0x0e, 0xac, 0xd3, 0x43, 0xd3, 0x35, 0xad, 0x53, 0xec, 0x9e, 0x0f, 0xef, 0x07, 0xf1, 0x01,
+	0x18, 0xf7, 0xec, 0x8e, 0x6b, 0x0d, 0xf1, 0xc0, 0x72, 0x1c, 0x5d, 0x7b, 0x8b, 0xb7, 0xfb, 0xfc,
+	0xb8, 0x87, 0x87, 0xb6, 0xd5, 0x37, 0x5d, 0x7d, 0x61, 0x5f, 0x2f, 0x4c, 0x2d, 0x8f, 0x08, 0xbf,
+	0x68, 0x13, 0x58, 0x7b, 0xcb, 0x7a, 0xa2, 0x0f, 0xa1, 0x36, 0xf7, 0x72, 0x6b, 0x6a, 0x2e, 0xaa,
+	0xa3, 0xbb, 0x17, 0x1b, 0x7d, 0x0a, 0xab, 0xe2, 0xce, 0xb3, 0xf0, 0xb2, 0xd4, 0x6d, 0xbd, 0x60,
+	0x50, 0x0b, 0xbe, 0x3f, 0x7c, 0x75, 0xdd, 0xd4, 0x5e, 0x5f, 0x37, 0xb5, 0x3f, 0xaf, 0x9b, 0xda,
+	0x2f, 0x37, 0xcd, 0xd2, 0xeb, 0x9b, 0x66, 0xe9, 0xf7, 0x9b, 0x66, 0xe9, 0xc5, 0x97, 0x21, 0x15,
+	0x97, 0xe9, 0xa8, 0xe3, 0xf3, 0x71, 0x77, 0xee, 0x87, 0xe0, 0xa7, 0x67, 0x9f, 0xf9, 0x97, 0x1e,
+	0x8d, 0xba, 0x33, 0xe4, 0x2a, 0xfb, 0xd9, 0x90, 0x03, 0x98, 0x8c, 0x96, 0x14, 0xfc, 0xc5, 0x3f,
+	0x01, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x0b, 0xbd, 0x8a, 0x8e, 0x08, 0x00, 0x00,
 }
 
 func (m *OrderId) Marshal() (dAtA []byte, err error) {
@@ -1419,66 +1327,6 @@ func (m *TransactionOrdering) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
-func (m *StreamLiquidationOrder) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StreamLiquidationOrder) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamLiquidationOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Subticks != 0 {
-		i = encodeVarintOrder(dAtA, i, uint64(m.Subticks))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Quantums != 0 {
-		i = encodeVarintOrder(dAtA, i, uint64(m.Quantums))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.IsBuy {
-		i--
-		if m.IsBuy {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ClobPairId != 0 {
-		i = encodeVarintOrder(dAtA, i, uint64(m.ClobPairId))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.LiquidationInfo != nil {
-		{
-			size, err := m.LiquidationInfo.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintOrder(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
 func encodeVarintOrder(dAtA []byte, offset int, v uint64) int {
 	offset -= sovOrder(v)
 	base := offset
@@ -1671,31 +1519,6 @@ func (m *TransactionOrdering) Size() (n int) {
 	return n
 }
 
-func (m *StreamLiquidationOrder) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.LiquidationInfo != nil {
-		l = m.LiquidationInfo.Size()
-		n += 1 + l + sovOrder(uint64(l))
-	}
-	if m.ClobPairId != 0 {
-		n += 1 + sovOrder(uint64(m.ClobPairId))
-	}
-	if m.IsBuy {
-		n += 2
-	}
-	if m.Quantums != 0 {
-		n += 1 + sovOrder(uint64(m.Quantums))
-	}
-	if m.Subticks != 0 {
-		n += 1 + sovOrder(uint64(m.Subticks))
-	}
-	return n
-}
-
 func sovOrder(x uint64) (n int) {
 	return (math_bits.Len64(x|1) + 6) / 7
 }
@@ -2796,169 +2619,6 @@ func (m *TransactionOrdering) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *StreamLiquidationOrder) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowOrder
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StreamLiquidationOrder: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StreamLiquidationOrder: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LiquidationInfo", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOrder
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthOrder
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthOrder
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LiquidationInfo == nil {
-				m.LiquidationInfo = &PerpetualLiquidationInfo{}
-			}
-			if err := m.LiquidationInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClobPairId", wireType)
-			}
-			m.ClobPairId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOrder
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ClobPairId |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IsBuy", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOrder
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IsBuy = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quantums", wireType)
-			}
-			m.Quantums = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOrder
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Quantums |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Subticks", wireType)
-			}
-			m.Subticks = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOrder
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Subticks |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipOrder(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthOrder
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
 func skipOrder(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
diff --git a/protocol/x/clob/types/orderbook.go b/protocol/x/clob/types/orderbook.go
index 6070a6c7c7..9fa532d3fc 100644
--- a/protocol/x/clob/types/orderbook.go
+++ b/protocol/x/clob/types/orderbook.go
@@ -153,16 +153,6 @@ type TakerOrderStatus struct {
 	OrderOptimisticallyFilledQuantums satypes.BaseQuantums
 }
 
-// ToStreamingTakerOrderStatus converts the TakerOrderStatus to a StreamTakerOrderStatus
-// to be emitted by full node streaming.
-func (tos *TakerOrderStatus) ToStreamingTakerOrderStatus() *StreamTakerOrderStatus {
-	return &StreamTakerOrderStatus{
-		OrderStatus:                  uint32(tos.OrderStatus),
-		RemainingQuantums:            tos.RemainingQuantums.ToUint64(),
-		OptimisticallyFilledQuantums: tos.OrderOptimisticallyFilledQuantums.ToUint64(),
-	}
-}
-
 // OrderStatus represents the status of an order after attempting to place it on the orderbook.
 type OrderStatus uint
 
@@ -197,9 +187,6 @@ const (
 	// with either multiple positions in isolated perpetuals or both an isolated and a cross perpetual
 	// position.
 	ViolatesIsolatedSubaccountConstraints
-	// PostOnlyWouldCrossMakerOrder indicates that matching the post only taker order would cross the
-	// orderbook, and was therefore canceled.
-	PostOnlyWouldCrossMakerOrder
 )
 
 // String returns a string representation of this `OrderStatus` enum.
@@ -257,9 +244,6 @@ type MatchableOrder interface {
 	// MustGetOrder returns the underlying order if this is not a liquidation order. Panics if called
 	// for a liquidation order.
 	MustGetOrder() Order
-	// MustGetLiquidationOrder returns the underlying liquidation order if this is not a regular order.
-	// Panics if called for a regular order.
-	MustGetLiquidationOrder() LiquidationOrder
 	// MustGetLiquidatedPerpetualId returns the perpetual ID if this is a liquidation order. Panics
 	// if called for a non-liquidation order.
 	MustGetLiquidatedPerpetualId() uint32
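Dropping PostOnlyWouldCrossMakerOrder removes the last value of the OrderStatus const block. Assuming that block is iota-based (the usual Go pattern for this kind of enum), removing a trailing constant leaves every earlier value unchanged, which is why no other status values need to move in this diff. A hypothetical sketch of that property:

```go
package main

import "fmt"

// Illustrative stand-in for an iota-based status enum; the names and count
// here are hypothetical, not the real OrderStatus values.
type status uint

const (
	statusSuccess             status = iota // 0
	statusUndercollateralized               // 1
	statusInternalError                     // 2
	// statusPostOnlyWouldCross // would have been 3; removing a trailing
	// constant does not renumber anything declared before it.
)

func main() {
	fmt.Println(statusSuccess, statusUndercollateralized, statusInternalError) // 0 1 2
}
```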
diff --git a/protocol/x/clob/types/query.pb.go b/protocol/x/clob/types/query.pb.go
index d0d00fd50a..b4c4596eae 100644
--- a/protocol/x/clob/types/query.pb.go
+++ b/protocol/x/clob/types/query.pb.go
@@ -11,8 +11,7 @@ import (
 	_ "github.com/cosmos/gogoproto/gogoproto"
 	grpc1 "github.com/cosmos/gogoproto/grpc"
 	proto "github.com/cosmos/gogoproto/proto"
-	types1 "github.com/dydxprotocol/v4-chain/protocol/indexer/off_chain_updates/types"
-	types "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
+	types "github.com/dydxprotocol/v4-chain/protocol/indexer/off_chain_updates/types"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -771,8 +770,6 @@ func (m *QueryLiquidationsConfigurationResponse) GetLiquidationsConfig() Liquida
 type StreamOrderbookUpdatesRequest struct {
 	// Clob pair ids to stream orderbook updates for.
 	ClobPairId []uint32 `protobuf:"varint,1,rep,packed,name=clob_pair_id,json=clobPairId,proto3" json:"clob_pair_id,omitempty"`
-	// Subaccount ids to stream subaccount updates for.
-	SubaccountIds []*types.SubaccountId `protobuf:"bytes,2,rep,name=subaccount_ids,json=subaccountIds,proto3" json:"subaccount_ids,omitempty"`
 }
 
 func (m *StreamOrderbookUpdatesRequest) Reset()         { *m = StreamOrderbookUpdatesRequest{} }
@@ -815,13 +812,6 @@ func (m *StreamOrderbookUpdatesRequest) GetClobPairId() []uint32 {
 	return nil
 }
 
-func (m *StreamOrderbookUpdatesRequest) GetSubaccountIds() []*types.SubaccountId {
-	if m != nil {
-		return m.SubaccountIds
-	}
-	return nil
-}
-
 // StreamOrderbookUpdatesResponse is a response message for the
 // StreamOrderbookUpdates method.
 type StreamOrderbookUpdatesResponse struct {
@@ -873,19 +863,17 @@ func (m *StreamOrderbookUpdatesResponse) GetUpdates() []StreamUpdate {
 // GRPC stream.
 type StreamUpdate struct {
 	// Contains one of a StreamOrderbookUpdate,
-	// StreamOrderbookFill, StreamTakerOrderStatus.
+	// StreamOrderbookFill.
 	//
 	// Types that are valid to be assigned to UpdateMessage:
 	//
 	//	*StreamUpdate_OrderbookUpdate
 	//	*StreamUpdate_OrderFill
-	//	*StreamUpdate_TakerOrder
-	//	*StreamUpdate_SubaccountUpdate
 	UpdateMessage isStreamUpdate_UpdateMessage `protobuf_oneof:"update_message"`
 	// Block height of the update.
-	BlockHeight uint32 `protobuf:"varint,5,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"`
+	BlockHeight uint32 `protobuf:"varint,3,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"`
 	// Exec mode of the update.
-	ExecMode uint32 `protobuf:"varint,6,opt,name=exec_mode,json=execMode,proto3" json:"exec_mode,omitempty"`
+	ExecMode uint32 `protobuf:"varint,4,opt,name=exec_mode,json=execMode,proto3" json:"exec_mode,omitempty"`
 }
 
 func (m *StreamUpdate) Reset()         { *m = StreamUpdate{} }
@@ -933,17 +921,9 @@ type StreamUpdate_OrderbookUpdate struct {
 type StreamUpdate_OrderFill struct {
 	OrderFill *StreamOrderbookFill `protobuf:"bytes,2,opt,name=order_fill,json=orderFill,proto3,oneof" json:"order_fill,omitempty"`
 }
-type StreamUpdate_TakerOrder struct {
-	TakerOrder *StreamTakerOrder `protobuf:"bytes,3,opt,name=taker_order,json=takerOrder,proto3,oneof" json:"taker_order,omitempty"`
-}
-type StreamUpdate_SubaccountUpdate struct {
-	SubaccountUpdate *types.StreamSubaccountUpdate `protobuf:"bytes,4,opt,name=subaccount_update,json=subaccountUpdate,proto3,oneof" json:"subaccount_update,omitempty"`
-}
 
-func (*StreamUpdate_OrderbookUpdate) isStreamUpdate_UpdateMessage()  {}
-func (*StreamUpdate_OrderFill) isStreamUpdate_UpdateMessage()        {}
-func (*StreamUpdate_TakerOrder) isStreamUpdate_UpdateMessage()       {}
-func (*StreamUpdate_SubaccountUpdate) isStreamUpdate_UpdateMessage() {}
+func (*StreamUpdate_OrderbookUpdate) isStreamUpdate_UpdateMessage() {}
+func (*StreamUpdate_OrderFill) isStreamUpdate_UpdateMessage()       {}
 
 func (m *StreamUpdate) GetUpdateMessage() isStreamUpdate_UpdateMessage {
 	if m != nil {
@@ -966,20 +946,6 @@ func (m *StreamUpdate) GetOrderFill() *StreamOrderbookFill {
 	return nil
 }
 
-func (m *StreamUpdate) GetTakerOrder() *StreamTakerOrder {
-	if x, ok := m.GetUpdateMessage().(*StreamUpdate_TakerOrder); ok {
-		return x.TakerOrder
-	}
-	return nil
-}
-
-func (m *StreamUpdate) GetSubaccountUpdate() *types.StreamSubaccountUpdate {
-	if x, ok := m.GetUpdateMessage().(*StreamUpdate_SubaccountUpdate); ok {
-		return x.SubaccountUpdate
-	}
-	return nil
-}
-
 func (m *StreamUpdate) GetBlockHeight() uint32 {
 	if m != nil {
 		return m.BlockHeight
@@ -999,8 +965,6 @@ func (*StreamUpdate) XXX_OneofWrappers() []interface{} {
 	return []interface{}{
 		(*StreamUpdate_OrderbookUpdate)(nil),
 		(*StreamUpdate_OrderFill)(nil),
-		(*StreamUpdate_TakerOrder)(nil),
-		(*StreamUpdate_SubaccountUpdate)(nil),
 	}
 }
 
@@ -1009,7 +973,7 @@ func (*StreamUpdate) XXX_OneofWrappers() []interface{} {
 type StreamOrderbookUpdate struct {
 	// Orderbook updates for the clob pair. Can contain order place, removals,
 	// or updates.
-	Updates []types1.OffChainUpdateV1 `protobuf:"bytes,1,rep,name=updates,proto3" json:"updates"`
+	Updates []types.OffChainUpdateV1 `protobuf:"bytes,1,rep,name=updates,proto3" json:"updates"`
 	// Snapshot indicates if the response is from a snapshot of the orderbook.
 	// All updates should be ignored until snapshot is received.
 	// If the snapshot is true, then all previous entries should be
@@ -1050,7 +1014,7 @@ func (m *StreamOrderbookUpdate) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_StreamOrderbookUpdate proto.InternalMessageInfo
 
-func (m *StreamOrderbookUpdate) GetUpdates() []types1.OffChainUpdateV1 {
+func (m *StreamOrderbookUpdate) GetUpdates() []types.OffChainUpdateV1 {
 	if m != nil {
 		return m.Updates
 	}
@@ -1131,180 +1095,6 @@ func (m *StreamOrderbookFill) GetFillAmounts() []uint64 {
 	return nil
 }
 
-// StreamTakerOrder provides information on a taker order that was attempted
-// to be matched on the orderbook.
-// It is intended to be used only in full node streaming.
-type StreamTakerOrder struct {
-	// The taker order that was matched on the orderbook. Can be a
-	// regular order or a liquidation order.
-	//
-	// Types that are valid to be assigned to TakerOrder:
-	//
-	//	*StreamTakerOrder_Order
-	//	*StreamTakerOrder_LiquidationOrder
-	TakerOrder isStreamTakerOrder_TakerOrder `protobuf_oneof:"taker_order"`
-	// Information on the taker order after it is matched on the book,
-	// either successfully or unsuccessfully.
-	TakerOrderStatus *StreamTakerOrderStatus `protobuf:"bytes,3,opt,name=taker_order_status,json=takerOrderStatus,proto3" json:"taker_order_status,omitempty"`
-}
-
-func (m *StreamTakerOrder) Reset()         { *m = StreamTakerOrder{} }
-func (m *StreamTakerOrder) String() string { return proto.CompactTextString(m) }
-func (*StreamTakerOrder) ProtoMessage()    {}
-func (*StreamTakerOrder) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3365c195b25c5bc0, []int{19}
-}
-func (m *StreamTakerOrder) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StreamTakerOrder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StreamTakerOrder.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StreamTakerOrder) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StreamTakerOrder.Merge(m, src)
-}
-func (m *StreamTakerOrder) XXX_Size() int {
-	return m.Size()
-}
-func (m *StreamTakerOrder) XXX_DiscardUnknown() {
-	xxx_messageInfo_StreamTakerOrder.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamTakerOrder proto.InternalMessageInfo
-
-type isStreamTakerOrder_TakerOrder interface {
-	isStreamTakerOrder_TakerOrder()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type StreamTakerOrder_Order struct {
-	Order *Order `protobuf:"bytes,1,opt,name=order,proto3,oneof" json:"order,omitempty"`
-}
-type StreamTakerOrder_LiquidationOrder struct {
-	LiquidationOrder *StreamLiquidationOrder `protobuf:"bytes,2,opt,name=liquidation_order,json=liquidationOrder,proto3,oneof" json:"liquidation_order,omitempty"`
-}
-
-func (*StreamTakerOrder_Order) isStreamTakerOrder_TakerOrder()            {}
-func (*StreamTakerOrder_LiquidationOrder) isStreamTakerOrder_TakerOrder() {}
-
-func (m *StreamTakerOrder) GetTakerOrder() isStreamTakerOrder_TakerOrder {
-	if m != nil {
-		return m.TakerOrder
-	}
-	return nil
-}
-
-func (m *StreamTakerOrder) GetOrder() *Order {
-	if x, ok := m.GetTakerOrder().(*StreamTakerOrder_Order); ok {
-		return x.Order
-	}
-	return nil
-}
-
-func (m *StreamTakerOrder) GetLiquidationOrder() *StreamLiquidationOrder {
-	if x, ok := m.GetTakerOrder().(*StreamTakerOrder_LiquidationOrder); ok {
-		return x.LiquidationOrder
-	}
-	return nil
-}
-
-func (m *StreamTakerOrder) GetTakerOrderStatus() *StreamTakerOrderStatus {
-	if m != nil {
-		return m.TakerOrderStatus
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*StreamTakerOrder) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*StreamTakerOrder_Order)(nil),
-		(*StreamTakerOrder_LiquidationOrder)(nil),
-	}
-}
-
-// StreamTakerOrderStatus is a representation of a taker order
-// after it is attempted to be matched on the orderbook.
-// It is intended to be used only in full node streaming.
-type StreamTakerOrderStatus struct {
-	// The state of the taker order after attempting to match it against the
-	// orderbook. Possible enum values can be found here:
-	// https://github.com/dydxprotocol/v4-chain/blob/main/protocol/x/clob/types/orderbook.go#L105
-	OrderStatus uint32 `protobuf:"varint,1,opt,name=order_status,json=orderStatus,proto3" json:"order_status,omitempty"`
-	// The amount of remaining (non-matched) base quantums of this taker order.
-	RemainingQuantums uint64 `protobuf:"varint,2,opt,name=remaining_quantums,json=remainingQuantums,proto3" json:"remaining_quantums,omitempty"`
-	// The amount of base quantums that were *optimistically* filled for this
-	// taker order when the order is matched against the orderbook. Note that if
-	// any quantums of this order were optimistically filled or filled in state
-	// before this invocation of the matching loop, this value will not include
-	// them.
-	OptimisticallyFilledQuantums uint64 `protobuf:"varint,3,opt,name=optimistically_filled_quantums,json=optimisticallyFilledQuantums,proto3" json:"optimistically_filled_quantums,omitempty"`
-}
-
-func (m *StreamTakerOrderStatus) Reset()         { *m = StreamTakerOrderStatus{} }
-func (m *StreamTakerOrderStatus) String() string { return proto.CompactTextString(m) }
-func (*StreamTakerOrderStatus) ProtoMessage()    {}
-func (*StreamTakerOrderStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3365c195b25c5bc0, []int{20}
-}
-func (m *StreamTakerOrderStatus) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StreamTakerOrderStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StreamTakerOrderStatus.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StreamTakerOrderStatus) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StreamTakerOrderStatus.Merge(m, src)
-}
-func (m *StreamTakerOrderStatus) XXX_Size() int {
-	return m.Size()
-}
-func (m *StreamTakerOrderStatus) XXX_DiscardUnknown() {
-	xxx_messageInfo_StreamTakerOrderStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamTakerOrderStatus proto.InternalMessageInfo
-
-func (m *StreamTakerOrderStatus) GetOrderStatus() uint32 {
-	if m != nil {
-		return m.OrderStatus
-	}
-	return 0
-}
-
-func (m *StreamTakerOrderStatus) GetRemainingQuantums() uint64 {
-	if m != nil {
-		return m.RemainingQuantums
-	}
-	return 0
-}
-
-func (m *StreamTakerOrderStatus) GetOptimisticallyFilledQuantums() uint64 {
-	if m != nil {
-		return m.OptimisticallyFilledQuantums
-	}
-	return 0
-}
-
 func init() {
 	proto.RegisterType((*QueryGetClobPairRequest)(nil), "dydxprotocol.clob.QueryGetClobPairRequest")
 	proto.RegisterType((*QueryClobPairResponse)(nil), "dydxprotocol.clob.QueryClobPairResponse")
@@ -1326,118 +1116,100 @@ func init() {
 	proto.RegisterType((*StreamUpdate)(nil), "dydxprotocol.clob.StreamUpdate")
 	proto.RegisterType((*StreamOrderbookUpdate)(nil), "dydxprotocol.clob.StreamOrderbookUpdate")
 	proto.RegisterType((*StreamOrderbookFill)(nil), "dydxprotocol.clob.StreamOrderbookFill")
-	proto.RegisterType((*StreamTakerOrder)(nil), "dydxprotocol.clob.StreamTakerOrder")
-	proto.RegisterType((*StreamTakerOrderStatus)(nil), "dydxprotocol.clob.StreamTakerOrderStatus")
 }
 
 func init() { proto.RegisterFile("dydxprotocol/clob/query.proto", fileDescriptor_3365c195b25c5bc0) }
 
 var fileDescriptor_3365c195b25c5bc0 = []byte{
-	// 1656 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x41, 0x4f, 0xdc, 0x46,
-	0x14, 0x5e, 0xb3, 0x84, 0xc0, 0xdb, 0x40, 0x60, 0x08, 0xc9, 0x66, 0x21, 0x0b, 0x71, 0x1a, 0xb2,
-	0x90, 0x66, 0x0d, 0x24, 0x8a, 0xd2, 0x50, 0xa5, 0x02, 0x5a, 0x42, 0xa4, 0xd0, 0x10, 0x43, 0x12,
-	0xd4, 0x46, 0xb2, 0xbc, 0xf6, 0xb0, 0x58, 0xd8, 0x9e, 0xc5, 0x1e, 0xaf, 0x40, 0x55, 0x55, 0xa9,
-	0x87, 0x5c, 0xda, 0x4a, 0x91, 0x7a, 0xe8, 0xa1, 0x52, 0x2f, 0x3d, 0xf5, 0x50, 0xa9, 0x97, 0x1e,
-	0xab, 0xb6, 0xb7, 0x1c, 0x23, 0xf5, 0xd2, 0x43, 0x55, 0x55, 0x49, 0xcf, 0xfd, 0x0d, 0x95, 0x67,
-	0xc6, 0x8b, 0x77, 0xd7, 0x5e, 0x08, 0x17, 0xb0, 0xdf, 0xbc, 0xf7, 0xe6, 0x7b, 0xef, 0x7d, 0xf3,
-	0xe6, 0x79, 0xe1, 0x82, 0xb9, 0x6f, 0xee, 0xd5, 0x3c, 0x42, 0x89, 0x41, 0x6c, 0xc5, 0xb0, 0x49,
-	0x45, 0xd9, 0x0d, 0xb0, 0xb7, 0x5f, 0x66, 0x32, 0x34, 0x14, 0x5f, 0x2e, 0x87, 0xcb, 0x85, 0x33,
-	0x55, 0x52, 0x25, 0x4c, 0xa4, 0x84, 0x4f, 0x5c, 0xb1, 0x30, 0x56, 0x25, 0xa4, 0x6a, 0x63, 0x45,
-	0xaf, 0x59, 0x8a, 0xee, 0xba, 0x84, 0xea, 0xd4, 0x22, 0xae, 0x2f, 0x56, 0xa7, 0x0d, 0xe2, 0x3b,
-	0xc4, 0x57, 0x2a, 0xba, 0x8f, 0xb9, 0x7f, 0xa5, 0x3e, 0x5b, 0xc1, 0x54, 0x9f, 0x55, 0x6a, 0x7a,
-	0xd5, 0x72, 0x99, 0xb2, 0xd0, 0x55, 0xda, 0x11, 0x55, 0x6c, 0x62, 0xec, 0x68, 0x9e, 0x4e, 0xb1,
-	0x66, 0x5b, 0x8e, 0x45, 0x35, 0x83, 0xb8, 0x5b, 0x56, 0x55, 0x18, 0x5c, 0x6c, 0x37, 0x08, 0xff,
-	0x68, 0x35, 0xdd, 0xf2, 0x84, 0xca, 0x4c, 0xbb, 0x0a, 0xde, 0x0d, 0x2c, 0xba, 0xaf, 0x51, 0x0b,
-	0x7b, 0x49, 0x4e, 0x13, 0xf2, 0x42, 0x3c, 0x13, 0x47, 0x0e, 0xc7, 0xdb, 0x97, 0x1d, 0x9d, 0x1a,
-	0xdb, 0x38, 0x8a, 0xf8, 0x6a, 0xbb, 0x82, 0x6d, 0xed, 0x06, 0x96, 0xc9, 0xf3, 0xd2, 0xbc, 0xd9,
-	0x68, 0x82, 0x37, 0x5c, 0x17, 0x8b, 0x77, 0x9a, 0x16, 0x2d, 0xd7, 0xc4, 0x7b, 0xd8, 0x53, 0xc8,
-	0xd6, 0x96, 0x66, 0x6c, 0xeb, 0x96, 0xab, 0x05, 0x35, 0x53, 0xa7, 0xd8, 0x6f, 0x97, 0x08, 0xfb,
-	0x52, 0x93, 0xbd, 0x1f, 0x54, 0x74, 0xc3, 0x20, 0x81, 0x4b, 0x7d, 0xc5, 0xa7, 0x1e, 0xd6, 0x1d,
-	0xcb, 0x8d, 0x60, 0x4c, 0xa5, 0x6b, 0x36, 0x9e, 0xb9, 0xaa, 0x3c, 0x05, 0xe7, 0x1e, 0x86, 0x65,
-	0xbc, 0x8b, 0xe9, 0x92, 0x4d, 0x2a, 0x6b, 0xba, 0xe5, 0xa9, 0x78, 0x37, 0xc0, 0x3e, 0x45, 0x03,
-	0xd0, 0x65, 0x99, 0x79, 0x69, 0x42, 0x2a, 0xf5, 0xab, 0x5d, 0x96, 0x29, 0x3f, 0x81, 0x11, 0xa6,
-	0x7a, 0xa0, 0xe7, 0xd7, 0x88, 0xeb, 0x63, 0x74, 0x07, 0xfa, 0x1a, 0x75, 0x62, 0xfa, 0xb9, 0xb9,
-	0xd1, 0x72, 0x1b, 0xdf, 0xca, 0x91, 0xdd, 0x62, 0xf7, 0x8b, 0xbf, 0xc7, 0x33, 0x6a, 0xaf, 0x21,
-	0xde, 0x65, 0x5d, 0x60, 0x58, 0xb0, 0xed, 0x56, 0x0c, 0xcb, 0x00, 0x07, 0xbc, 0x12, 0xbe, 0x27,
-	0xcb, 0x9c, 0x84, 0xe5, 0x90, 0x84, 0x65, 0x4e, 0x72, 0x41, 0xc2, 0xf2, 0x9a, 0x5e, 0xc5, 0xc2,
-	0x56, 0x8d, 0x59, 0xca, 0xdf, 0x4b, 0x90, 0x6f, 0x02, 0xbf, 0x60, 0xdb, 0x69, 0xf8, 0xb3, 0x6f,
-	0x88, 0x1f, 0xdd, 0x6d, 0x02, 0xd9, 0xc5, 0x40, 0x5e, 0x39, 0x14, 0x24, 0xdf, 0xbc, 0x09, 0xe5,
-	0x5f, 0x12, 0x8c, 0xaf, 0xe2, 0xfa, 0x87, 0xc4, 0xc4, 0x1b, 0x24, 0xfc, 0xbb, 0xa4, 0xdb, 0x46,
-	0x60, 0xb3, 0xc5, 0x28, 0x23, 0x4f, 0xe1, 0x2c, 0x3f, 0x45, 0x35, 0x8f, 0xd4, 0x88, 0x8f, 0x3d,
-	0x4d, 0xf0, 0xb5, 0x91, 0x9d, 0x76, 0xe4, 0x8f, 0x75, 0x3b, 0xe4, 0x2b, 0xf1, 0x56, 0x71, 0x7d,
-	0x95, 0x6b, 0xab, 0x67, 0x98, 0x97, 0x35, 0xe1, 0x44, 0x48, 0xd1, 0xc7, 0x30, 0x52, 0x8f, 0x94,
-	0x35, 0x07, 0xd7, 0x35, 0x07, 0x53, 0xcf, 0x32, 0xfc, 0x46, 0x54, 0xed, 0xce, 0x9b, 0x00, 0xaf,
-	0x72, 0x75, 0x75, 0xb8, 0x1e, 0xdf, 0x92, 0x0b, 0xe5, 0xff, 0x24, 0x98, 0x48, 0x0f, 0x4f, 0x14,
-	0xa3, 0x0a, 0x27, 0x3d, 0xec, 0x07, 0x36, 0xf5, 0x45, 0x29, 0xee, 0x1e, 0xb6, 0x67, 0x82, 0x97,
-	0x50, 0x61, 0xc1, 0x35, 0x1f, 0x13, 0x3b, 0x70, 0xf0, 0x1a, 0xf6, 0xc2, 0xd2, 0x89, 0xb2, 0x45,
-	0xde, 0x0b, 0x3a, 0x0c, 0x27, 0x68, 0xa1, 0x09, 0x38, 0xd5, 0x20, 0x83, 0xd6, 0xe0, 0x3f, 0x44,
-	0xc5, 0xbe, 0x67, 0xa2, 0x41, 0xc8, 0x3a, 0xb8, 0xce, 0x32, 0xd2, 0xa5, 0x86, 0x8f, 0xe8, 0x2c,
-	0xf4, 0xd4, 0x99, 0x93, 0x7c, 0x76, 0x42, 0x2a, 0x75, 0xab, 0xe2, 0x4d, 0x9e, 0x86, 0x12, 0x23,
-	0xdd, 0x07, 0xac, 0x45, 0x6d, 0x58, 0xd8, 0xbb, 0x1f, 0x36, 0xa8, 0x25, 0xd6, 0x32, 0x02, 0x2f,
-	0x5e, 0x57, 0xf9, 0x5b, 0x09, 0xa6, 0x8e, 0xa0, 0x2c, 0xb2, 0xe4, 0x42, 0x3e, 0xad, 0xef, 0x09,
-	0x1e, 0x28, 0x09, 0x69, 0xeb, 0xe4, 0x5a, 0xa4, 0x67, 0x04, 0x27, 0xe9, 0xc8, 0x53, 0x70, 0x85,
-	0x81, 0x5b, 0x0c, 0x49, 0xa3, 0xea, 0x14, 0xa7, 0x07, 0xf2, 0x8d, 0x24, 0xa2, 0xee, 0xa8, 0x2b,
-	0xe2, 0xd8, 0x81, 0x73, 0x29, 0x77, 0x82, 0x08, 0xa3, 0x9c, 0x10, 0x46, 0x07, 0xc7, 0x22, 0x0a,
-	0x4e, 0xee, 0x16, 0x15, 0x79, 0x13, 0xce, 0x33, 0x60, 0xeb, 0x54, 0xa7, 0x78, 0x2b, 0xb0, 0x1f,
-	0x84, 0xf7, 0x40, 0x74, 0xae, 0xe6, 0xa1, 0x97, 0xdd, 0x0b, 0x51, 0xcd, 0x73, 0x73, 0x85, 0x84,
-	0xad, 0x99, 0xc9, 0x3d, 0x33, 0xe2, 0x12, 0xe1, 0xaf, 0xf2, 0xcf, 0x12, 0x14, 0x92, 0x5c, 0x8b,
-	0x28, 0x37, 0xe1, 0x34, 0xf7, 0x5d, 0xb3, 0x75, 0x03, 0x3b, 0xd8, 0xa5, 0x62, 0x8b, 0xa9, 0x84,
-	0x2d, 0xee, 0x13, 0xb7, 0xba, 0x81, 0x3d, 0x87, 0xb9, 0x58, 0x8b, 0x0c, 0xc4, 0x8e, 0x03, 0xa4,
-	0x49, 0x8a, 0xc6, 0x21, 0xb7, 0x65, 0xd9, 0xb6, 0xa6, 0x3b, 0x61, 0x4f, 0x67, 0x9c, 0xec, 0x56,
-	0x21, 0x14, 0x2d, 0x30, 0x09, 0x1a, 0x83, 0x3e, 0xea, 0x59, 0xd5, 0x2a, 0xf6, 0xb0, 0xc9, 0xd8,
-	0xd9, 0xab, 0x1e, 0x08, 0xe4, 0x2b, 0x70, 0x99, 0xc1, 0xbe, 0x1f, 0xbb, 0xd1, 0x12, 0x8b, 0xfa,
-	0x4c, 0x82, 0xc9, 0xc3, 0x34, 0x45, 0xb0, 0x4f, 0x61, 0x38, 0xe1, 0x82, 0x14, 0x01, 0x5f, 0x4e,
-	0x0a, 0xb8, 0xcd, 0xa5, 0x08, 0x16, 0xd9, 0x6d, 0x2b, 0xf2, 0x73, 0x09, 0x2e, 0xac, 0xb3, 0xeb,
-	0x8e, 0xe5, 0xa7, 0x42, 0xc8, 0xce, 0x23, 0x7e, 0x4b, 0x46, 0x85, 0x6c, 0x3f, 0xc0, 0xd9, 0x96,
-	0x03, 0xbc, 0x0a, 0x03, 0x07, 0xf7, 0xa0, 0x66, 0x99, 0x61, 0x77, 0xcb, 0xb6, 0xb7, 0xce, 0xd8,
-	0xbd, 0x59, 0x5e, 0x6f, 0x3c, 0xdf, 0x33, 0xd5, 0x7e, 0x3f, 0xf6, 0xe6, 0xcb, 0x3a, 0x14, 0xd3,
-	0x10, 0x89, 0x94, 0xbc, 0x07, 0x27, 0xc5, 0x55, 0x2e, 0x7a, 0xda, 0x78, 0x42, 0x1a, 0xb8, 0x0f,
-	0x6e, 0x1a, 0xf1, 0x4b, 0x58, 0xc9, 0x3f, 0x64, 0xe1, 0x54, 0x7c, 0x1d, 0x3d, 0x82, 0x41, 0x12,
-	0xed, 0x26, 0xc6, 0x04, 0x91, 0xe1, 0x52, 0xaa, 0xeb, 0x16, 0x78, 0x2b, 0x19, 0xf5, 0x34, 0x69,
-	0x16, 0x85, 0x37, 0x19, 0x27, 0x6a, 0xc8, 0x20, 0xd1, 0xf3, 0x27, 0x0f, 0x77, 0xb8, 0x6c, 0xd9,
-	0xf6, 0x4a, 0x46, 0xed, 0x63, 0xb6, 0xe1, 0x0b, 0x5a, 0x86, 0x1c, 0xd5, 0x77, 0xb0, 0xa7, 0x31,
-	0x11, 0x23, 0x5e, 0x6e, 0xee, 0x52, 0xaa, 0xa7, 0x8d, 0x50, 0x97, 0xb9, 0x5b, 0xc9, 0xa8, 0x40,
-	0x1b, 0x6f, 0x48, 0x83, 0xa1, 0x58, 0xa9, 0x44, 0xa0, 0xdd, 0xcc, 0xdb, 0x4c, 0x87, 0x6a, 0x31,
-	0xa7, 0x07, 0x35, 0x6b, 0x04, 0x3c, 0xe8, 0xb7, 0xc8, 0xd0, 0x45, 0x38, 0xc5, 0x1b, 0xd0, 0x36,
-	0xb6, 0xaa, 0xdb, 0x34, 0x7f, 0x82, 0xb5, 0xfb, 0x1c, 0x93, 0xad, 0x30, 0x11, 0x1a, 0x85, 0x3e,
-	0xbc, 0x87, 0x0d, 0xcd, 0x21, 0x26, 0xce, 0xf7, 0xb0, 0xf5, 0xde, 0x50, 0xb0, 0x4a, 0x4c, 0xbc,
-	0x38, 0x08, 0x03, 0x1c, 0x95, 0xe6, 0x60, 0xdf, 0xd7, 0xab, 0x58, 0xfe, 0x4a, 0x82, 0x91, 0xc4,
-	0x84, 0xa3, 0xcd, 0x56, 0x1a, 0xdc, 0x6a, 0x0e, 0x41, 0x8c, 0x84, 0xe5, 0xf6, 0x01, 0xf0, 0xc1,
-	0xd6, 0xd6, 0x52, 0x28, 0xe0, 0x8e, 0x1e, 0xcf, 0xb6, 0xf0, 0x03, 0x15, 0xa0, 0xd7, 0x77, 0xf5,
-	0x9a, 0xbf, 0x4d, 0x78, 0x0f, 0xe8, 0x55, 0x1b, 0xef, 0xf2, 0x8f, 0x12, 0x0c, 0x27, 0xd4, 0x0b,
-	0xcd, 0x03, 0x3b, 0x13, 0x7c, 0x7c, 0x10, 0xe4, 0x19, 0x4b, 0x19, 0x7b, 0xd8, 0x78, 0xa0, 0xb2,
-	0x29, 0x89, 0x3d, 0xa2, 0x9b, 0xd0, 0xc3, 0x2a, 0x1b, 0x1d, 0x9d, 0x7c, 0x5a, 0xaf, 0x14, 0x48,
-	0x85, 0x76, 0x98, 0xee, 0x58, 0xbf, 0xf2, 0xf3, 0xd9, 0x89, 0x6c, 0xa9, 0x5b, 0xcd, 0x1d, 0x34,
-	0x2c, 0x5f, 0x7e, 0xd6, 0x05, 0x83, 0xad, 0xac, 0x40, 0x33, 0x70, 0x82, 0x33, 0x89, 0xe3, 0x4c,
-	0xdd, 0x6e, 0x25, 0xa3, 0x72, 0x45, 0xb4, 0x09, 0x43, 0xb1, 0xf6, 0x21, 0x78, 0xd8, 0x95, 0xda,
-	0x75, 0xf9, 0x8e, 0xb1, 0x56, 0x14, 0xb9, 0x1b, 0xb4, 0x5b, 0x64, 0xe8, 0x09, 0xa0, 0x18, 0xb7,
-	0x35, 0x9f, 0xea, 0x34, 0xf0, 0x05, 0xc5, 0xa7, 0x8e, 0x40, 0xf1, 0x75, 0x66, 0xa0, 0x0e, 0xd2,
-	0x16, 0xc9, 0x62, 0x7f, 0xd3, 0xa1, 0x91, 0x7f, 0x92, 0xe0, 0x6c, 0xb2, 0x6d, 0x98, 0xc6, 0xa6,
-	0xcd, 0xf9, 0x90, 0x92, 0x23, 0x31, 0x95, 0x6b, 0x80, 0x3c, 0xec, 0xe8, 0x96, 0x6b, 0xb9, 0x55,
-	0x6d, 0x37, 0xd0, 0x5d, 0x1a, 0x38, 0xbe, 0xb8, 0x20, 0x86, 0x1a, 0x2b, 0x0f, 0xc5, 0x02, 0x7a,
-	0x1f, 0x8a, 0xa4, 0x46, 0x2d, 0xc7, 0xf2, 0xa9, 0x65, 0xe8, 0xb6, 0xbd, 0xcf, 0x5a, 0x00, 0x36,
-	0x0f, 0x4c, 0xf9, 0x68, 0x33, 0xd6, 0xac, 0xb5, 0xcc, 0x94, 0x22, 0x2f, 0x73, 0xdf, 0x01, 0x9c,
-	0x60, 0xd7, 0x04, 0xfa, 0x42, 0x82, 0xde, 0x68, 0x60, 0x46, 0xd3, 0x09, 0x59, 0x49, 0xf9, 0xea,
-	0x28, 0x94, 0xd2, 0x74, 0x5b, 0x3f, 0x3b, 0xe4, 0xa9, 0xcf, 0xff, 0xf8, 0xf7, 0xeb, 0xae, 0x4b,
-	0xe8, 0xa2, 0xd2, 0xe1, 0xbb, 0x51, 0xf9, 0xc4, 0x32, 0x3f, 0x45, 0x5f, 0x4a, 0x90, 0x8b, 0x4d,
-	0xfe, 0xe9, 0x80, 0xda, 0x3f, 0x41, 0x0a, 0x57, 0x0f, 0x03, 0x14, 0xfb, 0x94, 0x90, 0xdf, 0x62,
-	0x98, 0x8a, 0x68, 0xac, 0x13, 0x26, 0xf4, 0xab, 0x04, 0xf9, 0xb4, 0x11, 0x16, 0xcd, 0xbd, 0xd1,
-	0xbc, 0xcb, 0x31, 0x5e, 0x3f, 0xc6, 0x8c, 0x2c, 0xdf, 0x66, 0x58, 0x6f, 0xdc, 0x96, 0xa6, 0x65,
-	0x45, 0x49, 0xfc, 0x70, 0xd5, 0x5c, 0x62, 0x62, 0x8d, 0x12, 0xfe, 0xdf, 0x88, 0x81, 0xfc, 0x5d,
-	0x82, 0xb1, 0x4e, 0xd3, 0x24, 0x9a, 0x4f, 0xcb, 0xda, 0x11, 0x66, 0xe1, 0xc2, 0xbb, 0xc7, 0x33,
-	0x16, 0x71, 0x4d, 0xb2, 0xb8, 0x26, 0x50, 0x51, 0xe9, 0xf8, 0x63, 0x01, 0xfa, 0x45, 0x82, 0xd1,
-	0x0e, 0xa3, 0x24, 0xba, 0x9d, 0x86, 0xe2, 0xf0, 0x21, 0xb8, 0x30, 0x7f, 0x2c, 0x5b, 0x11, 0xc0,
-	0x65, 0x16, 0xc0, 0x38, 0xba, 0xd0, 0xf1, 0x17, 0x14, 0xf4, 0x9b, 0x04, 0xe7, 0x53, 0xc7, 0x31,
-	0x74, 0x2b, 0x0d, 0xc1, 0x61, 0xb3, 0x5e, 0xe1, 0x9d, 0x63, 0x58, 0x0a, 0xe4, 0x65, 0x86, 0xbc,
-	0x84, 0x26, 0x95, 0x23, 0xfd, 0x6a, 0x82, 0x5c, 0xe8, 0x6f, 0x9a, 0x98, 0xd1, 0xdb, 0x69, 0x7b,
-	0x27, 0xcd, 0xec, 0x85, 0x6b, 0x47, 0xd4, 0x16, 0xe8, 0x32, 0xe8, 0xb3, 0xa8, 0xa3, 0xb6, 0x8e,
-	0x6a, 0x68, 0xe6, 0xa8, 0x63, 0x53, 0x34, 0x67, 0x16, 0x66, 0xdf, 0xc0, 0x82, 0x03, 0x98, 0x91,
-	0x16, 0xd7, 0x5e, 0xbc, 0x2a, 0x4a, 0x2f, 0x5f, 0x15, 0xa5, 0x7f, 0x5e, 0x15, 0xa5, 0xe7, 0xaf,
-	0x8b, 0x99, 0x97, 0xaf, 0x8b, 0x99, 0x3f, 0x5f, 0x17, 0x33, 0x1f, 0xdd, 0xac, 0x5a, 0x74, 0x3b,
-	0xa8, 0x94, 0x0d, 0xe2, 0x34, 0x27, 0xaf, 0x7e, 0xe3, 0x1a, 0x1b, 0x06, 0x94, 0x86, 0x64, 0x8f,
-	0x27, 0x94, 0xee, 0xd7, 0xb0, 0x5f, 0xe9, 0x61, 0xe2, 0xeb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff,
-	0x4d, 0x56, 0xef, 0xe3, 0x00, 0x14, 0x00, 0x00,
+	// 1407 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcf, 0x6f, 0xdc, 0xc4,
+	0x17, 0x5f, 0x27, 0xf9, 0xb6, 0x9b, 0x97, 0xfe, 0xfa, 0x4e, 0x9a, 0x76, 0xeb, 0xa4, 0x9b, 0xd4,
+	0xd0, 0x74, 0x93, 0xd2, 0x75, 0x93, 0x56, 0x55, 0x69, 0x50, 0x51, 0x12, 0xd1, 0x1f, 0x52, 0x43,
+	0x83, 0xfb, 0x83, 0x0a, 0x2a, 0x59, 0xb3, 0xf6, 0xac, 0x63, 0xd5, 0xf6, 0x6c, 0xec, 0xd9, 0x55,
+	0x22, 0x84, 0x40, 0x1c, 0xb8, 0x00, 0x12, 0x12, 0x07, 0x0e, 0x48, 0x5c, 0x38, 0x73, 0xe4, 0x88,
+	0x80, 0x5b, 0x8f, 0x95, 0xb8, 0x70, 0x40, 0x08, 0xb5, 0x9c, 0xf9, 0x1b, 0x90, 0x67, 0xc6, 0x9b,
+	0x75, 0x6c, 0xef, 0x26, 0xb9, 0xec, 0xda, 0x6f, 0xde, 0x7b, 0xf3, 0x79, 0xef, 0x7d, 0x66, 0xde,
+	0x33, 0x9c, 0xb5, 0xb7, 0xed, 0xad, 0x56, 0x48, 0x19, 0xb5, 0xa8, 0xa7, 0x5b, 0x1e, 0x6d, 0xe8,
+	0x9b, 0x6d, 0x12, 0x6e, 0xd7, 0xb9, 0x0c, 0xfd, 0xbf, 0x77, 0xb9, 0x1e, 0x2f, 0xab, 0x27, 0x1d,
+	0xea, 0x50, 0x2e, 0xd2, 0xe3, 0x27, 0xa1, 0xa8, 0x4e, 0x39, 0x94, 0x3a, 0x1e, 0xd1, 0x71, 0xcb,
+	0xd5, 0x71, 0x10, 0x50, 0x86, 0x99, 0x4b, 0x83, 0x48, 0xae, 0xce, 0x5b, 0x34, 0xf2, 0x69, 0xa4,
+	0x37, 0x70, 0x44, 0x84, 0x7f, 0xbd, 0xb3, 0xd0, 0x20, 0x0c, 0x2f, 0xe8, 0x2d, 0xec, 0xb8, 0x01,
+	0x57, 0x96, 0xba, 0x7a, 0x16, 0x51, 0xc3, 0xa3, 0xd6, 0x33, 0x33, 0xc4, 0x8c, 0x98, 0x9e, 0xeb,
+	0xbb, 0xcc, 0xb4, 0x68, 0xd0, 0x74, 0x1d, 0x69, 0x70, 0x2e, 0x6b, 0x10, 0xff, 0x98, 0x2d, 0xec,
+	0x86, 0x52, 0xe5, 0x72, 0x56, 0x85, 0x6c, 0xb6, 0x5d, 0xb6, 0x6d, 0x32, 0x97, 0x84, 0x79, 0x4e,
+	0x73, 0xf2, 0x42, 0x43, 0x9b, 0x24, 0x0e, 0xa7, 0xb3, 0xcb, 0x3e, 0x66, 0xd6, 0x06, 0x49, 0x22,
+	0xbe, 0x98, 0x55, 0xf0, 0xdc, 0xcd, 0xb6, 0x6b, 0x8b, 0xbc, 0xa4, 0x37, 0x9b, 0xcc, 0xf1, 0x46,
+	0x3a, 0x72, 0xf1, 0x66, 0x6a, 0xd1, 0x0d, 0x6c, 0xb2, 0x45, 0x42, 0x9d, 0x36, 0x9b, 0xa6, 0xb5,
+	0x81, 0xdd, 0xc0, 0x6c, 0xb7, 0x6c, 0xcc, 0x48, 0x94, 0x95, 0x08, 0x7b, 0x6d, 0x0e, 0x4e, 0xbf,
+	0x17, 0x67, 0xfc, 0x36, 0x61, 0xab, 0x1e, 0x6d, 0xac, 0x63, 0x37, 0x34, 0xc8, 0x66, 0x9b, 0x44,
+	0x0c, 0x1d, 0x83, 0x21, 0xd7, 0xae, 0x28, 0x33, 0x4a, 0xed, 0xa8, 0x31, 0xe4, 0xda, 0xda, 0xfb,
+	0x30, 0xc1, 0x55, 0x77, 0xf4, 0xa2, 0x16, 0x0d, 0x22, 0x82, 0x6e, 0xc2, 0x68, 0x37, 0xa5, 0x5c,
+	0x7f, 0x6c, 0x71, 0xb2, 0x9e, 0xa1, 0x46, 0x3d, 0xb1, 0x5b, 0x19, 0x79, 0xfe, 0xd7, 0x74, 0xc9,
+	0x28, 0x5b, 0xf2, 0x5d, 0xc3, 0x12, 0xc3, 0xb2, 0xe7, 0xed, 0xc6, 0x70, 0x0b, 0x60, 0x87, 0x02,
+	0xd2, 0xf7, 0x6c, 0x5d, 0xf0, 0xa5, 0x1e, 0xf3, 0xa5, 0x2e, 0xf8, 0x28, 0xf9, 0x52, 0x5f, 0xc7,
+	0x0e, 0x91, 0xb6, 0x46, 0x8f, 0xa5, 0xf6, 0x83, 0x02, 0x95, 0x14, 0xf8, 0x65, 0xcf, 0x2b, 0xc2,
+	0x3f, 0xbc, 0x4f, 0xfc, 0xe8, 0x76, 0x0a, 0xe4, 0x10, 0x07, 0x79, 0x61, 0x20, 0x48, 0xb1, 0x79,
+	0x0a, 0xe5, 0x9f, 0x0a, 0x4c, 0xaf, 0x91, 0xce, 0xbb, 0xd4, 0x26, 0x0f, 0x69, 0xfc, 0xbb, 0x8a,
+	0x3d, 0xab, 0xed, 0xf1, 0xc5, 0x24, 0x23, 0x4f, 0xe1, 0x94, 0x20, 0x7c, 0x2b, 0xa4, 0x2d, 0x1a,
+	0x91, 0xd0, 0x94, 0xd4, 0xea, 0x66, 0x27, 0x8b, 0xfc, 0x31, 0xf6, 0x62, 0x6a, 0xd1, 0x70, 0x8d,
+	0x74, 0xd6, 0x84, 0xb6, 0x71, 0x92, 0x7b, 0x59, 0x97, 0x4e, 0xa4, 0x14, 0x7d, 0x08, 0x13, 0x9d,
+	0x44, 0xd9, 0xf4, 0x49, 0xc7, 0xf4, 0x09, 0x0b, 0x5d, 0x2b, 0xea, 0x46, 0x95, 0x75, 0x9e, 0x02,
+	0xbc, 0x26, 0xd4, 0x8d, 0xf1, 0x4e, 0xef, 0x96, 0x42, 0xa8, 0xfd, 0xab, 0xc0, 0x4c, 0x71, 0x78,
+	0xb2, 0x18, 0x0e, 0x1c, 0x0e, 0x49, 0xd4, 0xf6, 0x58, 0x24, 0x4b, 0x71, 0x7b, 0xd0, 0x9e, 0x39,
+	0x5e, 0x62, 0x85, 0xe5, 0xc0, 0x7e, 0x4c, 0xbd, 0xb6, 0x4f, 0xd6, 0x49, 0x18, 0x97, 0x4e, 0x96,
+	0x2d, 0xf1, 0xae, 0x62, 0x18, 0xcf, 0xd1, 0x42, 0x33, 0x70, 0xa4, 0x4b, 0x06, 0xb3, 0xcb, 0x7f,
+	0x48, 0x8a, 0x7d, 0xd7, 0x46, 0x27, 0x60, 0xd8, 0x27, 0x1d, 0x9e, 0x91, 0x21, 0x23, 0x7e, 0x44,
+	0xa7, 0xe0, 0x50, 0x87, 0x3b, 0xa9, 0x0c, 0xcf, 0x28, 0xb5, 0x11, 0x43, 0xbe, 0x69, 0xf3, 0x50,
+	0xe3, 0xa4, 0x7b, 0x87, 0xdf, 0x26, 0x0f, 0x5d, 0x12, 0xde, 0x8b, 0xef, 0x92, 0x55, 0x7e, 0xba,
+	0xdb, 0x61, 0x6f, 0x5d, 0xb5, 0xef, 0x14, 0x98, 0xdb, 0x83, 0xb2, 0xcc, 0x52, 0x00, 0x95, 0xa2,
+	0x2b, 0x4a, 0xf2, 0x40, 0xcf, 0x49, 0x5b, 0x3f, 0xd7, 0x32, 0x3d, 0x13, 0x24, 0x4f, 0x47, 0x9b,
+	0x83, 0x0b, 0x1c, 0xdc, 0x4a, 0x4c, 0x1a, 0x03, 0x33, 0x52, 0x1c, 0xc8, 0xb7, 0x8a, 0x8c, 0xba,
+	0xaf, 0xae, 0x8c, 0xe3, 0x19, 0x9c, 0x2e, 0xb8, 0xbe, 0x65, 0x18, 0xf5, 0x9c, 0x30, 0xfa, 0x38,
+	0x96, 0x51, 0x08, 0x72, 0xef, 0x52, 0xd1, 0x9e, 0xc0, 0x19, 0x0e, 0xec, 0x01, 0xc3, 0x8c, 0x34,
+	0xdb, 0xde, 0xfd, 0xf8, 0xca, 0x4e, 0xce, 0xd5, 0x12, 0x94, 0xf9, 0x15, 0x9e, 0xd4, 0x7c, 0x6c,
+	0x51, 0xcd, 0xd9, 0x9a, 0x9b, 0xdc, 0xb5, 0x13, 0x2e, 0x51, 0xf1, 0xaa, 0xfd, 0xa4, 0x80, 0x9a,
+	0xe7, 0x5a, 0x46, 0xf9, 0x04, 0x8e, 0x0b, 0xdf, 0x2d, 0x0f, 0x5b, 0xc4, 0x27, 0x01, 0x93, 0x5b,
+	0xcc, 0xe5, 0x6c, 0x71, 0x8f, 0x06, 0xce, 0x43, 0x12, 0xfa, 0xdc, 0xc5, 0x7a, 0x62, 0x20, 0x77,
+	0x3c, 0x46, 0x53, 0x52, 0x34, 0x0d, 0x63, 0x4d, 0xd7, 0xf3, 0x4c, 0xec, 0xd3, 0x76, 0xc0, 0x38,
+	0x27, 0x47, 0x0c, 0x88, 0x45, 0xcb, 0x5c, 0x82, 0xa6, 0x60, 0x94, 0x85, 0xae, 0xe3, 0x90, 0x90,
+	0xd8, 0x9c, 0x9d, 0x65, 0x63, 0x47, 0xa0, 0x5d, 0x80, 0xf3, 0x1c, 0xf6, 0xbd, 0x9e, 0xe6, 0x93,
+	0x5b, 0xd4, 0xcf, 0x15, 0x98, 0x1d, 0xa4, 0x29, 0x83, 0x7d, 0x0a, 0xe3, 0x39, 0xbd, 0x4c, 0x06,
+	0x7c, 0x3e, 0x2f, 0xe0, 0x8c, 0x4b, 0x19, 0x2c, 0xf2, 0x32, 0x2b, 0xda, 0x32, 0x9c, 0x7d, 0xc0,
+	0x42, 0x82, 0x45, 0x7a, 0x1a, 0x94, 0x3e, 0x7b, 0x24, 0xfa, 0x59, 0x52, 0xc7, 0xec, 0xf9, 0x1d,
+	0x4e, 0x9f, 0x5f, 0x0d, 0x43, 0xb5, 0xc8, 0x85, 0x0c, 0xe1, 0x6d, 0x38, 0x2c, 0xbb, 0xa4, 0xbc,
+	0x83, 0xa6, 0x73, 0x60, 0x0b, 0x1f, 0xc2, 0x34, 0xe1, 0x83, 0xb4, 0xd2, 0x3e, 0x1d, 0x82, 0x23,
+	0xbd, 0xeb, 0xe8, 0x11, 0x9c, 0xa0, 0xc9, 0x6e, 0xb2, 0x03, 0xcb, 0x8c, 0xd4, 0x0a, 0x5d, 0xef,
+	0x82, 0x77, 0xa7, 0x64, 0x1c, 0xa7, 0x69, 0x51, 0xdc, 0x79, 0x04, 0xb1, 0xe2, 0x8a, 0xcb, 0x3b,
+	0x7a, 0x76, 0xb0, 0xc3, 0x5b, 0xae, 0xe7, 0xdd, 0x29, 0x19, 0xa3, 0xdc, 0x36, 0x7e, 0x41, 0xe7,
+	0xe0, 0x88, 0x38, 0x87, 0x1b, 0xc4, 0x75, 0x36, 0x18, 0x67, 0xca, 0x51, 0x63, 0x8c, 0xcb, 0xee,
+	0x70, 0x11, 0x9a, 0x84, 0x51, 0xb2, 0x45, 0x2c, 0xd3, 0xa7, 0x36, 0xa9, 0x8c, 0xf0, 0xf5, 0x72,
+	0x2c, 0x58, 0xa3, 0x36, 0x59, 0x39, 0x01, 0xc7, 0x44, 0x54, 0xa6, 0x4f, 0xa2, 0x08, 0x3b, 0x44,
+	0xfb, 0x4a, 0x81, 0x89, 0xdc, 0x38, 0xd0, 0x93, 0xdd, 0xd9, 0xbd, 0x9e, 0x46, 0x2c, 0x87, 0x98,
+	0x7a, 0x76, 0x64, 0xb9, 0xdf, 0x6c, 0xae, 0xc6, 0x02, 0xe1, 0xe8, 0xf1, 0xc2, 0xae, 0xb4, 0x23,
+	0x15, 0xca, 0x51, 0x80, 0x5b, 0xd1, 0x06, 0x15, 0x47, 0xa1, 0x6c, 0x74, 0xdf, 0xb5, 0x1f, 0x15,
+	0x18, 0xcf, 0x49, 0x03, 0x5a, 0x02, 0xce, 0x0d, 0xd1, 0x45, 0x65, 0x4d, 0xa6, 0x0a, 0xba, 0x3f,
+	0xef, 0x92, 0x06, 0x1f, 0x16, 0xf8, 0x23, 0xba, 0x06, 0x87, 0x78, 0x0e, 0xe3, 0xfe, 0x18, 0x47,
+	0x52, 0x29, 0xba, 0x32, 0x24, 0x52, 0xa9, 0x1d, 0xa7, 0xbb, 0xe7, 0xd8, 0x46, 0x95, 0xe1, 0x99,
+	0xe1, 0xda, 0x88, 0x31, 0xb6, 0x73, 0x6e, 0xa3, 0xc5, 0xef, 0x01, 0xfe, 0xc7, 0x4f, 0x1c, 0xfa,
+	0x42, 0x81, 0x72, 0x32, 0x7b, 0xa0, 0xf9, 0x9c, 0x1d, 0x0a, 0x06, 0x38, 0xb5, 0x56, 0xa4, 0xbb,
+	0x7b, 0x82, 0xd3, 0xe6, 0x3e, 0xfb, 0xfd, 0x9f, 0x6f, 0x86, 0x5e, 0x43, 0xe7, 0xf4, 0x3e, 0xd3,
+	0xb2, 0xfe, 0x91, 0x6b, 0x7f, 0x8c, 0xbe, 0x54, 0x60, 0xac, 0x67, 0x88, 0x2a, 0x06, 0x94, 0x9d,
+	0xe6, 0xd4, 0x8b, 0x83, 0x00, 0xf5, 0x4c, 0x65, 0xda, 0xeb, 0x1c, 0x53, 0x15, 0x4d, 0xf5, 0xc3,
+	0x84, 0x7e, 0x51, 0xa0, 0x52, 0x34, 0x0d, 0xa0, 0xc5, 0x7d, 0x8d, 0x0e, 0x02, 0xe3, 0x95, 0x03,
+	0x8c, 0x1b, 0xda, 0x0d, 0x8e, 0xf5, 0xea, 0x0d, 0x65, 0x5e, 0xd3, 0xf5, 0xdc, 0x71, 0xdd, 0x0c,
+	0xa8, 0x4d, 0x4c, 0x46, 0xc5, 0xbf, 0xd5, 0x03, 0xf2, 0x37, 0x05, 0xa6, 0xfa, 0x35, 0x66, 0xb4,
+	0x54, 0x94, 0xb5, 0x3d, 0x8c, 0x15, 0xea, 0x5b, 0x07, 0x33, 0x96, 0x71, 0xcd, 0xf2, 0xb8, 0x66,
+	0x50, 0x55, 0xef, 0xfb, 0x89, 0x84, 0x7e, 0x56, 0x60, 0xb2, 0x4f, 0x57, 0x46, 0x37, 0x8a, 0x50,
+	0x0c, 0x9e, 0x27, 0xd4, 0xa5, 0x03, 0xd9, 0xca, 0x00, 0xce, 0xf3, 0x00, 0xa6, 0xd1, 0xd9, 0xbe,
+	0xdf, 0x8d, 0xe8, 0x57, 0x05, 0xce, 0x14, 0x76, 0x36, 0x74, 0xbd, 0x08, 0xc1, 0xa0, 0xb6, 0xa9,
+	0xbe, 0x79, 0x00, 0x4b, 0x89, 0xbc, 0xce, 0x91, 0xd7, 0xd0, 0xac, 0xbe, 0xa7, 0x6f, 0x45, 0x14,
+	0xc0, 0xd1, 0xd4, 0xf0, 0x81, 0xde, 0x28, 0xda, 0x3b, 0x6f, 0xfc, 0x51, 0x2f, 0xed, 0x51, 0x5b,
+	0xa2, 0x2b, 0xa1, 0x4f, 0xe0, 0x54, 0x7e, 0x17, 0x45, 0x97, 0xf7, 0xda, 0xd1, 0x92, 0x9e, 0xad,
+	0x2e, 0xec, 0xc3, 0x42, 0x00, 0xb8, 0xac, 0xac, 0xac, 0x3f, 0x7f, 0x59, 0x55, 0x5e, 0xbc, 0xac,
+	0x2a, 0x7f, 0xbf, 0xac, 0x2a, 0x5f, 0xbf, 0xaa, 0x96, 0x5e, 0xbc, 0xaa, 0x96, 0xfe, 0x78, 0x55,
+	0x2d, 0x7d, 0x70, 0xcd, 0x71, 0xd9, 0x46, 0xbb, 0x51, 0xb7, 0xa8, 0x9f, 0x4e, 0x5e, 0xe7, 0xea,
+	0x25, 0xde, 0x50, 0xf4, 0xae, 0x64, 0x4b, 0x24, 0x94, 0x6d, 0xb7, 0x48, 0xd4, 0x38, 0xc4, 0xc5,
+	0x57, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0xc6, 0xcc, 0x0a, 0xf6, 0x10, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -2355,20 +2127,6 @@ func (m *StreamOrderbookUpdatesRequest) MarshalToSizedBuffer(dAtA []byte) (int,
 	_ = i
 	var l int
 	_ = l
-	if len(m.SubaccountIds) > 0 {
-		for iNdEx := len(m.SubaccountIds) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.SubaccountIds[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintQuery(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
 	if len(m.ClobPairId) > 0 {
 		dAtA12 := make([]byte, len(m.ClobPairId)*10)
 		var j11 int
@@ -2450,12 +2208,12 @@ func (m *StreamUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	if m.ExecMode != 0 {
 		i = encodeVarintQuery(dAtA, i, uint64(m.ExecMode))
 		i--
-		dAtA[i] = 0x30
+		dAtA[i] = 0x20
 	}
 	if m.BlockHeight != 0 {
 		i = encodeVarintQuery(dAtA, i, uint64(m.BlockHeight))
 		i--
-		dAtA[i] = 0x28
+		dAtA[i] = 0x18
 	}
 	if m.UpdateMessage != nil {
 		{
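The byte constants in the hunk above change in lockstep with the proto field renumbering: a protobuf key byte is (field_number << 3) | wire_type, and both block_height and exec_mode are varints (wire type 0), so moving them from fields 5 and 6 to fields 3 and 4 turns 0x28/0x30 into 0x18/0x20. A quick check:

```go
package main

import "fmt"

// key returns the protobuf key byte for a field: (field_number << 3) | wire_type.
func key(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	// block_height moves from field 5 to field 3, exec_mode from 6 to 4;
	// both use wire type 0 (varint).
	fmt.Printf("block_height: %#x -> %#x\n", key(5, 0), key(3, 0)) // 0x28 -> 0x18
	fmt.Printf("exec_mode:    %#x -> %#x\n", key(6, 0), key(4, 0)) // 0x30 -> 0x20
}
```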
@@ -2511,48 +2269,6 @@ func (m *StreamUpdate_OrderFill) MarshalToSizedBuffer(dAtA []byte) (int, error)
 	}
 	return len(dAtA) - i, nil
 }
-func (m *StreamUpdate_TakerOrder) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamUpdate_TakerOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.TakerOrder != nil {
-		{
-			size, err := m.TakerOrder.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintQuery(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *StreamUpdate_SubaccountUpdate) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamUpdate_SubaccountUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.SubaccountUpdate != nil {
-		{
-			size, err := m.SubaccountUpdate.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintQuery(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	return len(dAtA) - i, nil
-}
 func (m *StreamOrderbookUpdate) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -2621,20 +2337,20 @@ func (m *StreamOrderbookFill) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	var l int
 	_ = l
 	if len(m.FillAmounts) > 0 {
-		dAtA18 := make([]byte, len(m.FillAmounts)*10)
-		var j17 int
+		dAtA16 := make([]byte, len(m.FillAmounts)*10)
+		var j15 int
 		for _, num := range m.FillAmounts {
 			for num >= 1<<7 {
-				dAtA18[j17] = uint8(uint64(num)&0x7f | 0x80)
+				dAtA16[j15] = uint8(uint64(num)&0x7f | 0x80)
 				num >>= 7
-				j17++
+				j15++
 			}
-			dAtA18[j17] = uint8(num)
-			j17++
+			dAtA16[j15] = uint8(num)
+			j15++
 		}
-		i -= j17
-		copy(dAtA[i:], dAtA18[:j17])
-		i = encodeVarintQuery(dAtA, i, uint64(j17))
+		i -= j15
+		copy(dAtA[i:], dAtA16[:j15])
+		i = encodeVarintQuery(dAtA, i, uint64(j15))
 		i--
 		dAtA[i] = 0x1a
 	}
@@ -2667,130 +2383,6 @@ func (m *StreamOrderbookFill) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
-func (m *StreamTakerOrder) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StreamTakerOrder) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamTakerOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.TakerOrderStatus != nil {
-		{
-			size, err := m.TakerOrderStatus.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintQuery(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.TakerOrder != nil {
-		{
-			size := m.TakerOrder.Size()
-			i -= size
-			if _, err := m.TakerOrder.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *StreamTakerOrder_Order) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamTakerOrder_Order) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.Order != nil {
-		{
-			size, err := m.Order.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintQuery(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *StreamTakerOrder_LiquidationOrder) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamTakerOrder_LiquidationOrder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.LiquidationOrder != nil {
-		{
-			size, err := m.LiquidationOrder.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintQuery(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *StreamTakerOrderStatus) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StreamTakerOrderStatus) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamTakerOrderStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.OptimisticallyFilledQuantums != 0 {
-		i = encodeVarintQuery(dAtA, i, uint64(m.OptimisticallyFilledQuantums))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.RemainingQuantums != 0 {
-		i = encodeVarintQuery(dAtA, i, uint64(m.RemainingQuantums))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.OrderStatus != 0 {
-		i = encodeVarintQuery(dAtA, i, uint64(m.OrderStatus))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
 func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
 	offset -= sovQuery(v)
 	base := offset
@@ -3008,12 +2600,6 @@ func (m *StreamOrderbookUpdatesRequest) Size() (n int) {
 		}
 		n += 1 + sovQuery(uint64(l)) + l
 	}
-	if len(m.SubaccountIds) > 0 {
-		for _, e := range m.SubaccountIds {
-			l = e.Size()
-			n += 1 + l + sovQuery(uint64(l))
-		}
-	}
 	return n
 }
 
@@ -3074,41 +2660,17 @@ func (m *StreamUpdate_OrderFill) Size() (n int) {
 	}
 	return n
 }
-func (m *StreamUpdate_TakerOrder) Size() (n int) {
+func (m *StreamOrderbookUpdate) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	if m.TakerOrder != nil {
-		l = m.TakerOrder.Size()
-		n += 1 + l + sovQuery(uint64(l))
-	}
-	return n
-}
-func (m *StreamUpdate_SubaccountUpdate) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.SubaccountUpdate != nil {
-		l = m.SubaccountUpdate.Size()
-		n += 1 + l + sovQuery(uint64(l))
-	}
-	return n
-}
-func (m *StreamOrderbookUpdate) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Updates) > 0 {
-		for _, e := range m.Updates {
-			l = e.Size()
-			n += 1 + l + sovQuery(uint64(l))
-		}
+	if len(m.Updates) > 0 {
+		for _, e := range m.Updates {
+			l = e.Size()
+			n += 1 + l + sovQuery(uint64(l))
+		}
 	}
 	if m.Snapshot {
 		n += 2
@@ -3142,64 +2704,6 @@ func (m *StreamOrderbookFill) Size() (n int) {
 	return n
 }
 
-func (m *StreamTakerOrder) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.TakerOrder != nil {
-		n += m.TakerOrder.Size()
-	}
-	if m.TakerOrderStatus != nil {
-		l = m.TakerOrderStatus.Size()
-		n += 1 + l + sovQuery(uint64(l))
-	}
-	return n
-}
-
-func (m *StreamTakerOrder_Order) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Order != nil {
-		l = m.Order.Size()
-		n += 1 + l + sovQuery(uint64(l))
-	}
-	return n
-}
-func (m *StreamTakerOrder_LiquidationOrder) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.LiquidationOrder != nil {
-		l = m.LiquidationOrder.Size()
-		n += 1 + l + sovQuery(uint64(l))
-	}
-	return n
-}
-func (m *StreamTakerOrderStatus) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.OrderStatus != 0 {
-		n += 1 + sovQuery(uint64(m.OrderStatus))
-	}
-	if m.RemainingQuantums != 0 {
-		n += 1 + sovQuery(uint64(m.RemainingQuantums))
-	}
-	if m.OptimisticallyFilledQuantums != 0 {
-		n += 1 + sovQuery(uint64(m.OptimisticallyFilledQuantums))
-	}
-	return n
-}
-
 func sovQuery(x uint64) (n int) {
 	return (math_bits.Len64(x|1) + 6) / 7
 }
@@ -4578,40 +4082,6 @@ func (m *StreamOrderbookUpdatesRequest) Unmarshal(dAtA []byte) error {
 			} else {
 				return fmt.Errorf("proto: wrong wireType = %d for field ClobPairId", wireType)
 			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SubaccountIds", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.SubaccountIds = append(m.SubaccountIds, &types.SubaccountId{})
-			if err := m.SubaccountIds[len(m.SubaccountIds)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipQuery(dAtA[iNdEx:])
@@ -4817,76 +4287,6 @@ func (m *StreamUpdate) Unmarshal(dAtA []byte) error {
 			m.UpdateMessage = &StreamUpdate_OrderFill{v}
 			iNdEx = postIndex
 		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TakerOrder", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &StreamTakerOrder{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.UpdateMessage = &StreamUpdate_TakerOrder{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SubaccountUpdate", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &types.StreamSubaccountUpdate{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.UpdateMessage = &StreamUpdate_SubaccountUpdate{v}
-			iNdEx = postIndex
-		case 5:
 			if wireType != 0 {
 				return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType)
 			}
@@ -4905,7 +4305,7 @@ func (m *StreamUpdate) Unmarshal(dAtA []byte) error {
 					break
 				}
 			}
-		case 6:
+		case 4:
 			if wireType != 0 {
 				return fmt.Errorf("proto: wrong wireType = %d for field ExecMode", wireType)
 			}
@@ -5003,7 +4403,7 @@ func (m *StreamOrderbookUpdate) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Updates = append(m.Updates, types1.OffChainUpdateV1{})
+			m.Updates = append(m.Updates, types.OffChainUpdateV1{})
 			if err := m.Updates[len(m.Updates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -5245,269 +4645,6 @@ func (m *StreamOrderbookFill) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *StreamTakerOrder) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowQuery
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StreamTakerOrder: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StreamTakerOrder: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &Order{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.TakerOrder = &StreamTakerOrder_Order{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LiquidationOrder", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &StreamLiquidationOrder{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.TakerOrder = &StreamTakerOrder_LiquidationOrder{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TakerOrderStatus", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthQuery
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.TakerOrderStatus == nil {
-				m.TakerOrderStatus = &StreamTakerOrderStatus{}
-			}
-			if err := m.TakerOrderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipQuery(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *StreamTakerOrderStatus) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowQuery
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StreamTakerOrderStatus: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StreamTakerOrderStatus: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field OrderStatus", wireType)
-			}
-			m.OrderStatus = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.OrderStatus |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RemainingQuantums", wireType)
-			}
-			m.RemainingQuantums = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RemainingQuantums |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field OptimisticallyFilledQuantums", wireType)
-			}
-			m.OptimisticallyFilledQuantums = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowQuery
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.OptimisticallyFilledQuantums |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipQuery(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthQuery
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
 func skipQuery(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
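Taken together, the query.pb.go changes leave full node streaming consumers with a request keyed only by CLOB pair ids and a StreamUpdate oneof that carries just orderbook updates and fills. The sketch below shows roughly what a consumer loop looks like after this change; the field and type names come from the generated code above, while the endpoint, pair ids, and error handling are placeholders:

```go
package main

import (
	"context"
	"log"

	clobtypes "github.com/dydxprotocol/v4-chain/protocol/x/clob/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint; a real consumer would point at a full node's gRPC port.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := clobtypes.NewQueryClient(conn)
	// The request now carries only CLOB pair ids; subaccount_ids is gone.
	stream, err := client.StreamOrderbookUpdates(context.Background(), &clobtypes.StreamOrderbookUpdatesRequest{
		ClobPairId: []uint32{0, 1}, // illustrative pair ids
	})
	if err != nil {
		log.Fatal(err)
	}

	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		for _, update := range resp.Updates {
			// Only two oneof variants remain on StreamUpdate after this change.
			switch msg := update.UpdateMessage.(type) {
			case *clobtypes.StreamUpdate_OrderbookUpdate:
				log.Printf("orderbook: %d updates, snapshot=%t",
					len(msg.OrderbookUpdate.Updates), msg.OrderbookUpdate.Snapshot)
			case *clobtypes.StreamUpdate_OrderFill:
				log.Printf("fill: %d fill amounts", len(msg.OrderFill.FillAmounts))
			}
		}
	}
}
```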
diff --git a/protocol/x/subaccounts/keeper/isolated_subaccount.go b/protocol/x/subaccounts/keeper/isolated_subaccount.go
index 4db1a60a42..9d4f4cebb5 100644
--- a/protocol/x/subaccounts/keeper/isolated_subaccount.go
+++ b/protocol/x/subaccounts/keeper/isolated_subaccount.go
@@ -23,7 +23,7 @@ import (
 // caused a failure, if any.
 func (k Keeper) checkIsolatedSubaccountConstraints(
 	ctx sdk.Context,
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) (
 	success bool,
@@ -59,7 +59,7 @@ func (k Keeper) checkIsolatedSubaccountConstraints(
 //   - a subaccount with no positions cannot be updated to have positions in multiple isolated
 //     perpetuals or a combination of isolated and non-isolated perpetuals
 func isValidIsolatedPerpetualUpdates(
-	settledUpdate types.SettledUpdate,
+	settledUpdate SettledUpdate,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) (types.UpdateResult, error) {
 	// If there are no perpetual updates, then this update does not violate constraints for isolated
@@ -141,7 +141,7 @@ func isValidIsolatedPerpetualUpdates(
 // The input `settledUpdate` must have an updated subaccount (`settledUpdate.SettledSubaccount`),
 // so all the updates must have been applied already to the subaccount.
 func GetIsolatedPerpetualStateTransition(
-	settledUpdateWithUpdatedSubaccount types.SettledUpdate,
+	settledUpdateWithUpdatedSubaccount SettledUpdate,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) (*types.IsolatedPerpetualPositionStateTransition, error) {
 	// This subaccount needs to have had the updates in the `settledUpdate` already applied to it.
@@ -317,7 +317,7 @@ func (k *Keeper) transferCollateralForIsolatedPerpetual(
 // Note: This uses the `x/bank` keeper and modifies `x/bank` state.
 func (k *Keeper) computeAndExecuteCollateralTransfer(
 	ctx sdk.Context,
-	settledUpdateWithUpdatedSubaccount types.SettledUpdate,
+	settledUpdateWithUpdatedSubaccount SettledUpdate,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) error {
 	// The subaccount in `settledUpdateWithUpdatedSubaccount` already has the perpetual updates
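These signature changes replace types.SettledUpdate with a SettledUpdate type local to the keeper package. The definition itself is outside this diff; the sketch below shows roughly what such a type looks like, with field names beyond SettledSubaccount inferred from the call sites here and therefore not authoritative:

```go
package keeper

import (
	types "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
)

// SettledUpdate sketches the keeper-local type that these signatures now use
// in place of types.SettledUpdate. Only SettledSubaccount appears directly in
// this diff; the two slice fields are inferred and may not match the actual
// definition.
type SettledUpdate struct {
	// The subaccount with all funding payments already settled.
	SettledSubaccount types.Subaccount
	// Changes to apply to the subaccount's asset positions.
	AssetUpdates []types.AssetUpdate
	// Changes to apply to the subaccount's perpetual positions.
	PerpetualUpdates []types.PerpetualUpdate
}
```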
diff --git a/protocol/x/subaccounts/keeper/isolated_subaccount_test.go b/protocol/x/subaccounts/keeper/isolated_subaccount_test.go
index b42046a52f..5b226cdd0b 100644
--- a/protocol/x/subaccounts/keeper/isolated_subaccount_test.go
+++ b/protocol/x/subaccounts/keeper/isolated_subaccount_test.go
@@ -16,7 +16,7 @@ import (
 func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 	tests := map[string]struct {
 		// parameters
-		settledUpdateWithUpdatedSubaccount types.SettledUpdate
+		settledUpdateWithUpdatedSubaccount keeper.SettledUpdate
 		perpetuals                         []perptypes.Perpetual
 
 		// expectation
@@ -24,7 +24,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 		expectedErr             error
 	}{
 		`If no perpetual updates, nil state transition is returned`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id:                 &constants.Alice_Num0,
 					PerpetualPositions: nil,
@@ -37,7 +37,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedStateTransition: nil,
 		},
 		`If single non-isolated perpetual updates, nil state transition is returned`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id:                 &constants.Alice_Num0,
 					PerpetualPositions: nil,
@@ -57,7 +57,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedStateTransition: nil,
 		},
 		`If multiple non-isolated perpetual updates, nil state transition is returned`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id:                 &constants.Alice_Num0,
 					PerpetualPositions: nil,
@@ -82,7 +82,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedStateTransition: nil,
 		},
 		`If multiple non-isolated perpetual positions, nil state transition is returned`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
@@ -106,7 +106,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedStateTransition: nil,
 		},
 		`If single isolated perpetual update, no perpetual position, state transition is returned for closed position`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id:                 &constants.Alice_Num0,
 					PerpetualPositions: nil,
@@ -139,7 +139,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 		},
 		`If single isolated perpetual update, existing perpetual position with same size, state transition is returned for
 		opened position`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
@@ -177,7 +177,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 		},
 		`If single isolated perpetual update, existing perpetual position with different size, nil state transition
 		returned`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
@@ -209,7 +209,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedStateTransition: nil,
 		},
 		`Returns error if perpetual position was opened with no asset updates`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
@@ -237,7 +237,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedErr:             types.ErrFailedToUpdateSubaccounts,
 		},
 		`Returns error if perpetual position was opened with multiple asset updates`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
@@ -278,7 +278,7 @@ func TestGetIsolatedPerpetualStateTransition(t *testing.T) {
 			expectedErr:             types.ErrFailedToUpdateSubaccounts,
 		},
 		`Returns error if perpetual position was opened with non-usdc asset update`: {
-			settledUpdateWithUpdatedSubaccount: types.SettledUpdate{
+			settledUpdateWithUpdatedSubaccount: keeper.SettledUpdate{
 				SettledSubaccount: types.Subaccount{
 					Id: &constants.Alice_Num0,
 					PerpetualPositions: []*types.PerpetualPosition{
diff --git a/protocol/x/subaccounts/keeper/keeper.go b/protocol/x/subaccounts/keeper/keeper.go
index 635747e869..aebf6f805e 100644
--- a/protocol/x/subaccounts/keeper/keeper.go
+++ b/protocol/x/subaccounts/keeper/keeper.go
@@ -2,7 +2,6 @@ package keeper
 
 import (
 	"fmt"
-	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/types"
 
 	"cosmossdk.io/log"
 	storetypes "cosmossdk.io/store/types"
@@ -21,7 +20,6 @@ type (
 		perpetualsKeeper    types.PerpetualsKeeper
 		blocktimeKeeper     types.BlocktimeKeeper
 		indexerEventManager indexer_manager.IndexerEventManager
-		streamingManager    streamingtypes.FullNodeStreamingManager
 	}
 )
 
@@ -33,7 +31,6 @@ func NewKeeper(
 	perpetualsKeeper types.PerpetualsKeeper,
 	blocktimeKeeper types.BlocktimeKeeper,
 	indexerEventManager indexer_manager.IndexerEventManager,
-	streamingManager streamingtypes.FullNodeStreamingManager,
 ) *Keeper {
 	return &Keeper{
 		cdc:                 cdc,
@@ -43,7 +40,6 @@ func NewKeeper(
 		perpetualsKeeper:    perpetualsKeeper,
 		blocktimeKeeper:     blocktimeKeeper,
 		indexerEventManager: indexerEventManager,
-		streamingManager:    streamingManager,
 	}
 }
 
diff --git a/protocol/x/subaccounts/keeper/negative_tnc_subaccount.go b/protocol/x/subaccounts/keeper/negative_tnc_subaccount.go
index ca710ef19d..069dcff2b7 100644
--- a/protocol/x/subaccounts/keeper/negative_tnc_subaccount.go
+++ b/protocol/x/subaccounts/keeper/negative_tnc_subaccount.go
@@ -123,7 +123,7 @@ func (k Keeper) getNegativeTncSubaccountStoreSuffix(
 // The slice will be de-duplicated and will contain unique store suffixes.
 func (k Keeper) getNegativeTncSubaccountStoresuffixes(
 	ctx sdk.Context,
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 ) (
 	suffixes []string,
 	err error,
@@ -152,7 +152,7 @@ func (k Keeper) getNegativeTncSubaccountStoresuffixes(
 // collateral was seen for subaccounts in a slice of settled updates.
 func (k Keeper) getLastBlockNegativeSubaccountSeen(
 	ctx sdk.Context,
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 ) (
 	lastBlockNegativeSubaccountSeen uint32,
 	negativeSubaccountExists bool,
diff --git a/protocol/x/subaccounts/lib/oimf.go b/protocol/x/subaccounts/keeper/oimf.go
similarity index 97%
rename from protocol/x/subaccounts/lib/oimf.go
rename to protocol/x/subaccounts/keeper/oimf.go
index 80068c9b5f..691dd941a0 100644
--- a/protocol/x/subaccounts/lib/oimf.go
+++ b/protocol/x/subaccounts/keeper/oimf.go
@@ -1,4 +1,4 @@
-package lib
+package keeper
 
 import (
 	"fmt"
@@ -10,7 +10,7 @@ import (
 
 // Helper function to compute the delta long for a single settled update on a perpetual.
 func getDeltaLongFromSettledUpdate(
-	u types.SettledUpdate,
+	u SettledUpdate,
 	updatedPerpId uint32,
 ) (
 	deltaLong *big.Int,
@@ -51,7 +51,7 @@ func getDeltaLongFromSettledUpdate(
 //
 // For other update types, returns nil.
 func GetDeltaOpenInterestFromUpdates(
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 	updateType types.UpdateType,
 ) (ret *perptypes.OpenInterestDelta) {
 	if updateType != types.Match {
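
As a reading aid for the rename above: the arithmetic exercised by the relocated tests below can be summarized as "for a Match update, the open-interest delta is the combined change in each side's long quantums." The snippet below is a minimal, standalone sketch of that arithmetic using only the standard library; `longPart` and `deltaOpenInterest` are hypothetical names, not functions from this package.

package main

import (
	"fmt"
	"math/big"
)

// longPart returns max(q, 0), i.e. the long portion of a signed position size.
func longPart(q *big.Int) *big.Int {
	if q.Sign() > 0 {
		return new(big.Int).Set(q)
	}
	return big.NewInt(0)
}

// deltaOpenInterest sums the change in long quantums across both sides of a match.
func deltaOpenInterest(beforeA, afterA, beforeB, afterB *big.Int) *big.Int {
	delta := new(big.Int).Sub(longPart(afterA), longPart(beforeA))
	return delta.Add(delta, new(big.Int).Sub(longPart(afterB), longPart(beforeB)))
}

func main() {
	// Mirrors the relocated test case "Valid: -100 -> 200, 250 -> -50, delta = -50".
	fmt.Println(deltaOpenInterest(
		big.NewInt(-100), big.NewInt(200),
		big.NewInt(250), big.NewInt(-50),
	)) // prints -50
}
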
diff --git a/protocol/x/subaccounts/lib/oimf_test.go b/protocol/x/subaccounts/keeper/oimf_test.go
similarity index 92%
rename from protocol/x/subaccounts/lib/oimf_test.go
rename to protocol/x/subaccounts/keeper/oimf_test.go
index 13d9c7acc7..46cb310769 100644
--- a/protocol/x/subaccounts/lib/oimf_test.go
+++ b/protocol/x/subaccounts/keeper/oimf_test.go
@@ -1,4 +1,4 @@
-package lib_test
+package keeper_test
 
 import (
 	"fmt"
@@ -7,7 +7,7 @@ import (
 
 	"github.com/dydxprotocol/v4-chain/protocol/dtypes"
 	perptypes "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/types"
-	salib "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/lib"
+	keeper "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/keeper"
 	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
 	"github.com/stretchr/testify/require"
 )
@@ -23,14 +23,14 @@ var (
 
 func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 	tests := map[string]struct {
-		settledUpdates []types.SettledUpdate
+		settledUpdates []keeper.SettledUpdate
 		updateType     types.UpdateType
 		expectedVal    *perptypes.OpenInterestDelta
 		panicErr       string
 	}{
 		"Invalid: 1 update": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{},
 					PerpetualUpdates: []types.PerpetualUpdate{
@@ -45,7 +45,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Invalid: one of the updates contains no perp update": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -67,7 +67,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Invalid: updates are on different perpetuals": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -95,7 +95,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Invalid: updates don't have opposite signs": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -123,7 +123,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Invalid: updates don't have equal absolute base quantums": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -151,7 +151,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Valid: 0 -> -500, 0 -> 500, delta = 500": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -182,7 +182,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Valid: 500 -> 0, 0 -> 500, delta = 0": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id:                 aliceSubaccountId,
@@ -217,7 +217,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Not Match update, return nil": {
 			updateType: types.CollatCheck,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id:                 aliceSubaccountId,
@@ -235,7 +235,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Valid: 500 -> 350, 0 -> 150, delta = 0": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -270,7 +270,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Valid: -100 -> 200, 250 -> -50, delta = -50": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -313,7 +313,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 		},
 		"Valid: -3100 -> -5000, 1000 -> 2900, delta = 1900": {
 			updateType: types.Match,
-			settledUpdates: []types.SettledUpdate{
+			settledUpdates: []keeper.SettledUpdate{
 				{
 					SettledSubaccount: types.Subaccount{
 						Id: aliceSubaccountId,
@@ -364,7 +364,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 						tc.panicErr,
 						tc.settledUpdates,
 					), func() {
-						salib.GetDeltaOpenInterestFromUpdates(
+						keeper.GetDeltaOpenInterestFromUpdates(
 							tc.settledUpdates,
 							tc.updateType,
 						)
@@ -373,7 +373,7 @@ func TestGetDeltaOpenInterestFromUpdates(t *testing.T) {
 				return
 			}
 
-			perpOpenInterestDelta := salib.GetDeltaOpenInterestFromUpdates(
+			perpOpenInterestDelta := keeper.GetDeltaOpenInterestFromUpdates(
 				tc.settledUpdates,
 				tc.updateType,
 			)
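
As a quick check of the expected values in the renamed cases above: in "Valid: -3100 -> -5000, 1000 -> 2900, delta = 1900", the first side stays short, so its long open interest is 0 both before and after, while the second side's long position grows from 1,000 to 2,900 quantums; the open-interest delta is therefore +1,900.
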
diff --git a/protocol/x/subaccounts/keeper/subaccount.go b/protocol/x/subaccounts/keeper/subaccount.go
index 7904cf41de..5a10c242a0 100644
--- a/protocol/x/subaccounts/keeper/subaccount.go
+++ b/protocol/x/subaccounts/keeper/subaccount.go
@@ -3,7 +3,6 @@ package keeper
 import (
 	"errors"
 	"fmt"
-	streamingtypes "github.com/dydxprotocol/v4-chain/protocol/streaming/types"
 	"math/big"
 	"math/rand"
 	"time"
@@ -25,7 +24,6 @@ import (
 	"github.com/dydxprotocol/v4-chain/protocol/lib/metrics"
 	perplib "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/lib"
 	perptypes "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/types"
-	salib "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/lib"
 	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
 	gometrics "github.com/hashicorp/go-metrics"
 )
@@ -133,35 +131,6 @@ func (k Keeper) GetSubaccount(
 	return val
 }
 
-func (k Keeper) GetStreamSubaccountUpdate(
-	ctx sdk.Context,
-	id types.SubaccountId,
-	snapshot bool,
-) (val types.StreamSubaccountUpdate) {
-	subaccount := k.GetSubaccount(ctx, id)
-	assetPositions := make([]*types.SubaccountAssetPosition, len(subaccount.AssetPositions))
-	for i, ap := range subaccount.AssetPositions {
-		assetPositions[i] = &types.SubaccountAssetPosition{
-			AssetId:  ap.AssetId,
-			Quantums: ap.Quantums.BigInt().Uint64(),
-		}
-	}
-	perpetualPositions := make([]*types.SubaccountPerpetualPosition, len(subaccount.PerpetualPositions))
-	for i, pp := range subaccount.PerpetualPositions {
-		perpetualPositions[i] = &types.SubaccountPerpetualPosition{
-			PerpetualId: pp.PerpetualId,
-			Quantums:    pp.Quantums.BigInt().Uint64(),
-		}
-	}
-
-	return types.StreamSubaccountUpdate{
-		SubaccountId:              &id,
-		UpdatedAssetPositions:     assetPositions,
-		UpdatedPerpetualPositions: perpetualPositions,
-		Snapshot:                  snapshot,
-	}
-}
-
 // GetAllSubaccount returns all subaccount.
 // For more performant searching and iteration, use `ForEachSubaccount`.
 func (k Keeper) GetAllSubaccount(ctx sdk.Context) (list []types.Subaccount) {
@@ -245,12 +214,12 @@ func (k Keeper) getSettledUpdates(
 	perpInfos map[uint32]perptypes.PerpInfo,
 	requireUniqueSubaccount bool,
 ) (
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 	subaccountIdToFundingPayments map[types.SubaccountId]map[uint32]dtypes.SerializableInt,
 	err error,
 ) {
 	var idToSettledSubaccount = make(map[types.SubaccountId]types.Subaccount)
-	settledUpdates = make([]types.SettledUpdate, len(updates))
+	settledUpdates = make([]SettledUpdate, len(updates))
 	subaccountIdToFundingPayments = make(map[types.SubaccountId]map[uint32]dtypes.SerializableInt)
 
 	// Iterate over all updates and query the relevant `Subaccounts`.
@@ -266,7 +235,7 @@ func (k Keeper) getSettledUpdates(
 		// idToSettledSubaccount map.
 		if !exists {
 			subaccount := k.GetSubaccount(ctx, u.SubaccountId)
-			settledSubaccount, fundingPayments, err = salib.GetSettledSubaccountWithPerpetuals(subaccount, perpInfos)
+			settledSubaccount, fundingPayments, err = GetSettledSubaccountWithPerpetuals(subaccount, perpInfos)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -275,7 +244,7 @@ func (k Keeper) getSettledUpdates(
 			subaccountIdToFundingPayments[u.SubaccountId] = fundingPayments
 		}
 
-		settledUpdate := types.SettledUpdate{
+		settledUpdate := SettledUpdate{
 			SettledSubaccount: settledSubaccount,
 			AssetUpdates:      u.AssetUpdates,
 			PerpetualUpdates:  u.PerpetualUpdates,
@@ -287,43 +256,6 @@ func (k Keeper) getSettledUpdates(
 	return settledUpdates, subaccountIdToFundingPayments, nil
 }
 
-func GenerateStreamSubaccountUpdate(
-	settledUpdate types.SettledUpdate,
-	fundingPayments map[uint32]dtypes.SerializableInt,
-) types.StreamSubaccountUpdate {
-	// Get updated perpetual positions
-	updatedPerpetualPositions := salib.GetUpdatedPerpetualPositions(
-		settledUpdate,
-		fundingPayments,
-	)
-	// Convert updated perpetual positions to SubaccountPerpetualPosition type
-	perpetualPositions := make([]*types.SubaccountPerpetualPosition, len(updatedPerpetualPositions))
-	for i, pp := range updatedPerpetualPositions {
-		perpetualPositions[i] = &types.SubaccountPerpetualPosition{
-			PerpetualId: pp.PerpetualId,
-			Quantums:    pp.Quantums.BigInt().Uint64(),
-		}
-	}
-
-	updatedAssetPositions := salib.GetUpdatedAssetPositions(settledUpdate)
-
-	// Convert updated asset positions to SubaccountAssetPosition type
-	assetPositions := make([]*types.SubaccountAssetPosition, len(updatedAssetPositions))
-	for i, ap := range updatedAssetPositions {
-		assetPositions[i] = &types.SubaccountAssetPosition{
-			AssetId:  ap.AssetId,
-			Quantums: ap.Quantums.BigInt().Uint64(),
-		}
-	}
-
-	return types.StreamSubaccountUpdate{
-		SubaccountId:              settledUpdate.SettledSubaccount.Id,
-		UpdatedAssetPositions:     assetPositions,
-		UpdatedPerpetualPositions: perpetualPositions,
-		Snapshot:                  false,
-	}
-}
-
 // UpdateSubaccounts validates and applies all `updates` to the relevant subaccounts as long as this is a
 // valid state-transition for all subaccounts involved. All `updates` are made atomically, meaning that
 // all state-changes will either succeed or all will fail.
@@ -376,7 +308,7 @@ func (k Keeper) UpdateSubaccounts(
 	}
 
 	// Get OpenInterestDelta from the updates, and persist the OI change if any.
-	perpOpenInterestDelta := salib.GetDeltaOpenInterestFromUpdates(settledUpdates, updateType)
+	perpOpenInterestDelta := GetDeltaOpenInterestFromUpdates(settledUpdates, updateType)
 	if perpOpenInterestDelta != nil {
 		if err := k.perpetualsKeeper.ModifyOpenInterest(
 			ctx,
@@ -395,13 +327,13 @@ func (k Keeper) UpdateSubaccounts(
 	}
 
 	// Apply the updates to perpetual positions.
-	salib.UpdatePerpetualPositions(
+	UpdatePerpetualPositions(
 		settledUpdates,
 		perpInfos,
 	)
 
 	// Apply the updates to asset positions.
-	salib.UpdateAssetPositions(settledUpdates)
+	UpdateAssetPositions(settledUpdates)
 
 	// Transfer collateral between collateral pools for any isolated perpetual positions that changed
 	// state due to an update.
@@ -431,29 +363,16 @@ func (k Keeper) UpdateSubaccounts(
 			indexer_manager.GetBytes(
 				indexerevents.NewSubaccountUpdateEvent(
 					u.SettledSubaccount.Id,
-					salib.GetUpdatedPerpetualPositions(
+					getUpdatedPerpetualPositions(
 						u,
 						fundingPayments,
 					),
-					salib.GetUpdatedAssetPositions(u),
+					getUpdatedAssetPositions(u),
 					fundingPayments,
 				),
 			),
 		)
 
-		// if GRPC streaming is on, emit a generated subaccount update to stream.
-		if streamingManager := k.GetFullNodeStreamingManager(); streamingManager.Enabled() {
-			if k.GetFullNodeStreamingManager().TracksSubaccountId(*u.SettledSubaccount.Id) {
-				subaccountUpdate := GenerateStreamSubaccountUpdate(u, fundingPayments)
-				k.SendSubaccountUpdates(
-					ctx,
-					[]types.StreamSubaccountUpdate{
-						subaccountUpdate,
-					},
-				)
-			}
-		}
-
 		// Emit an event indicating a funding payment was paid / received for each settled funding
 		// payment. Note that `fundingPaid` is positive if the subaccount paid funding,
 		// and negative if the subaccount received funding.
@@ -516,6 +435,79 @@ func (k Keeper) CanUpdateSubaccounts(
 	return success, successPerUpdate, err
 }
 
+// GetSettledSubaccountWithPerpetuals returns 1. a new settled subaccount given an unsettled subaccount,
+// updating the USDC AssetPosition, FundingIndex, and LastFundingPayment fields accordingly
+// (does not persist any changes) and 2. a map with perpetual ID as key and last funding
+// payment as value (for emitting funding payments to indexer).
+//
+// Note that this is a stateless utility function.
+func GetSettledSubaccountWithPerpetuals(
+	subaccount types.Subaccount,
+	perpInfos map[uint32]perptypes.PerpInfo,
+) (
+	settledSubaccount types.Subaccount,
+	fundingPayments map[uint32]dtypes.SerializableInt,
+	err error,
+) {
+	totalNetSettlementPpm := big.NewInt(0)
+
+	newPerpetualPositions := []*types.PerpetualPosition{}
+	fundingPayments = make(map[uint32]dtypes.SerializableInt)
+
+	// Iterate through and settle all perpetual positions.
+	for _, p := range subaccount.PerpetualPositions {
+		perpInfo, found := perpInfos[p.PerpetualId]
+		if !found {
+			return types.Subaccount{}, nil, errorsmod.Wrapf(types.ErrPerpetualInfoDoesNotExist, "%d", p.PerpetualId)
+		}
+
+		// Call the stateless utility function to get the net settlement and new funding index.
+		bigNetSettlementPpm, newFundingIndex := perplib.GetSettlementPpmWithPerpetual(
+			perpInfo.Perpetual,
+			p.GetBigQuantums(),
+			p.FundingIndex.BigInt(),
+		)
+		// Record non-zero funding payment (to be later emitted in SubaccountUpdateEvent to indexer).
+		// Note: Funding payment is the negative of settlement, i.e. positive settlement is equivalent
+		// to a negative funding payment (position received funding payment) and vice versa.
+		if bigNetSettlementPpm.Cmp(lib.BigInt0()) != 0 {
+			fundingPayments[p.PerpetualId] = dtypes.NewIntFromBigInt(
+				new(big.Int).Neg(
+					new(big.Int).Div(bigNetSettlementPpm, lib.BigIntOneMillion()),
+				),
+			)
+		}
+
+		// Aggregate all net settlements.
+		totalNetSettlementPpm.Add(totalNetSettlementPpm, bigNetSettlementPpm)
+
+		// Update cached funding index of the perpetual position.
+		newPerpetualPositions = append(
+			newPerpetualPositions, &types.PerpetualPosition{
+				PerpetualId:  p.PerpetualId,
+				Quantums:     p.Quantums,
+				FundingIndex: dtypes.NewIntFromBigInt(newFundingIndex),
+			},
+		)
+	}
+
+	newSubaccount := types.Subaccount{
+		Id:                 subaccount.Id,
+		AssetPositions:     subaccount.AssetPositions,
+		PerpetualPositions: newPerpetualPositions,
+		MarginEnabled:      subaccount.MarginEnabled,
+	}
+	newUsdcPosition := new(big.Int).Add(
+		subaccount.GetUsdcPosition(),
+		// `Div` implements Euclidean division (unlike Go). When the divisor is positive,
+		// division result always rounds towards negative infinity.
+		totalNetSettlementPpm.Div(totalNetSettlementPpm, lib.BigIntOneMillion()),
+	)
+	// TODO(CLOB-993): Remove this function and use `UpdateAssetPositions` instead.
+	newSubaccount.SetUsdcAssetPosition(newUsdcPosition)
+	return newSubaccount, fundingPayments, nil
+}
+
 func checkPositionUpdatable(
 	ctx sdk.Context,
 	pk types.ProductKeeper,
@@ -556,7 +548,7 @@ func checkPositionUpdatable(
 // caused a failure, if any.
 func (k Keeper) internalCanUpdateSubaccounts(
 	ctx sdk.Context,
-	settledUpdates []types.SettledUpdate,
+	settledUpdates []SettledUpdate,
 	updateType types.UpdateType,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) (
@@ -646,7 +638,7 @@ func (k Keeper) internalCanUpdateSubaccounts(
 	// Get delta open interest from the updates.
 	// `perpOpenInterestDelta` is nil if the update type is not `Match` or if the updates
 	// do not result in OI changes.
-	perpOpenInterestDelta := salib.GetDeltaOpenInterestFromUpdates(settledUpdates, updateType)
+	perpOpenInterestDelta := GetDeltaOpenInterestFromUpdates(settledUpdates, updateType)
 
 	// Temporily apply open interest delta to perpetuals, so IMF is calculated based on open interest after the update.
 	// `perpOpenInterestDeltas` is only present for `Match` update type.
@@ -714,7 +706,7 @@ func (k Keeper) internalCanUpdateSubaccounts(
 		// We must now check if the state transition is valid.
 		if bigNewInitialMargin.Cmp(bigNewNetCollateral) > 0 {
 			// Get the current collateralization and margin requirements without the update applied.
-			emptyUpdate := types.SettledUpdate{
+			emptyUpdate := SettledUpdate{
 				SettledSubaccount: u.SettledSubaccount,
 			}
 
@@ -740,7 +732,7 @@ func (k Keeper) internalCanUpdateSubaccounts(
 			}
 
 			// Determine whether the state transition is valid.
-			result = salib.IsValidStateTransitionForUndercollateralizedSubaccount(
+			result = IsValidStateTransitionForUndercollateralizedSubaccount(
 				bigCurNetCollateral[saKey],
 				bigCurInitialMargin[saKey],
 				bigCurMaintenanceMargin[saKey],
@@ -760,6 +752,74 @@ func (k Keeper) internalCanUpdateSubaccounts(
 	return success, successPerUpdate, nil
 }
 
+// IsValidStateTransitionForUndercollateralizedSubaccount returns an `UpdateResult`
+// denoting whether this state transition is valid. This function accepts the collateral and
+// margin requirements of a subaccount before and after an update ("cur" and
+// "new", respectively).
+//
+// This function should only be called if the account is undercollateralized after the update.
+//
+// A state transition is valid if the subaccount enters a
+// "less-or-equally-risky" state after an update.
+// i.e. `newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`.
+//
+// Otherwise, the state transition is invalid. If the account was previously undercollateralized,
+// `types.StillUndercollateralized` is returned. If the account was previously
+// collateralized and is now undercollateralized, `types.NewlyUndercollateralized` is
+// returned.
+//
+// Note that the inequality `newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`
+// has a divide-by-zero issue when margin requirements are zero. To make sure the state
+// transition is valid, we special-case this scenario and only allow state transitions that improve net collateral.
+func IsValidStateTransitionForUndercollateralizedSubaccount(
+	bigCurNetCollateral *big.Int,
+	bigCurInitialMargin *big.Int,
+	bigCurMaintenanceMargin *big.Int,
+	bigNewNetCollateral *big.Int,
+	bigNewMaintenanceMargin *big.Int,
+) types.UpdateResult {
+	// Determine whether the subaccount was previously undercollateralized before the update.
+	var underCollateralizationResult = types.StillUndercollateralized
+	if bigCurInitialMargin.Cmp(bigCurNetCollateral) <= 0 {
+		underCollateralizationResult = types.NewlyUndercollateralized
+	}
+
+	// If the maintenance margin is increasing, then the subaccount is undercollateralized.
+	if bigNewMaintenanceMargin.Cmp(bigCurMaintenanceMargin) > 0 {
+		return underCollateralizationResult
+	}
+
+	// If the maintenance margin is zero, it means the subaccount must have no open positions, and negative net
+	// collateral. If the net collateral is not improving then this transition is not valid.
+	if bigNewMaintenanceMargin.BitLen() == 0 || bigCurMaintenanceMargin.BitLen() == 0 {
+		if bigNewMaintenanceMargin.BitLen() == 0 &&
+			bigCurMaintenanceMargin.BitLen() == 0 &&
+			bigNewNetCollateral.Cmp(bigCurNetCollateral) > 0 {
+			return types.Success
+		}
+
+		return underCollateralizationResult
+	}
+
+	// Note that here we are effectively checking that
+	// `newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`.
+	// However, to avoid rounding errors, we factor this as
+	// `newNetCollateral * curMaintenanceMargin >= curNetCollateral * newMaintenanceMargin`.
+	bigCurRisk := new(big.Int).Mul(bigNewNetCollateral, bigCurMaintenanceMargin)
+	bigNewRisk := new(big.Int).Mul(bigCurNetCollateral, bigNewMaintenanceMargin)
+
+	// The subaccount is not well-collateralized, and the state transition leaves the subaccount in a
+	// "more-risky" state (collateral relative to margin requirements is decreasing).
+	if bigNewRisk.Cmp(bigCurRisk) > 0 {
+		return underCollateralizationResult
+	}
+
+	// The subaccount is in a "less-or-equally-risky" state (margin requirements are decreasing or unchanged,
+	// collateral relative to margin requirements is increasing or unchanged).
+	// This subaccount is undercollateralized in this state, but we still consider this state transition valid.
+	return types.Success
+}
+
 // GetNetCollateralAndMarginRequirements returns the total net collateral, total initial margin requirement,
 // and total maintenance margin requirement for the subaccount as if the `update` was applied.
 // It is used to get information about speculative changes to the subaccount.
@@ -785,12 +845,12 @@ func (k Keeper) GetNetCollateralAndMarginRequirements(
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	settledSubaccount, _, err := salib.GetSettledSubaccountWithPerpetuals(subaccount, perpInfos)
+	settledSubaccount, _, err := GetSettledSubaccountWithPerpetuals(subaccount, perpInfos)
 	if err != nil {
 		return nil, nil, nil, err
 	}
 
-	settledUpdate := types.SettledUpdate{
+	settledUpdate := SettledUpdate{
 		SettledSubaccount: settledSubaccount,
 		AssetUpdates:      update.AssetUpdates,
 		PerpetualUpdates:  update.PerpetualUpdates,
@@ -816,7 +876,7 @@ func (k Keeper) GetNetCollateralAndMarginRequirements(
 // If two position updates reference the same position, an error is returned.
 func (k Keeper) internalGetNetCollateralAndMarginRequirements(
 	ctx sdk.Context,
-	settledUpdate types.SettledUpdate,
+	settledUpdate SettledUpdate,
 	perpInfos map[uint32]perptypes.PerpInfo,
 ) (
 	bigNetCollateral *big.Int,
@@ -837,7 +897,7 @@ func (k Keeper) internalGetNetCollateralAndMarginRequirements(
 	bigMaintenanceMargin = big.NewInt(0)
 
 	// Merge updates and assets.
-	assetSizes, err := salib.ApplyUpdatesToPositions(
+	assetSizes, err := applyUpdatesToPositions(
 		settledUpdate.SettledSubaccount.AssetPositions,
 		settledUpdate.AssetUpdates,
 	)
@@ -846,7 +906,7 @@ func (k Keeper) internalGetNetCollateralAndMarginRequirements(
 	}
 
 	// Merge updates and perpetuals.
-	perpetualSizes, err := salib.ApplyUpdatesToPositions(
+	perpetualSizes, err := applyUpdatesToPositions(
 		settledUpdate.SettledSubaccount.PerpetualPositions,
 		settledUpdate.PerpetualUpdates,
 	)
@@ -898,6 +958,65 @@ func (k Keeper) internalGetNetCollateralAndMarginRequirements(
 	return bigNetCollateral, bigInitialMargin, bigMaintenanceMargin, nil
 }
 
+// applyUpdatesToPositions merges a slice of `types.UpdatablePositions` and `types.PositionSize`
+// (i.e. concrete types `*types.AssetPosition` and `types.AssetUpdate`) into a slice of `types.PositionSize`.
+// If a given `PositionSize` shares an ID with an `UpdatablePositionSize`, the update and position are merged
+// into a single `PositionSize`.
+//
+// An error is returned if two updates share the same position id.
+//
+// Note: There are probably performance implications here for allocating a new slice of PositionSize,
+// and for allocating new slices when converting the concrete types to interfaces. However, without doing
+// this there would be a lot of duplicate code for calculating changes for both `Assets` and `Perpetuals`.
+func applyUpdatesToPositions[
+	P types.PositionSize,
+	U types.PositionSize,
+](positions []P, updates []U) ([]types.PositionSize, error) {
+	var result []types.PositionSize = make([]types.PositionSize, 0, len(positions)+len(updates))
+
+	updateMap := make(map[uint32]types.PositionSize)
+	updateIndexMap := make(map[uint32]int)
+	for i, update := range updates {
+		// Check for non-unique updates (two updates to the same position).
+		id := update.GetId()
+		_, exists := updateMap[id]
+		if exists {
+			errMsg := fmt.Sprintf("Multiple updates exist for position %v", update.GetId())
+			return nil, errorsmod.Wrap(types.ErrNonUniqueUpdatesPosition, errMsg)
+		}
+
+		updateMap[id] = update
+		updateIndexMap[id] = i
+		result = append(result, update)
+	}
+
+	// Iterate over each position, if the position shares an ID with
+	// an update, then we "merge" the update and the position into a new `PositionUpdate`.
+	for _, pos := range positions {
+		id := pos.GetId()
+		update, exists := updateMap[id]
+		if !exists {
+			result = append(result, pos)
+		} else {
+			var newPos = types.NewPositionUpdate(id)
+
+			// Add the position size and update together to get the new size.
+			var bigNewPositionSize = new(big.Int).Add(
+				pos.GetBigQuantums(),
+				update.GetBigQuantums(),
+			)
+
+			newPos.SetBigQuantums(bigNewPositionSize)
+
+			// Replace update with `PositionUpdate`
+			index := updateIndexMap[id]
+			result[index] = newPos
+		}
+	}
+
+	return result, nil
+}
+
 // GetAllRelevantPerpetuals returns all relevant perpetual information for a given set of updates.
 // This includes all perpetuals that exist on the accounts already and all perpetuals that are
 // being updated in the input updates.
@@ -947,22 +1066,3 @@ func (k Keeper) GetAllRelevantPerpetuals(
 
 	return perpetuals, nil
 }
-
-func (k Keeper) GetFullNodeStreamingManager() streamingtypes.FullNodeStreamingManager {
-	return k.streamingManager
-}
-
-// SendSubaccountUpdates sends the subaccount updates to the gRPC streaming manager.
-func (k Keeper) SendSubaccountUpdates(
-	ctx sdk.Context,
-	subaccountUpdates []types.StreamSubaccountUpdate,
-) {
-	if len(subaccountUpdates) == 0 {
-		return
-	}
-	k.GetFullNodeStreamingManager().SendSubaccountUpdates(
-		subaccountUpdates,
-		lib.MustConvertIntegerToUint32(ctx.BlockHeight()),
-		ctx.ExecMode(),
-	)
-}
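
The settlement code inlined into the keeper above (GetSettledSubaccountWithPerpetuals) divides the aggregated ppm settlement by one million with big.Int's `Div`, and its comment relies on that being Euclidean division. Below is a minimal, standalone demonstration of the rounding difference it refers to, using only the standard library; the values are illustrative.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	oneMillion := big.NewInt(1_000_000)

	// A negative net settlement expressed in parts-per-million.
	settlementPpm := big.NewInt(-1_500_001)

	// big.Int.Div is Euclidean: with a positive divisor, the quotient rounds
	// toward negative infinity.
	fmt.Println(new(big.Int).Div(settlementPpm, oneMillion)) // prints -2

	// Go's built-in integer division truncates toward zero instead.
	fmt.Println(-1_500_001 / 1_000_000) // prints -1
}
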
diff --git a/protocol/x/subaccounts/keeper/subaccount_helper.go b/protocol/x/subaccounts/keeper/subaccount_helper.go
new file mode 100644
index 0000000000..9fc97011d6
--- /dev/null
+++ b/protocol/x/subaccounts/keeper/subaccount_helper.go
@@ -0,0 +1,224 @@
+package keeper
+
+import (
+	"sort"
+
+	errorsmod "cosmossdk.io/errors"
+
+	"github.com/dydxprotocol/v4-chain/protocol/dtypes"
+	perptypes "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/types"
+	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
+)
+
+// getUpdatedAssetPositions returns all the asset positions on a subaccount that have
+// been updated. This will include any asset positions that were closed due to an update.
+// TODO(DEC-1295): look into reducing code duplication here using Generics+Reflect.
+func getUpdatedAssetPositions(
+	update SettledUpdate,
+) []*types.AssetPosition {
+	assetIdToPositionMap := make(map[uint32]*types.AssetPosition)
+	for _, assetPosition := range update.SettledSubaccount.AssetPositions {
+		assetIdToPositionMap[assetPosition.AssetId] = assetPosition
+	}
+
+	updatedAssetIds := make(map[uint32]struct{})
+	for _, assetUpdate := range update.AssetUpdates {
+		updatedAssetIds[assetUpdate.AssetId] = struct{}{}
+	}
+
+	updatedAssetPositions := make([]*types.AssetPosition, 0, len(updatedAssetIds))
+	for updatedId := range updatedAssetIds {
+		assetPosition, exists := assetIdToPositionMap[updatedId]
+		// If a position does not exist on the subaccount with the asset id of an update, it must
+		// have been deleted due to quantums becoming 0. This needs to be included in the event, so we
+		// construct a position with the AssetId of the update and a Quantums value of 0. The other
+		// properties are left as the default values as a 0-sized position indicates the position is
+		// closed.
+		if !exists {
+			assetPosition = &types.AssetPosition{
+				AssetId:  updatedId,
+				Quantums: dtypes.ZeroInt(),
+			}
+		}
+		updatedAssetPositions = append(updatedAssetPositions, assetPosition)
+	}
+
+	// Sort the asset positions in ascending order by asset id.
+	sort.Slice(updatedAssetPositions, func(i, j int) bool {
+		return updatedAssetPositions[i].GetId() < updatedAssetPositions[j].GetId()
+	})
+
+	return updatedAssetPositions
+}
+
+// getUpdatedPerpetualPositions returns all the perpetual positions on a subaccount that have
+// been updated. This will include any perpetual positions that were closed due to an update or that
+// received / paid out funding payments.
+func getUpdatedPerpetualPositions(
+	update SettledUpdate,
+	fundingPayments map[uint32]dtypes.SerializableInt,
+) []*types.PerpetualPosition {
+	perpetualIdToPositionMap := make(map[uint32]*types.PerpetualPosition)
+	for _, perpetualPosition := range update.SettledSubaccount.PerpetualPositions {
+		perpetualIdToPositionMap[perpetualPosition.PerpetualId] = perpetualPosition
+	}
+
+	// `updatedPerpetualIds` indicates which perpetuals were either explicitly updated
+	// (through update.PerpetualUpdates) or implicitly updated (had non-zero last funding
+	// payment).
+	updatedPerpetualIds := make(map[uint32]struct{})
+	for _, perpetualUpdate := range update.PerpetualUpdates {
+		updatedPerpetualIds[perpetualUpdate.PerpetualId] = struct{}{}
+	}
+	// Mark perpetuals with non-zero funding payment also as updated.
+	for perpetualIdWithNonZeroLastFunding := range fundingPayments {
+		updatedPerpetualIds[perpetualIdWithNonZeroLastFunding] = struct{}{}
+	}
+
+	updatedPerpetualPositions := make([]*types.PerpetualPosition, 0, len(updatedPerpetualIds))
+	for updatedId := range updatedPerpetualIds {
+		perpetualPosition, exists := perpetualIdToPositionMap[updatedId]
+		// If a position does not exist on the subaccount with the perpetual id of an update, it must
+		// have been deleted due to quantums becoming 0. This needs to be included in the event, so we
+		// construct a position with the PerpetualId of the update and a Quantums value of 0. The other
+		// properties are left as the default values as a 0-sized position indicates the position is
+		// closed and thus the funding index and the side of the position do not matter.
+		if !exists {
+			perpetualPosition = &types.PerpetualPosition{
+				PerpetualId: updatedId,
+				Quantums:    dtypes.ZeroInt(),
+			}
+		}
+		updatedPerpetualPositions = append(updatedPerpetualPositions, perpetualPosition)
+	}
+
+	// Sort the perpetual positions in ascending order by perpetual id.
+	sort.Slice(updatedPerpetualPositions, func(i, j int) bool {
+		return updatedPerpetualPositions[i].GetId() < updatedPerpetualPositions[j].GetId()
+	})
+
+	return updatedPerpetualPositions
+}
+
+// UpdatePerpetualPositions updates, for each settledUpdate in settledUpdates, its
+// SettledSubaccount.PerpetualPositions to reflect settledUpdate.PerpetualUpdates.
+// For newly created positions, the `perpInfos` map is used to populate the `FundingIndex` field.
+func UpdatePerpetualPositions(
+	settledUpdates []SettledUpdate,
+	perpInfos map[uint32]perptypes.PerpInfo,
+) {
+	// Apply the updates.
+	for i, u := range settledUpdates {
+		// Build a map of all the Subaccount's Perpetual Positions by id.
+		perpetualPositionsMap := make(map[uint32]*types.PerpetualPosition)
+		for _, pp := range u.SettledSubaccount.PerpetualPositions {
+			perpetualPositionsMap[pp.PerpetualId] = pp
+		}
+
+		// Update the perpetual positions.
+		for _, pu := range u.PerpetualUpdates {
+			// Check if the `Subaccount` already has a position with the same id.
+			// If so – we update the size of the existing position, otherwise
+			// we create a new position.
+			if pp, exists := perpetualPositionsMap[pu.PerpetualId]; exists {
+				curQuantums := pp.GetBigQuantums()
+				updateQuantums := pu.GetBigQuantums()
+				newQuantums := curQuantums.Add(curQuantums, updateQuantums)
+
+				// Handle the case where the position is now closed.
+				if newQuantums.Sign() == 0 {
+					delete(perpetualPositionsMap, pu.PerpetualId)
+				}
+				pp.Quantums = dtypes.NewIntFromBigInt(newQuantums)
+			} else {
+				// This subaccount does not have a matching position for this update.
+				// Create the new position.
+				perpInfo, exists := perpInfos[pu.PerpetualId]
+				if !exists {
+					// Invariant: `perpInfos` should contain all relevant perpetuals, which includes all
+					// perpetuals that are updated.
+					panic(errorsmod.Wrapf(types.ErrPerpetualInfoDoesNotExist, "%d", pu.PerpetualId))
+				}
+				perpetualPosition := &types.PerpetualPosition{
+					PerpetualId:  pu.PerpetualId,
+					Quantums:     dtypes.NewIntFromBigInt(pu.GetBigQuantums()),
+					FundingIndex: perpInfo.Perpetual.FundingIndex,
+				}
+
+				// Add the new position to the map.
+				perpetualPositionsMap[pu.PerpetualId] = perpetualPosition
+			}
+		}
+
+		// Convert the new PerpetualPosition values back into a slice.
+		perpetualPositions := make([]*types.PerpetualPosition, 0, len(perpetualPositionsMap))
+		for _, value := range perpetualPositionsMap {
+			perpetualPositions = append(perpetualPositions, value)
+		}
+
+		// Sort the new PerpetualPositions in ascending order by Id.
+		sort.Slice(perpetualPositions, func(i, j int) bool {
+			return perpetualPositions[i].GetId() < perpetualPositions[j].GetId()
+		})
+
+		settledUpdates[i].SettledSubaccount.PerpetualPositions = perpetualPositions
+	}
+}
+
+// UpdateAssetPositions updates, for each settledUpdate in settledUpdates, its
+// SettledSubaccount.AssetPositions to reflect settledUpdate.AssetUpdates.
+func UpdateAssetPositions(
+	settledUpdates []SettledUpdate,
+) {
+	// Apply the updates.
+	for i, u := range settledUpdates {
+		// Build a map of all the Subaccount's Asset Positions by id.
+		assetPositionsMap := make(map[uint32]*types.AssetPosition)
+		for _, ap := range u.SettledSubaccount.AssetPositions {
+			assetPositionsMap[ap.AssetId] = ap
+		}
+
+		// Update the asset positions.
+		for _, au := range u.AssetUpdates {
+			// Check if the `Subaccount` already has a position with the same id.
+			// If so - we update the size of the existing position, otherwise
+			// we create a new position.
+			if ap, exists := assetPositionsMap[au.AssetId]; exists {
+				curQuantums := ap.GetBigQuantums()
+				updateQuantums := au.GetBigQuantums()
+				newQuantums := curQuantums.Add(curQuantums, updateQuantums)
+
+				ap.Quantums = dtypes.NewIntFromBigInt(newQuantums)
+
+				// Handle the case where the position is now closed.
+				if ap.Quantums.Sign() == 0 {
+					delete(assetPositionsMap, au.AssetId)
+				}
+			} else {
+				// This subaccount does not have a matching asset position for this update.
+
+				// Create the new asset position.
+				assetPosition := &types.AssetPosition{
+					AssetId:  au.AssetId,
+					Quantums: dtypes.NewIntFromBigInt(au.GetBigQuantums()),
+				}
+
+				// Add the new asset position to the map.
+				assetPositionsMap[au.AssetId] = assetPosition
+			}
+		}
+
+		// Convert the new AssetPosition values back into a slice.
+		assetPositions := make([]*types.AssetPosition, 0, len(assetPositionsMap))
+		for _, value := range assetPositionsMap {
+			assetPositions = append(assetPositions, value)
+		}
+
+		// Sort the new AssetPositions in ascending order by AssetId.
+		sort.Slice(assetPositions, func(i, j int) bool {
+			return assetPositions[i].GetId() < assetPositions[j].GetId()
+		})
+
+		settledUpdates[i].SettledSubaccount.AssetPositions = assetPositions
+	}
+}
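
UpdateAssetPositions and UpdatePerpetualPositions above share one merge pattern: index the existing positions by id, apply each update's delta, drop any position whose size reaches zero, and return the remainder sorted by id. The following is a simplified, standalone sketch of that pattern; the `position` and `update` structs are illustrative stand-ins, not the module's types.

package main

import (
	"fmt"
	"math/big"
	"sort"
)

type position struct {
	id       uint32
	quantums *big.Int
}

type update struct {
	id    uint32
	delta *big.Int
}

// applyUpdates applies signed deltas to positions keyed by id, drops positions
// that reach zero, and returns the result sorted by id.
func applyUpdates(positions []position, updates []update) []position {
	byID := make(map[uint32]*big.Int)
	for _, p := range positions {
		byID[p.id] = new(big.Int).Set(p.quantums)
	}
	for _, u := range updates {
		cur, ok := byID[u.id]
		if !ok {
			cur = big.NewInt(0)
		}
		next := new(big.Int).Add(cur, u.delta)
		if next.Sign() == 0 {
			delete(byID, u.id) // position fully closed
			continue
		}
		byID[u.id] = next
	}
	out := make([]position, 0, len(byID))
	for id, q := range byID {
		out = append(out, position{id: id, quantums: q})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].id < out[j].id })
	return out
}

func main() {
	merged := applyUpdates(
		[]position{{id: 0, quantums: big.NewInt(100)}, {id: 2, quantums: big.NewInt(-50)}},
		[]update{{id: 2, delta: big.NewInt(50)}, {id: 1, delta: big.NewInt(25)}},
	)
	for _, p := range merged {
		fmt.Printf("id=%d quantums=%s\n", p.id, p.quantums)
	}
	// Output:
	// id=0 quantums=100
	// id=1 quantums=25
}
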
diff --git a/protocol/x/subaccounts/keeper/subaccount_test.go b/protocol/x/subaccounts/keeper/subaccount_test.go
index fe47a46e5d..747ff8cfb9 100644
--- a/protocol/x/subaccounts/keeper/subaccount_test.go
+++ b/protocol/x/subaccounts/keeper/subaccount_test.go
@@ -5849,3 +5849,91 @@ func TestGetNetCollateralAndMarginRequirements(t *testing.T) {
 		})
 	}
 }
+
+func TestIsValidStateTransitionForUndercollateralizedSubaccount_ZeroMarginRequirements(t *testing.T) {
+	tests := map[string]struct {
+		bigCurNetCollateral     *big.Int
+		bigCurInitialMargin     *big.Int
+		bigCurMaintenanceMargin *big.Int
+		bigNewNetCollateral     *big.Int
+		bigNewMaintenanceMargin *big.Int
+
+		expectedResult types.UpdateResult
+	}{
+		// Tests when current margin requirement is zero and margin requirement increases.
+		"fails when MMR increases and TNC decreases - negative TNC": {
+			bigCurNetCollateral:     big.NewInt(-1),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(-2),
+			bigNewMaintenanceMargin: big.NewInt(1),
+			expectedResult:          types.StillUndercollateralized,
+		},
+		"fails when MMR increases and TNC stays the same - negative TNC": {
+			bigCurNetCollateral:     big.NewInt(-1),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(-1),
+			bigNewMaintenanceMargin: big.NewInt(1),
+			expectedResult:          types.StillUndercollateralized,
+		},
+		"fails when MMR increases and TNC increases - negative TNC": {
+			bigCurNetCollateral:     big.NewInt(-1),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(100),
+			bigNewMaintenanceMargin: big.NewInt(1),
+			expectedResult:          types.StillUndercollateralized,
+		},
+		// Tests when both margin requirements are zero.
+		"fails when both new and old MMR are zero and TNC stays the same": {
+			bigCurNetCollateral:     big.NewInt(-1),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(-1),
+			bigNewMaintenanceMargin: big.NewInt(0),
+			expectedResult:          types.StillUndercollateralized,
+		},
+		"fails when both new and old MMR are zero and TNC decrease from negative to negative": {
+			bigCurNetCollateral:     big.NewInt(-1),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(-2),
+			bigNewMaintenanceMargin: big.NewInt(0),
+			expectedResult:          types.StillUndercollateralized,
+		},
+		"succeeds when both new and old MMR are zero and TNC increases": {
+			bigCurNetCollateral:     big.NewInt(-2),
+			bigCurInitialMargin:     big.NewInt(0),
+			bigCurMaintenanceMargin: big.NewInt(0),
+			bigNewNetCollateral:     big.NewInt(-1),
+			bigNewMaintenanceMargin: big.NewInt(0),
+			expectedResult:          types.Success,
+		},
+		// Tests when new margin requirement is zero.
+		"fails when MMR decreased to zero, and TNC increases but is still negative": {
+			bigCurNetCollateral:     big.NewInt(-2),
+			bigCurInitialMargin:     big.NewInt(1),
+			bigCurMaintenanceMargin: big.NewInt(1),
+			bigNewNetCollateral:     big.NewInt(-1),
+			bigNewMaintenanceMargin: big.NewInt(0),
+			expectedResult:          types.StillUndercollateralized,
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			require.Equal(
+				t,
+				tc.expectedResult,
+				keeper.IsValidStateTransitionForUndercollateralizedSubaccount(
+					tc.bigCurNetCollateral,
+					tc.bigCurInitialMargin,
+					tc.bigCurMaintenanceMargin,
+					tc.bigNewNetCollateral,
+					tc.bigNewMaintenanceMargin,
+				),
+			)
+		})
+	}
+}
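
These cases target the zero-maintenance-margin branches of the IsValidStateTransitionForUndercollateralizedSubaccount body inlined earlier in this diff. Tracing the last case, "fails when MMR decreased to zero, and TNC increases but is still negative": only the new maintenance margin is zero (the current MMR is 1), so the special case that accepts an improving net collateral does not apply, since it requires both the old and new maintenance margins to be zero. The function therefore returns the prior collateralization status; because the current initial margin (1) exceeds the current net collateral (-2), that status is types.StillUndercollateralized, matching the expected result.
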
diff --git a/protocol/x/subaccounts/types/settled_update.go b/protocol/x/subaccounts/keeper/update.go
similarity index 70%
rename from protocol/x/subaccounts/types/settled_update.go
rename to protocol/x/subaccounts/keeper/update.go
index 76811b6410..30e0ad58f7 100644
--- a/protocol/x/subaccounts/types/settled_update.go
+++ b/protocol/x/subaccounts/keeper/update.go
@@ -1,4 +1,8 @@
-package types
+package keeper
+
+import (
+	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
+)
 
 // SettledUpdate is used internally in the subaccounts keeper to
 // specify changes to one or more `Subaccounts` (for example the
@@ -6,9 +10,9 @@ package types
 // The subaccount is always in its settled state.
 type SettledUpdate struct {
 	// The `Subaccount` for which this update applies to, in its settled form.
-	SettledSubaccount Subaccount
+	SettledSubaccount types.Subaccount
 	// A list of changes to make to any `AssetPositions` in the `Subaccount`.
-	AssetUpdates []AssetUpdate
+	AssetUpdates []types.AssetUpdate
 	// A list of changes to make to any `PerpetualPositions` in the `Subaccount`.
-	PerpetualUpdates []PerpetualUpdate
+	PerpetualUpdates []types.PerpetualUpdate
 }
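
With the struct relocated into the keeper package, callers and tests build it as shown in the updated test files earlier in this diff. Below is a minimal sketch of such a construction; the nil field values are placeholders only, not meaningful data.

package keeper_test

import (
	keeper "github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/keeper"
	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
)

// exampleSettledUpdate mirrors the construction pattern used by the updated tests above.
var exampleSettledUpdate = keeper.SettledUpdate{
	SettledSubaccount: types.Subaccount{
		Id:                 nil, // e.g. &constants.Alice_Num0 in the tests
		PerpetualPositions: nil,
	},
	AssetUpdates:     nil, // []types.AssetUpdate
	PerpetualUpdates: nil, // []types.PerpetualUpdate
}
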
diff --git a/protocol/x/subaccounts/lib/updates.go b/protocol/x/subaccounts/lib/updates.go
deleted file mode 100644
index 27956ba211..0000000000
--- a/protocol/x/subaccounts/lib/updates.go
+++ /dev/null
@@ -1,425 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"math/big"
-	"sort"
-
-	errorsmod "cosmossdk.io/errors"
-	"github.com/dydxprotocol/v4-chain/protocol/dtypes"
-	"github.com/dydxprotocol/v4-chain/protocol/lib"
-	perplib "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/lib"
-	perptypes "github.com/dydxprotocol/v4-chain/protocol/x/perpetuals/types"
-	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-)
-
-// GetSettledSubaccountWithPerpetuals returns 1. a new settled subaccount given an unsettled subaccount,
-// updating the USDC AssetPosition, FundingIndex, and LastFundingPayment fields accordingly
-// (does not persist any changes) and 2. a map with perpetual ID as key and last funding
-// payment as value (for emitting funding payments to indexer).
-func GetSettledSubaccountWithPerpetuals(
-	subaccount types.Subaccount,
-	perpInfos map[uint32]perptypes.PerpInfo,
-) (
-	settledSubaccount types.Subaccount,
-	fundingPayments map[uint32]dtypes.SerializableInt,
-	err error,
-) {
-	totalNetSettlementPpm := big.NewInt(0)
-
-	newPerpetualPositions := []*types.PerpetualPosition{}
-	fundingPayments = make(map[uint32]dtypes.SerializableInt)
-
-	// Iterate through and settle all perpetual positions.
-	for _, p := range subaccount.PerpetualPositions {
-		perpInfo, found := perpInfos[p.PerpetualId]
-		if !found {
-			return types.Subaccount{}, nil, errorsmod.Wrapf(types.ErrPerpetualInfoDoesNotExist, "%d", p.PerpetualId)
-		}
-
-		// Call the stateless utility function to get the net settlement and new funding index.
-		bigNetSettlementPpm, newFundingIndex := perplib.GetSettlementPpmWithPerpetual(
-			perpInfo.Perpetual,
-			p.GetBigQuantums(),
-			p.FundingIndex.BigInt(),
-		)
-		// Record non-zero funding payment (to be later emitted in SubaccountUpdateEvent to indexer).
-		// Note: Funding payment is the negative of settlement, i.e. positive settlement is equivalent
-		// to a negative funding payment (position received funding payment) and vice versa.
-		if bigNetSettlementPpm.Cmp(lib.BigInt0()) != 0 {
-			fundingPayments[p.PerpetualId] = dtypes.NewIntFromBigInt(
-				new(big.Int).Neg(
-					new(big.Int).Div(bigNetSettlementPpm, lib.BigIntOneMillion()),
-				),
-			)
-		}
-
-		// Aggregate all net settlements.
-		totalNetSettlementPpm.Add(totalNetSettlementPpm, bigNetSettlementPpm)
-
-		// Update cached funding index of the perpetual position.
-		newPerpetualPositions = append(
-			newPerpetualPositions, &types.PerpetualPosition{
-				PerpetualId:  p.PerpetualId,
-				Quantums:     p.Quantums,
-				FundingIndex: dtypes.NewIntFromBigInt(newFundingIndex),
-			},
-		)
-	}
-
-	newSubaccount := types.Subaccount{
-		Id:                 subaccount.Id,
-		AssetPositions:     subaccount.AssetPositions,
-		PerpetualPositions: newPerpetualPositions,
-		MarginEnabled:      subaccount.MarginEnabled,
-	}
-	newUsdcPosition := new(big.Int).Add(
-		subaccount.GetUsdcPosition(),
-		// `Div` implements Euclidean division (unlike Go). When the diviser is positive,
-		// division result always rounds towards negative infinity.
-		totalNetSettlementPpm.Div(totalNetSettlementPpm, lib.BigIntOneMillion()),
-	)
-	// TODO(CLOB-993): Remove this function and use `UpdateAssetPositions` instead.
-	newSubaccount.SetUsdcAssetPosition(newUsdcPosition)
-	return newSubaccount, fundingPayments, nil
-}
-
-// IsValidStateTransitionForUndercollateralizedSubaccount returns an `UpdateResult`
-// denoting whether this state transition is valid. This function accepts the collateral and
-// margin requirements of a subaccount before and after an update ("cur" and
-// "new", respectively).
-//
-// This function should only be called if the account is undercollateralized after the update.
-//
-// A state transition is valid if the subaccount enters a
-// "less-or-equally-risky" state after an update.
-// i.e.`newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`.
-//
-// Otherwise, the state transition is invalid. If the account was previously undercollateralized,
-// `types.StillUndercollateralized` is returned. If the account was previously
-// collateralized and is now undercollateralized, `types.NewlyUndercollateralized` is
-// returned.
-//
-// Note that the inequality `newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`
-// has divide-by-zero issue when margin requirements are zero. To make sure the state
-// transition is valid, we special case this scenario and only allow state transition that improves net collateral.
-func IsValidStateTransitionForUndercollateralizedSubaccount(
-	bigCurNetCollateral *big.Int,
-	bigCurInitialMargin *big.Int,
-	bigCurMaintenanceMargin *big.Int,
-	bigNewNetCollateral *big.Int,
-	bigNewMaintenanceMargin *big.Int,
-) types.UpdateResult {
-	// Determine whether the subaccount was previously undercollateralized before the update.
-	var underCollateralizationResult = types.StillUndercollateralized
-	if bigCurInitialMargin.Cmp(bigCurNetCollateral) <= 0 {
-		underCollateralizationResult = types.NewlyUndercollateralized
-	}
-
-	// If the maintenance margin is increasing, then the subaccount is undercollateralized.
-	if bigNewMaintenanceMargin.Cmp(bigCurMaintenanceMargin) > 0 {
-		return underCollateralizationResult
-	}
-
-	// If the maintenance margin is zero, it means the subaccount must have no open positions, and negative net
-	// collateral. If the net collateral is not improving then this transition is not valid.
-	if bigNewMaintenanceMargin.BitLen() == 0 || bigCurMaintenanceMargin.BitLen() == 0 {
-		if bigNewMaintenanceMargin.BitLen() == 0 &&
-			bigCurMaintenanceMargin.BitLen() == 0 &&
-			bigNewNetCollateral.Cmp(bigCurNetCollateral) > 0 {
-			return types.Success
-		}
-
-		return underCollateralizationResult
-	}
-
-	// Note that here we are effectively checking that
-	// `newNetCollateral / newMaintenanceMargin >= curNetCollateral / curMaintenanceMargin`.
-	// However, to avoid rounding errors, we factor this as
-	// `newNetCollateral * curMaintenanceMargin >= curNetCollateral * newMaintenanceMargin`.
-	bigCurRisk := new(big.Int).Mul(bigNewNetCollateral, bigCurMaintenanceMargin)
-	bigNewRisk := new(big.Int).Mul(bigCurNetCollateral, bigNewMaintenanceMargin)
-
-	// The subaccount is not well-collateralized, and the state transition leaves the subaccount in a
-	// "more-risky" state (collateral relative to margin requirements is decreasing).
-	if bigNewRisk.Cmp(bigCurRisk) > 0 {
-		return underCollateralizationResult
-	}
-
-	// The subaccount is in a "less-or-equally-risky" state (margin requirements are decreasing or unchanged,
-	// collateral relative to margin requirements is decreasing or unchanged).
-	// This subaccount is undercollateralized in this state, but we still consider this state transition valid.
-	return types.Success
-}
-
-// ApplyUpdatesToPositions merges a slice of `types.UpdatablePositions` and `types.PositionSize`
-// (i.e. concrete types *types.AssetPosition` and `types.AssetUpdate`) into a slice of `types.PositionSize`.
-// If a given `PositionSize` shares an ID with an `UpdatablePositionSize`, the update and position are merged
-// into a single `PositionSize`.
-//
-// An error is returned if two updates share the same position id.
-//
-// Note: There are probably performance implications here for allocating a new slice of PositionSize,
-// and for allocating new slices when converting the concrete types to interfaces. However, without doing
-// this there would be a lot of duplicate code for calculating changes for both `Assets` and `Perpetuals`.
-func ApplyUpdatesToPositions[
-	P types.PositionSize,
-	U types.PositionSize,
-](positions []P, updates []U) ([]types.PositionSize, error) {
-	var result []types.PositionSize = make([]types.PositionSize, 0, len(positions)+len(updates))
-
-	updateMap := make(map[uint32]types.PositionSize, len(updates))
-	updateIndexMap := make(map[uint32]int, len(updates))
-	for i, update := range updates {
-		// Check for non-unique updates (two updates to the same position).
-		id := update.GetId()
-		_, exists := updateMap[id]
-		if exists {
-			errMsg := fmt.Sprintf("Multiple updates exist for position %v", update.GetId())
-			return nil, errorsmod.Wrap(types.ErrNonUniqueUpdatesPosition, errMsg)
-		}
-
-		updateMap[id] = update
-		updateIndexMap[id] = i
-		result = append(result, update)
-	}
-
-	// Iterate over each position, if the position shares an ID with
-	// an update, then we "merge" the update and the position into a new `PositionUpdate`.
-	for _, pos := range positions {
-		id := pos.GetId()
-		update, exists := updateMap[id]
-		if !exists {
-			result = append(result, pos)
-		} else {
-			var newPos = types.NewPositionUpdate(id)
-
-			// Add the position size and update together to get the new size.
-			var bigNewPositionSize = new(big.Int).Add(
-				pos.GetBigQuantums(),
-				update.GetBigQuantums(),
-			)
-
-			newPos.SetBigQuantums(bigNewPositionSize)
-
-			// Replace update with `PositionUpdate`
-			index := updateIndexMap[id]
-			result[index] = newPos
-		}
-	}
-
-	return result, nil
-}
-
-// GetUpdatedAssetPositions filters out all the asset positions on a subaccount that have
-// been updated. This will include any asset postions that were closed due to an update.
-// TODO(DEC-1295): look into reducing code duplication here using Generics+Reflect.
-func GetUpdatedAssetPositions(
-	update types.SettledUpdate,
-) []*types.AssetPosition {
-	assetIdToPositionMap := make(map[uint32]*types.AssetPosition)
-	for _, assetPosition := range update.SettledSubaccount.AssetPositions {
-		assetIdToPositionMap[assetPosition.AssetId] = assetPosition
-	}
-
-	updatedAssetIds := make(map[uint32]struct{})
-	for _, assetUpdate := range update.AssetUpdates {
-		updatedAssetIds[assetUpdate.AssetId] = struct{}{}
-	}
-
-	updatedAssetPositions := make([]*types.AssetPosition, 0, len(updatedAssetIds))
-	for updatedId := range updatedAssetIds {
-		assetPosition, exists := assetIdToPositionMap[updatedId]
-		// If a position does not exist on the subaccount with the asset id of an update, it must
-		// have been deleted due to quantums becoming 0. This needs to be included in the event, so we
-		// construct a position with the AssetId of the update and a Quantums value of 0. The other
-		// properties are left as the default values as a 0-sized position indicates the position is
-		// closed.
-		if !exists {
-			assetPosition = &types.AssetPosition{
-				AssetId:  updatedId,
-				Quantums: dtypes.ZeroInt(),
-			}
-		}
-		updatedAssetPositions = append(updatedAssetPositions, assetPosition)
-	}
-
-	// Sort the asset positions in ascending order by asset id.
-	sort.Slice(updatedAssetPositions, func(i, j int) bool {
-		return updatedAssetPositions[i].GetId() < updatedAssetPositions[j].GetId()
-	})
-
-	return updatedAssetPositions
-}
-
-// GetUpdatedPerpetualPositions filters out all the perpetual positions on a subaccount that have
-// been updated. This will include any perpetual positions that were closed due to an update or that
-// received or paid out funding payments.
-func GetUpdatedPerpetualPositions(
-	update types.SettledUpdate,
-	fundingPayments map[uint32]dtypes.SerializableInt,
-) []*types.PerpetualPosition {
-	perpetualIdToPositionMap := make(map[uint32]*types.PerpetualPosition)
-	for _, perpetualPosition := range update.SettledSubaccount.PerpetualPositions {
-		perpetualIdToPositionMap[perpetualPosition.PerpetualId] = perpetualPosition
-	}
-
-	// `updatedPerpetualIds` indicates which perpetuals were either explicitly updated
-	// (through update.PerpetualUpdates) or implicitly updated (had non-zero last funding
-	// payment).
-	updatedPerpetualIds := make(map[uint32]struct{})
-	for _, perpetualUpdate := range update.PerpetualUpdates {
-		updatedPerpetualIds[perpetualUpdate.PerpetualId] = struct{}{}
-	}
-	// Mark perpetuals with non-zero funding payment also as updated.
-	for perpetualIdWithNonZeroLastFunding := range fundingPayments {
-		updatedPerpetualIds[perpetualIdWithNonZeroLastFunding] = struct{}{}
-	}
-
-	updatedPerpetualPositions := make([]*types.PerpetualPosition, 0, len(updatedPerpetualIds))
-	for updatedId := range updatedPerpetualIds {
-		perpetualPosition, exists := perpetualIdToPositionMap[updatedId]
-		// If a position does not exist on the subaccount with the perpetual id of an update, it must
-		// have been deleted due to quantums becoming 0. This needs to be included in the event, so we
-		// construct a position with the PerpetualId of the update and a Quantums value of 0. The other
-		// properties are left as the default values as a 0-sized position indicates the position is
-		// closed, and thus the funding index and the side of the position do not matter.
-		if !exists {
-			perpetualPosition = &types.PerpetualPosition{
-				PerpetualId: updatedId,
-				Quantums:    dtypes.ZeroInt(),
-			}
-		}
-		updatedPerpetualPositions = append(updatedPerpetualPositions, perpetualPosition)
-	}
-
-	// Sort the perpetual positions in ascending order by perpetual id.
-	sort.Slice(updatedPerpetualPositions, func(i, j int) bool {
-		return updatedPerpetualPositions[i].GetId() < updatedPerpetualPositions[j].GetId()
-	})
-
-	return updatedPerpetualPositions
-}
-
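The `updatedPerpetualIds` set above is the union of explicitly updated perpetual ids and ids that had a non-zero funding payment. A minimal, self-contained sketch of that union, using plain Go types instead of the module's update and `dtypes` types:

```go
package main

import "fmt"

// updatedIDs returns the union of explicitly updated perpetual ids and the
// perpetual ids that received or paid a non-zero funding payment.
func updatedIDs(explicit []uint32, fundingPayments map[uint32]int64) map[uint32]struct{} {
	ids := make(map[uint32]struct{}, len(explicit)+len(fundingPayments))
	for _, id := range explicit {
		ids[id] = struct{}{}
	}
	for id := range fundingPayments {
		// The map is expected to hold only non-zero payments, so every key counts.
		ids[id] = struct{}{}
	}
	return ids
}

func main() {
	ids := updatedIDs([]uint32{0, 3}, map[uint32]int64{3: -12, 7: 5})
	fmt.Println(len(ids)) // 3 (perpetuals 0, 3, and 7)
}
```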
-// For each settledUpdate in settledUpdates, updates its SettledSubaccount.PerpetualPositions
-// to reflect settledUpdate.PerpetualUpdates.
-// For newly created positions, use the `perpInfos` map to populate the `FundingIndex` field.
-func UpdatePerpetualPositions(
-	settledUpdates []types.SettledUpdate,
-	perpInfos map[uint32]perptypes.PerpInfo,
-) {
-	// Apply the updates.
-	for i, u := range settledUpdates {
-		// Build a map of all the Subaccount's Perpetual Positions by id.
-		perpetualPositionsMap := make(map[uint32]*types.PerpetualPosition)
-		for _, pp := range u.SettledSubaccount.PerpetualPositions {
-			perpetualPositionsMap[pp.PerpetualId] = pp
-		}
-
-		// Update the perpetual positions.
-		for _, pu := range u.PerpetualUpdates {
-			// Check if the `Subaccount` already has a position with the same id.
-			// If so – we update the size of the existing position, otherwise
-			// we create a new position.
-			if pp, exists := perpetualPositionsMap[pu.PerpetualId]; exists {
-				curQuantums := pp.GetBigQuantums()
-				updateQuantums := pu.GetBigQuantums()
-				newQuantums := curQuantums.Add(curQuantums, updateQuantums)
-
-				// Handle the case where the position is now closed.
-				if newQuantums.Sign() == 0 {
-					delete(perpetualPositionsMap, pu.PerpetualId)
-				}
-				pp.Quantums = dtypes.NewIntFromBigInt(newQuantums)
-			} else {
-				// This subaccount does not have a matching position for this update.
-				// Create the new position.
-				perpInfo, exists := perpInfos[pu.PerpetualId]
-				if !exists {
-					// Invariant: `perpInfos` should contain all relevant perpetuals, which includes all
-					// perpetuals that are updated.
-					panic(errorsmod.Wrapf(types.ErrPerpetualInfoDoesNotExist, "%d", pu.PerpetualId))
-				}
-				perpetualPosition := &types.PerpetualPosition{
-					PerpetualId:  pu.PerpetualId,
-					Quantums:     dtypes.NewIntFromBigInt(pu.GetBigQuantums()),
-					FundingIndex: perpInfo.Perpetual.FundingIndex,
-				}
-
-				// Add the new position to the map.
-				perpetualPositionsMap[pu.PerpetualId] = perpetualPosition
-			}
-		}
-
-		// Convert the new PerpetualPosition values back into a slice.
-		perpetualPositions := make([]*types.PerpetualPosition, 0, len(perpetualPositionsMap))
-		for _, value := range perpetualPositionsMap {
-			perpetualPositions = append(perpetualPositions, value)
-		}
-
-		// Sort the new PerpetualPositions in ascending order by Id.
-		sort.Slice(perpetualPositions, func(i, j int) bool {
-			return perpetualPositions[i].GetId() < perpetualPositions[j].GetId()
-		})
-
-		settledUpdates[i].SettledSubaccount.PerpetualPositions = perpetualPositions
-	}
-}
-
-// For each settledUpdate in settledUpdates, updates its SettledSubaccount.AssetPositions
-// to reflect settledUpdate.AssetUpdates.
-func UpdateAssetPositions(
-	settledUpdates []types.SettledUpdate,
-) {
-	// Apply the updates.
-	for i, u := range settledUpdates {
-		// Build a map of all the Subaccount's Asset Positions by id.
-		assetPositionsMap := make(map[uint32]*types.AssetPosition)
-		for _, ap := range u.SettledSubaccount.AssetPositions {
-			assetPositionsMap[ap.AssetId] = ap
-		}
-
-		// Update the asset positions.
-		for _, au := range u.AssetUpdates {
-			// Check if the `Subaccount` already has a position with the same id.
-			// If so - we update the size of the existing position, otherwise
-			// we create a new position.
-			if ap, exists := assetPositionsMap[au.AssetId]; exists {
-				curQuantums := ap.GetBigQuantums()
-				updateQuantums := au.GetBigQuantums()
-				newQuantums := curQuantums.Add(curQuantums, updateQuantums)
-
-				ap.Quantums = dtypes.NewIntFromBigInt(newQuantums)
-
-				// Handle the case where the position is now closed.
-				if ap.Quantums.Sign() == 0 {
-					delete(assetPositionsMap, au.AssetId)
-				}
-			} else {
-				// This subaccount does not have a matching asset position for this update.
-
-				// Create the new asset position.
-				assetPosition := &types.AssetPosition{
-					AssetId:  au.AssetId,
-					Quantums: dtypes.NewIntFromBigInt(au.GetBigQuantums()),
-				}
-
-				// Add the new asset position to the map.
-				assetPositionsMap[au.AssetId] = assetPosition
-			}
-		}
-
-		// Convert the new AssetPosition values back into a slice.
-		assetPositions := make([]*types.AssetPosition, 0, len(assetPositionsMap))
-		for _, value := range assetPositionsMap {
-			assetPositions = append(assetPositions, value)
-		}
-
-		// Sort the new AssetPositions in ascending order by AssetId.
-		sort.Slice(assetPositions, func(i, j int) bool {
-			return assetPositions[i].GetId() < assetPositions[j].GetId()
-		})
-
-		settledUpdates[i].SettledSubaccount.AssetPositions = assetPositions
-	}
-}
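Both `UpdatePerpetualPositions` and `UpdateAssetPositions` above rebuild the position slice from a map and then sort it by id, which keeps the stored ordering deterministic despite Go's randomized map iteration. A compact sketch of that pattern in isolation:

```go
package main

import (
	"fmt"
	"sort"
)

// sortedValuesByID flattens an id-keyed map into a slice ordered by ascending id,
// which keeps the output deterministic despite Go's randomized map iteration.
func sortedValuesByID(m map[uint32]string) []string {
	ids := make([]uint32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })

	out := make([]string, 0, len(ids))
	for _, id := range ids {
		out = append(out, m[id])
	}
	return out
}

func main() {
	fmt.Println(sortedValuesByID(map[uint32]string{2: "ETH", 0: "USDC", 1: "BTC"}))
	// [USDC BTC ETH]
}
```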
diff --git a/protocol/x/subaccounts/lib/updates_test.go b/protocol/x/subaccounts/lib/updates_test.go
deleted file mode 100644
index 99e2d73ba7..0000000000
--- a/protocol/x/subaccounts/lib/updates_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package lib_test
-
-import (
-	"math/big"
-	"testing"
-
-	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/lib"
-	"github.com/dydxprotocol/v4-chain/protocol/x/subaccounts/types"
-	"github.com/stretchr/testify/require"
-)
-
-func TestIsValidStateTransitionForUndercollateralizedSubaccount_ZeroMarginRequirements(t *testing.T) {
-	tests := map[string]struct {
-		bigCurNetCollateral     *big.Int
-		bigCurInitialMargin     *big.Int
-		bigCurMaintenanceMargin *big.Int
-		bigNewNetCollateral     *big.Int
-		bigNewMaintenanceMargin *big.Int
-
-		expectedResult types.UpdateResult
-	}{
-		// Tests when current margin requirement is zero and margin requirement increases.
-		"fails when MMR increases and TNC decreases - negative TNC": {
-			bigCurNetCollateral:     big.NewInt(-1),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(-2),
-			bigNewMaintenanceMargin: big.NewInt(1),
-			expectedResult:          types.StillUndercollateralized,
-		},
-		"fails when MMR increases and TNC stays the same - negative TNC": {
-			bigCurNetCollateral:     big.NewInt(-1),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(-1),
-			bigNewMaintenanceMargin: big.NewInt(1),
-			expectedResult:          types.StillUndercollateralized,
-		},
-		"fails when MMR increases and TNC increases - negative TNC": {
-			bigCurNetCollateral:     big.NewInt(-1),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(100),
-			bigNewMaintenanceMargin: big.NewInt(1),
-			expectedResult:          types.StillUndercollateralized,
-		},
-		// Tests when both margin requirements are zero.
-		"fails when both new and old MMR are zero and TNC stays the same": {
-			bigCurNetCollateral:     big.NewInt(-1),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(-1),
-			bigNewMaintenanceMargin: big.NewInt(0),
-			expectedResult:          types.StillUndercollateralized,
-		},
-		"fails when both new and old MMR are zero and TNC decreases from negative to negative": {
-			bigCurNetCollateral:     big.NewInt(-1),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(-2),
-			bigNewMaintenanceMargin: big.NewInt(0),
-			expectedResult:          types.StillUndercollateralized,
-		},
-		"succeeds when both new and old MMR are zero and TNC increases": {
-			bigCurNetCollateral:     big.NewInt(-2),
-			bigCurInitialMargin:     big.NewInt(0),
-			bigCurMaintenanceMargin: big.NewInt(0),
-			bigNewNetCollateral:     big.NewInt(-1),
-			bigNewMaintenanceMargin: big.NewInt(0),
-			expectedResult:          types.Success,
-		},
-		// Tests when new margin requirement is zero.
-		"fails when MMR decreased to zero, and TNC increases but is still negative": {
-			bigCurNetCollateral:     big.NewInt(-2),
-			bigCurInitialMargin:     big.NewInt(1),
-			bigCurMaintenanceMargin: big.NewInt(1),
-			bigNewNetCollateral:     big.NewInt(-1),
-			bigNewMaintenanceMargin: big.NewInt(0),
-			expectedResult:          types.StillUndercollateralized,
-		},
-	}
-
-	for name, tc := range tests {
-		t.Run(name, func(t *testing.T) {
-			require.Equal(
-				t,
-				tc.expectedResult,
-				lib.IsValidStateTransitionForUndercollateralizedSubaccount(
-					tc.bigCurNetCollateral,
-					tc.bigCurInitialMargin,
-					tc.bigCurMaintenanceMargin,
-					tc.bigNewNetCollateral,
-					tc.bigNewMaintenanceMargin,
-				),
-			)
-		})
-	}
-}
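The table above covers the zero-margin edge cases of `IsValidStateTransitionForUndercollateralizedSubaccount`. For the general case where both maintenance margin requirements are positive, a check of this kind typically reduces to comparing collateral-to-margin ratios, and that comparison can be evaluated exactly by cross-multiplying with `big.Int` rather than dividing. The sketch below illustrates only that technique; it is not the module's implementation:

```go
package main

import (
	"fmt"
	"math/big"
)

// ratioDoesNotWorsen reports whether newTNC/newMMR >= curTNC/curMMR, assuming
// both maintenance margin requirements are strictly positive. Cross-multiplying
// avoids integer division and the rounding it would introduce.
func ratioDoesNotWorsen(curTNC, curMMR, newTNC, newMMR *big.Int) bool {
	// newTNC/newMMR >= curTNC/curMMR  <=>  newTNC*curMMR >= curTNC*newMMR,
	// valid because multiplying by the positive product curMMR*newMMR
	// preserves the inequality.
	lhs := new(big.Int).Mul(newTNC, curMMR)
	rhs := new(big.Int).Mul(curTNC, newMMR)
	return lhs.Cmp(rhs) >= 0
}

func main() {
	// TNC improves from -2 to -1 while MMR stays at 1: the ratio does not worsen.
	fmt.Println(ratioDoesNotWorsen(big.NewInt(-2), big.NewInt(1), big.NewInt(-1), big.NewInt(1))) // true
}
```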
diff --git a/protocol/x/subaccounts/types/streaming.pb.go b/protocol/x/subaccounts/types/streaming.pb.go
deleted file mode 100644
index 2babfbf862..0000000000
--- a/protocol/x/subaccounts/types/streaming.pb.go
+++ /dev/null
@@ -1,900 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: dydxprotocol/subaccounts/streaming.proto
-
-package types
-
-import (
-	fmt "fmt"
-	proto "github.com/cosmos/gogoproto/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// StreamSubaccountUpdate provides information on a subaccount update. Used in
-// the full node GRPC stream.
-type StreamSubaccountUpdate struct {
-	SubaccountId *SubaccountId `protobuf:"bytes,1,opt,name=subaccount_id,json=subaccountId,proto3" json:"subaccount_id,omitempty"`
-	// updated_perpetual_positions will each be for unique perpetuals.
-	UpdatedPerpetualPositions []*SubaccountPerpetualPosition `protobuf:"bytes,2,rep,name=updated_perpetual_positions,json=updatedPerpetualPositions,proto3" json:"updated_perpetual_positions,omitempty"`
-	// updated_asset_positions will each be for unique assets.
-	UpdatedAssetPositions []*SubaccountAssetPosition `protobuf:"bytes,3,rep,name=updated_asset_positions,json=updatedAssetPositions,proto3" json:"updated_asset_positions,omitempty"`
-	// Snapshot indicates if the response is from a snapshot of the subaccount.
-	// All updates should be ignored until snapshot is received.
-	// If the snapshot is true, then all previous entries should be
-	// discarded and the subaccount should be resynced.
-	// For a snapshot subaccount update, the `updated_perpetual_positions` and
-	// `updated_asset_positions` fields will contain the full state of the
-	// subaccount.
-	Snapshot bool `protobuf:"varint,4,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
-}
-
-func (m *StreamSubaccountUpdate) Reset()         { *m = StreamSubaccountUpdate{} }
-func (m *StreamSubaccountUpdate) String() string { return proto.CompactTextString(m) }
-func (*StreamSubaccountUpdate) ProtoMessage()    {}
-func (*StreamSubaccountUpdate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e6cf3092946c3c13, []int{0}
-}
-func (m *StreamSubaccountUpdate) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StreamSubaccountUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StreamSubaccountUpdate.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StreamSubaccountUpdate) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StreamSubaccountUpdate.Merge(m, src)
-}
-func (m *StreamSubaccountUpdate) XXX_Size() int {
-	return m.Size()
-}
-func (m *StreamSubaccountUpdate) XXX_DiscardUnknown() {
-	xxx_messageInfo_StreamSubaccountUpdate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamSubaccountUpdate proto.InternalMessageInfo
-
-func (m *StreamSubaccountUpdate) GetSubaccountId() *SubaccountId {
-	if m != nil {
-		return m.SubaccountId
-	}
-	return nil
-}
-
-func (m *StreamSubaccountUpdate) GetUpdatedPerpetualPositions() []*SubaccountPerpetualPosition {
-	if m != nil {
-		return m.UpdatedPerpetualPositions
-	}
-	return nil
-}
-
-func (m *StreamSubaccountUpdate) GetUpdatedAssetPositions() []*SubaccountAssetPosition {
-	if m != nil {
-		return m.UpdatedAssetPositions
-	}
-	return nil
-}
-
-func (m *StreamSubaccountUpdate) GetSnapshot() bool {
-	if m != nil {
-		return m.Snapshot
-	}
-	return false
-}
-
-// SubaccountPerpetualPosition provides information on a subaccount's updated
-// perpetual positions.
-type SubaccountPerpetualPosition struct {
-	// The `Id` of the `Perpetual`.
-	PerpetualId uint32 `protobuf:"varint,1,opt,name=perpetual_id,json=perpetualId,proto3" json:"perpetual_id,omitempty"`
-	// The size of the position in base quantums.
-	Quantums uint64 `protobuf:"varint,2,opt,name=quantums,proto3" json:"quantums,omitempty"`
-}
-
-func (m *SubaccountPerpetualPosition) Reset()         { *m = SubaccountPerpetualPosition{} }
-func (m *SubaccountPerpetualPosition) String() string { return proto.CompactTextString(m) }
-func (*SubaccountPerpetualPosition) ProtoMessage()    {}
-func (*SubaccountPerpetualPosition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e6cf3092946c3c13, []int{1}
-}
-func (m *SubaccountPerpetualPosition) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SubaccountPerpetualPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SubaccountPerpetualPosition.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SubaccountPerpetualPosition) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SubaccountPerpetualPosition.Merge(m, src)
-}
-func (m *SubaccountPerpetualPosition) XXX_Size() int {
-	return m.Size()
-}
-func (m *SubaccountPerpetualPosition) XXX_DiscardUnknown() {
-	xxx_messageInfo_SubaccountPerpetualPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SubaccountPerpetualPosition proto.InternalMessageInfo
-
-func (m *SubaccountPerpetualPosition) GetPerpetualId() uint32 {
-	if m != nil {
-		return m.PerpetualId
-	}
-	return 0
-}
-
-func (m *SubaccountPerpetualPosition) GetQuantums() uint64 {
-	if m != nil {
-		return m.Quantums
-	}
-	return 0
-}
-
-// SubaccountAssetPosition provides information on a subaccount's updated asset
-// positions.
-type SubaccountAssetPosition struct {
-	// The `Id` of the `Asset`.
-	AssetId uint32 `protobuf:"varint,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"`
-	// The absolute size of the position in base quantums.
-	Quantums uint64 `protobuf:"varint,2,opt,name=quantums,proto3" json:"quantums,omitempty"`
-}
-
-func (m *SubaccountAssetPosition) Reset()         { *m = SubaccountAssetPosition{} }
-func (m *SubaccountAssetPosition) String() string { return proto.CompactTextString(m) }
-func (*SubaccountAssetPosition) ProtoMessage()    {}
-func (*SubaccountAssetPosition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e6cf3092946c3c13, []int{2}
-}
-func (m *SubaccountAssetPosition) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SubaccountAssetPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SubaccountAssetPosition.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SubaccountAssetPosition) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SubaccountAssetPosition.Merge(m, src)
-}
-func (m *SubaccountAssetPosition) XXX_Size() int {
-	return m.Size()
-}
-func (m *SubaccountAssetPosition) XXX_DiscardUnknown() {
-	xxx_messageInfo_SubaccountAssetPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SubaccountAssetPosition proto.InternalMessageInfo
-
-func (m *SubaccountAssetPosition) GetAssetId() uint32 {
-	if m != nil {
-		return m.AssetId
-	}
-	return 0
-}
-
-func (m *SubaccountAssetPosition) GetQuantums() uint64 {
-	if m != nil {
-		return m.Quantums
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*StreamSubaccountUpdate)(nil), "dydxprotocol.subaccounts.StreamSubaccountUpdate")
-	proto.RegisterType((*SubaccountPerpetualPosition)(nil), "dydxprotocol.subaccounts.SubaccountPerpetualPosition")
-	proto.RegisterType((*SubaccountAssetPosition)(nil), "dydxprotocol.subaccounts.SubaccountAssetPosition")
-}
-
-func init() {
-	proto.RegisterFile("dydxprotocol/subaccounts/streaming.proto", fileDescriptor_e6cf3092946c3c13)
-}
-
-var fileDescriptor_e6cf3092946c3c13 = []byte{
-	// 353 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6a, 0xea, 0x40,
-	0x14, 0x86, 0x1d, 0x95, 0x7b, 0x65, 0xd4, 0xcd, 0xc0, 0xbd, 0x46, 0x85, 0x60, 0x5d, 0x94, 0x74,
-	0xd1, 0x84, 0xda, 0x76, 0xd9, 0x45, 0xbb, 0x93, 0x6e, 0x24, 0x52, 0x0a, 0xa5, 0x20, 0x63, 0x26,
-	0xe8, 0x80, 0x66, 0xa6, 0x9e, 0x99, 0xa2, 0x6f, 0xd1, 0xc7, 0xea, 0xd2, 0x65, 0x97, 0xc5, 0xbc,
-	0x48, 0x31, 0xc6, 0x31, 0x52, 0x14, 0x77, 0xf9, 0xcf, 0x7f, 0xce, 0xf7, 0x67, 0x0e, 0x07, 0x3b,
-	0x6c, 0xc1, 0xe6, 0x72, 0x26, 0x94, 0x08, 0xc4, 0xc4, 0x03, 0x3d, 0xa4, 0x41, 0x20, 0x74, 0xa4,
-	0xc0, 0x03, 0x35, 0x0b, 0xe9, 0x94, 0x47, 0x23, 0x37, 0xb1, 0x89, 0x95, 0xed, 0x74, 0x33, 0x9d,
-	0x8d, 0x8b, 0xc3, 0x0c, 0xf3, 0xbd, 0x81, 0xb4, 0xe3, 0x3c, 0xfe, 0xdf, 0x4f, 0xc0, 0x7d, 0x63,
-	0x3d, 0x49, 0x46, 0x55, 0x48, 0x1e, 0x71, 0x75, 0xd7, 0x3e, 0xe0, 0xcc, 0x42, 0x2d, 0xe4, 0x94,
-	0x3b, 0xe7, 0xee, 0xa1, 0x5c, 0x77, 0x87, 0xe8, 0x32, 0xbf, 0x02, 0x19, 0x45, 0x34, 0x6e, 0xea,
-	0x04, 0xcb, 0x06, 0x32, 0x9c, 0xc9, 0x50, 0x69, 0x3a, 0x19, 0x48, 0x01, 0x5c, 0x71, 0x11, 0x81,
-	0x95, 0x6f, 0x15, 0x9c, 0x72, 0xe7, 0xf6, 0x14, 0x74, 0x6f, 0x3b, 0xde, 0x4b, 0xa7, 0xfd, 0x7a,
-	0x4a, 0xfe, 0xe5, 0x00, 0xe1, 0xb8, 0xb6, 0x8d, 0xa5, 0x00, 0xa1, 0xca, 0x44, 0x16, 0x92, 0xc8,
-	0xab, 0x53, 0x22, 0xef, 0xd7, 0xa3, 0x26, 0xee, 0x5f, 0x4a, 0xdc, 0xab, 0x02, 0x69, 0xe0, 0x12,
-	0x44, 0x54, 0xc2, 0x58, 0x28, 0xab, 0xd8, 0x42, 0x4e, 0xc9, 0x37, 0xba, 0xfd, 0x8a, 0x9b, 0x47,
-	0x1e, 0x40, 0xce, 0x70, 0x65, 0xb7, 0x94, 0x74, 0xd1, 0x55, 0xbf, 0x6c, 0x6a, 0x5d, 0xb6, 0xa6,
-	0xbf, 0x69, 0x1a, 0x29, 0x3d, 0x5d, 0x2f, 0x0b, 0x39, 0x45, 0xdf, 0xe8, 0x76, 0x0f, 0xd7, 0x0e,
-	0xfc, 0x2b, 0xa9, 0xe3, 0xd2, 0xe6, 0xdd, 0x86, 0xfa, 0x37, 0xd1, 0xc7, 0x89, 0x0f, 0xcf, 0x9f,
-	0x2b, 0x1b, 0x2d, 0x57, 0x36, 0xfa, 0x5e, 0xd9, 0xe8, 0x23, 0xb6, 0x73, 0xcb, 0xd8, 0xce, 0x7d,
-	0xc5, 0x76, 0xee, 0xe5, 0x6e, 0xc4, 0xd5, 0x58, 0x0f, 0xdd, 0x40, 0x4c, 0xbd, 0xbd, 0x2b, 0x7b,
-	0xbf, 0xb9, 0x0c, 0xc6, 0x94, 0x47, 0x9e, 0xa9, 0xcc, 0xf7, 0x2e, 0x4f, 0x2d, 0x64, 0x08, 0xc3,
-	0x3f, 0x89, 0x7b, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x34, 0x6c, 0x66, 0xe6, 0x02, 0x00,
-	0x00,
-}
-
-func (m *StreamSubaccountUpdate) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StreamSubaccountUpdate) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamSubaccountUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Snapshot {
-		i--
-		if m.Snapshot {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if len(m.UpdatedAssetPositions) > 0 {
-		for iNdEx := len(m.UpdatedAssetPositions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.UpdatedAssetPositions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintStreaming(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.UpdatedPerpetualPositions) > 0 {
-		for iNdEx := len(m.UpdatedPerpetualPositions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.UpdatedPerpetualPositions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintStreaming(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.SubaccountId != nil {
-		{
-			size, err := m.SubaccountId.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintStreaming(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SubaccountPerpetualPosition) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SubaccountPerpetualPosition) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SubaccountPerpetualPosition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Quantums != 0 {
-		i = encodeVarintStreaming(dAtA, i, uint64(m.Quantums))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.PerpetualId != 0 {
-		i = encodeVarintStreaming(dAtA, i, uint64(m.PerpetualId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SubaccountAssetPosition) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SubaccountAssetPosition) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SubaccountAssetPosition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Quantums != 0 {
-		i = encodeVarintStreaming(dAtA, i, uint64(m.Quantums))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.AssetId != 0 {
-		i = encodeVarintStreaming(dAtA, i, uint64(m.AssetId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintStreaming(dAtA []byte, offset int, v uint64) int {
-	offset -= sovStreaming(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *StreamSubaccountUpdate) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.SubaccountId != nil {
-		l = m.SubaccountId.Size()
-		n += 1 + l + sovStreaming(uint64(l))
-	}
-	if len(m.UpdatedPerpetualPositions) > 0 {
-		for _, e := range m.UpdatedPerpetualPositions {
-			l = e.Size()
-			n += 1 + l + sovStreaming(uint64(l))
-		}
-	}
-	if len(m.UpdatedAssetPositions) > 0 {
-		for _, e := range m.UpdatedAssetPositions {
-			l = e.Size()
-			n += 1 + l + sovStreaming(uint64(l))
-		}
-	}
-	if m.Snapshot {
-		n += 2
-	}
-	return n
-}
-
-func (m *SubaccountPerpetualPosition) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.PerpetualId != 0 {
-		n += 1 + sovStreaming(uint64(m.PerpetualId))
-	}
-	if m.Quantums != 0 {
-		n += 1 + sovStreaming(uint64(m.Quantums))
-	}
-	return n
-}
-
-func (m *SubaccountAssetPosition) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.AssetId != 0 {
-		n += 1 + sovStreaming(uint64(m.AssetId))
-	}
-	if m.Quantums != 0 {
-		n += 1 + sovStreaming(uint64(m.Quantums))
-	}
-	return n
-}
-
-func sovStreaming(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozStreaming(x uint64) (n int) {
-	return sovStreaming(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
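`encodeVarintStreaming` and `sovStreaming` above are the generated helpers for protobuf's base-128 varint wire format: `sovStreaming` computes the encoded size as (bit length + 6) / 7, and the encoder writes 7 payload bits per byte with the high bit as a continuation flag. The standard library's `encoding/binary` uses the same unsigned varint encoding, which makes the size formula easy to sanity-check:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// varintSize mirrors the generated sovStreaming helper: the number of bytes a
// uint64 occupies as a protobuf base-128 varint (7 payload bits per byte).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, v := range []uint64{0, 127, 128, 300, 1 << 32} {
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, v) // same base-128 encoding used by protobuf
		fmt.Printf("value=%d predicted=%d actual=%d bytes=%x\n", v, varintSize(v), n, buf[:n])
	}
}
```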
-func (m *StreamSubaccountUpdate) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowStreaming
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StreamSubaccountUpdate: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StreamSubaccountUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SubaccountId", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.SubaccountId == nil {
-				m.SubaccountId = &SubaccountId{}
-			}
-			if err := m.SubaccountId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedPerpetualPositions", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.UpdatedPerpetualPositions = append(m.UpdatedPerpetualPositions, &SubaccountPerpetualPosition{})
-			if err := m.UpdatedPerpetualPositions[len(m.UpdatedPerpetualPositions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAssetPositions", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.UpdatedAssetPositions = append(m.UpdatedAssetPositions, &SubaccountAssetPosition{})
-			if err := m.UpdatedAssetPositions[len(m.UpdatedAssetPositions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Snapshot = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipStreaming(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SubaccountPerpetualPosition) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowStreaming
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SubaccountPerpetualPosition: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SubaccountPerpetualPosition: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PerpetualId", wireType)
-			}
-			m.PerpetualId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.PerpetualId |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quantums", wireType)
-			}
-			m.Quantums = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Quantums |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipStreaming(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SubaccountAssetPosition) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowStreaming
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SubaccountAssetPosition: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SubaccountAssetPosition: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AssetId", wireType)
-			}
-			m.AssetId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AssetId |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quantums", wireType)
-			}
-			m.Quantums = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Quantums |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipStreaming(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthStreaming
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipStreaming(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowStreaming
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowStreaming
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthStreaming
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupStreaming
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthStreaming
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthStreaming        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowStreaming          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupStreaming = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/protocol/x/subaccounts/types/types.go b/protocol/x/subaccounts/types/types.go
index e977afebfe..cf4ac34475 100644
--- a/protocol/x/subaccounts/types/types.go
+++ b/protocol/x/subaccounts/types/types.go
@@ -63,11 +63,6 @@ type SubaccountsKeeper interface {
 		ctx sdk.Context,
 		id SubaccountId,
 	) (val Subaccount)
-	GetStreamSubaccountUpdate(
-		ctx sdk.Context,
-		id SubaccountId,
-		snapshot bool,
-	) (val StreamSubaccountUpdate)
 	LegacyGetNegativeTncSubaccountSeenAtBlock(ctx sdk.Context) (uint32, bool)
 	GetNegativeTncSubaccountSeenAtBlock(
 		ctx sdk.Context,
@@ -78,8 +73,4 @@ type SubaccountsKeeper interface {
 		perpetualId uint32,
 		blockHeight uint32,
 	) error
-	SendSubaccountUpdates(
-		ctx sdk.Context,
-		subaccountUpdates []StreamSubaccountUpdate,
-	)
 }
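Removing methods from the `SubaccountsKeeper` interface is non-breaking for implementers, since a concrete type with extra methods still satisfies the smaller interface. The usual way callers guard interface satisfaction at compile time is a blank-identifier assertion; a generic sketch of that idiom with hypothetical names (not types from this repository):

```go
package main

// Reader is a hypothetical trimmed-down interface, standing in for a keeper
// interface that has just lost some methods.
type Reader interface {
	Get(id uint32) string
}

// keeper is a hypothetical implementation that still carries extra methods.
type keeper struct{}

func (keeper) Get(id uint32) string { return "value" }
func (keeper) Stream(id uint32)     {} // extra method; does not break satisfaction

// Compile-time assertion that keeper satisfies Reader. If Reader ever grows a
// method that keeper lacks, this line fails to compile.
var _ Reader = keeper{}

func main() {}
```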