diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 7ac339aa43c8..012f584561ac 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -311,3 +311,39 @@ jobs:
if grep -q "ERROR" flwr_output.log; then
exit 1
fi
+
+ build_and_install:
+ runs-on: ubuntu-22.04
+ timeout-minutes: 10
+ needs: wheel
+ strategy:
+ matrix:
+ framework: ["numpy"]
+ python-version: ["3.9", "3.10", "3.11"]
+
+    name: >-
+ Build & Install /
+ Python ${{ matrix.python-version }} /
+ ${{ matrix.framework }}
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Bootstrap
+ uses: ./.github/actions/bootstrap
+ with:
+ python-version: ${{ matrix.python-version }}
+ poetry-skip: 'true'
+ - name: Install Flower from repo
+ if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }}
+ run: |
+ python -m pip install .
+ - name: Install Flower wheel from artifact store
+ if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }}
+ run: |
+ python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }}
+ - name: Create project, build, and install it
+ run: |
+ flwr new tmp-${{ matrix.framework }} --framework ${{ matrix.framework }} --username gh_ci
+ cd tmp-${{ matrix.framework }}
+ flwr build
+ flwr install *.fab
diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py
index 974c264a6220..a2667dbcf006 100644
--- a/baselines/doc/source/conf.py
+++ b/baselines/doc/source/conf.py
@@ -37,7 +37,7 @@
author = "The Flower Authors"
# The full version, including alpha/beta/rc tags
-release = "1.11.0"
+release = "1.13.0"
# -- General configuration ---------------------------------------------------
diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml
index 82a10d30173b..3c155387ef93 100644
--- a/dev/changelog_config.toml
+++ b/dev/changelog_config.toml
@@ -3,972 +3,979 @@
type = ["ci", "docs", "feat", "fix", "refactor", "break"]
-project = ["framework", "baselines", "datasets", "examples", "benchmarks", "glossary"]
+project = [
+ "framework",
+ "baselines",
+ "datasets",
+ "examples",
+ "benchmarks",
+ "glossary",
+]
scope = "skip"
pattern_template = "^({types})\\(({projects})(?::({scope}))?\\) ([A-Z][^\\n]*[^\\.\\n])$"
-allowed_verbs=[
- "Abandon",
- "Abort",
- "Abstract",
- "Accept",
- "Accomodate",
- "Accompany",
- "Account",
- "Accumulate",
- "Accuse",
- "Ache",
- "Achieve",
- "Acknowledge",
- "Acquire",
- "Act",
- "Activate",
- "Active",
- "Adapt",
- "Add",
- "Address",
- "Adhere",
- "Adjust",
- "Admit",
- "Adopt",
- "Advance",
- "Advise",
- "Advocate",
- "Affect",
- "Affirm",
- "Afford",
- "Agree",
- "Aim",
- "Align",
- "Allow",
- "Alter",
- "Amend",
- "Analyse",
- "Analyze",
- "Anchor",
- "Annotate",
- "Announce",
- "Annoy",
- "Annul",
- "Answer",
- "Appeal",
- "Appear",
- "Append",
- "Applicate",
- "Apply",
- "Appoint",
- "Appreciate",
- "Approach",
- "Approve",
- "Argue",
- "Arise",
- "Arrange",
- "Arrest",
- "Arrive",
- "Ask",
- "Assert",
- "Assess",
- "Assign",
- "Assist",
- "Associate",
- "Assume",
- "Assure",
- "Attach",
- "Attack",
- "Attempt",
- "Attend",
- "Attract",
- "Augment",
- "Avoid",
- "Awake",
- "Back",
- "Backport",
- "Backup",
- "Bake",
- "Base",
- "Battle",
- "Be",
- "Bear",
- "Beat",
- "Become",
- "Begin",
- "Behave",
- "Believe",
- "Belong",
- "Bend",
- "Benefit",
- "Better",
- "Beware",
- "Bind",
- "Blacklist",
- "Blame",
- "Blend",
- "Block",
- "Blow",
- "Blur",
- "Bootstrap",
- "Born",
- "Borrow",
- "Bother",
- "Break",
- "Bridge",
- "Bring",
- "Broadcast",
- "Buffer",
- "Build",
- "Bump",
- "Bundle",
- "Burn",
- "Busy",
- "Buy",
- "Bypass",
- "Cache",
- "Calculate",
- "Call",
- "Cancel",
- "Capitalize",
- "Capture",
- "Care",
- "Carry",
- "Carryout",
- "Cast",
- "Catch",
- "Categorize",
- "Cause",
- "Center",
- "Centralize",
- "Challenge",
- "Change",
- "Chant",
- "Charge",
- "Chase",
- "Chat",
- "Check",
- "Choose",
- "Circle",
- "Claim",
- "Clarify",
- "Clean",
- "Cleanse",
- "Clear",
- "Climb",
- "Clip",
- "Close",
- "Clothe",
- "Coalesce",
- "Collapse",
- "Collect",
- "Combine",
- "Come",
- "Command",
- "Comment",
- "Commit",
- "Compare",
- "Compensate",
- "Compile",
- "Complain",
- "Complement",
- "Complete",
- "Compose",
- "Compress",
- "Compute",
- "Conceal",
- "Concentrate",
- "Conclude",
- "Concur",
- "Conduct",
- "Configure",
- "Confirm",
- "Confront",
- "Connect",
- "Connote",
- "Consider",
- "Consist",
- "Consolidate",
- "Constitute",
- "Construct",
- "Consume",
- "Contact",
- "Contain",
- "Contest",
- "Continue",
- "Contribute",
- "Control",
- "Convert",
- "Convey",
- "Cook",
- "Coordinate",
- "Cope",
- "Copy",
- "Correct",
- "Cost",
- "Counsel",
- "Count",
- "Cover",
- "Create",
- "Cross",
- "Cry",
- "Cut",
- "Cycle",
- "Damage",
- "Dance",
- "Deal",
- "Debate",
- "Decide",
- "Declare",
- "Decode",
- "Deconstruct",
- "Decouple",
- "Decrease",
- "Dedup",
- "Duplicate",
- "Deduplicate",
- "Default",
- "Defeat",
- "Defend",
- "Defer",
- "Define",
- "Delay",
- "Delegate",
- "Delete",
- "Deliver",
- "Demand",
- "Demolish",
- "Demonstrate",
- "Deny",
- "Depart",
- "Depend",
- "Depict",
- "Deprecate",
- "Derive",
- "Describe",
- "Deserialize",
- "Design",
- "Desire",
- "Destroy",
- "Detail",
- "Detect",
- "Determine",
- "Develop",
- "Devote",
- "Die",
- "Dim",
- "Direct",
- "Disable",
- "Disallow",
- "Disappear",
- "Disconnect",
- "Discontinue",
- "Discourage",
- "Discover",
- "Discuss",
- "Dislike",
- "Dismiss",
- "Dispatch",
- "Displace",
- "Display",
- "Distinguish",
- "Divide",
- "Do",
- "Document",
- "Dominate",
- "Downgrade",
- "Download",
- "Draw",
- "Dread",
- "Dress",
- "Drink",
- "Drive",
- "Drop",
- "Dry",
- "Dump",
- "Duplicate",
- "Earn",
- "Eat",
- "Echo",
- "Edit",
- "Educate",
- "Elaborate",
- "Elect",
- "Elevate",
- "Eliminate",
- "Embed",
- "Emerge",
- "Emit",
- "Employ",
- "Empty",
- "Enable",
- "Encapsulate",
- "Encourage",
- "End",
- "Endorse",
- "Endure",
- "Enforce",
- "Engage",
- "Enhance",
- "Enjoy",
- "Enquire",
- "Enroll",
- "Ensure",
- "Enter",
- "Enumerate",
- "Equal",
- "Equate",
- "Erase",
- "Escape",
- "Establish",
- "Estimate",
- "Evaluate",
- "Examine",
- "Except",
- "Exclude",
- "Excuse",
- "Execute",
- "Exempt",
- "Exercise",
- "Exert",
- "Exist",
- "Exit",
- "Expand",
- "Expect",
- "Experience",
- "Explain",
- "Explore",
- "Export",
- "Expose",
- "Express",
- "Extend",
- "Extract",
- "Face",
- "Factor",
- "Fail",
- "Fall",
- "Fault",
- "Favor",
- "Fear",
- "Feature",
- "Feed",
- "Feel",
- "Fetch",
- "Fight",
- "Fill",
- "Filter",
- "Find",
- "Finish",
- "Fit",
- "Fix",
- "Flatten",
- "Flee",
- "Flip",
- "Float",
- "Flow",
- "Flunk",
- "Flush",
- "Fly",
- "Focus",
- "Fold",
- "Follow",
- "Force",
- "Foresee",
- "Forget",
- "Fork",
- "Form",
- "Formalize",
- "Format",
- "Forward",
- "Found",
- "Free",
- "Freeze",
- "Gain",
- "Gather",
- "Generalize",
- "Generate",
- "Get",
- "Gitignore",
- "Give",
- "Giveup",
- "Glance",
- "Go",
- "Going",
- "Govern",
- "Grant",
- "Grin",
- "Group",
- "Grow",
- "Guard",
- "Guess",
- "Guide",
- "Hack",
- "Halt",
- "Hand",
- "Handle",
- "Hang",
- "Happen",
- "Hardcode",
- "Harm",
- "Hate",
- "Have",
- "Head",
- "Hear",
- "Help",
- "Hide",
- "Highlight",
- "Hint",
- "Hire",
- "Hit",
- "Hold",
- "Hook",
- "Hope",
- "House",
- "Hurt",
- "Identify",
- "Ignore",
- "Illuminate",
- "Illustrate",
- "Imagine",
- "Impersonate",
- "Implement",
- "Imply",
- "Import",
- "Importune",
- "Impose",
- "Improve",
- "Include",
- "Incorporate",
- "Increase",
- "Incur",
- "Indent",
- "Indicate",
- "Infer",
- "Influence",
- "Inform",
- "Inherit",
- "Init",
- "Initialize",
- "Initiate",
- "Injure",
- "In-line",
- "Inline",
- "Insist",
- "Install",
- "Instantiate",
- "Instruct",
- "Integrate",
- "Intend",
- "Intercept",
- "Internalize",
- "Interpret",
- "Introduce",
- "Invalidate",
- "Invert",
- "Invest",
- "Investigate",
- "Invite",
- "Invoke",
- "Involve",
- "Isolate",
- "Issue",
- "Join",
- "Journey",
- "Joy",
- "Judge",
- "Jump",
- "Justify",
- "Keep",
- "Key",
- "Kick",
- "Kill",
- "Kiss",
- "Knock",
- "Know",
- "Label",
- "Lack",
- "Land",
- "Last",
- "Laugh",
- "Launch",
- "Lay",
- "Lead",
- "Lean",
- "Leap",
- "Learn",
- "Leave",
- "Let",
- "Lie",
- "Lift",
- "Light",
- "Like",
- "Limit",
- "Link",
- "List",
- "Listen",
- "Live",
- "Load",
- "Localize",
- "Locate",
- "Lock",
- "Log",
- "Login",
- "Look",
- "Loop",
- "Lose",
- "Love",
- "Lower",
- "Maintain",
- "Make",
- "Manage",
- "Map",
- "Mark",
- "Marry",
- "Match",
- "Materialize",
- "Matter",
- "Mean",
- "Measure",
- "Meet",
- "Memoize",
- "Menace",
- "Mention",
- "Merge",
- "Migrate",
- "Mind",
- "Mirror",
- "Misinform",
- "Miss",
- "Mix",
- "Mock",
- "Modernize",
- "Modify",
- "Monitor",
- "Monomorphize",
- "Move",
- "Mutate",
- "Name",
- "Navigate",
- "Near",
- "Need",
- "Nod",
- "Normalize",
- "Notarize",
- "Note",
- "Notice",
- "Notify",
- "Observe",
- "Obtain",
- "Occupy",
- "Occur",
- "Offer",
- "Officiate",
- "Omit",
- "Open",
- "Operate",
- "Optimise",
- "Optimize",
- "Order",
- "Organise",
- "Organize",
- "Output",
- "Overhaul",
- "Override",
- "Overwrite",
- "Owe",
- "Own",
- "Pack",
- "Package",
- "Paint",
- "Panic",
- "Parameterize",
- "Parse",
- "Partake",
- "Pass",
- "Patch",
- "Pause",
- "Pay",
- "Perform",
- "Permit",
- "Persist",
- "Persuade",
- "Pick",
- "Pin",
- "Ping",
- "Pipe",
- "Place",
- "Plan",
- "Play",
- "Plow",
- "Point",
- "Ponder",
- "Populate",
- "Port",
- "Position",
- "Possess",
- "Pour",
- "Predict",
- "Prefer",
- "Prefix",
- "Prepare",
- "Present",
- "Preserve",
- "Press",
- "Presume",
- "Prevent",
- "Print",
- "Prioritize",
- "Privatize",
- "Proceed",
- "Process",
- "Procure",
- "Produce",
- "Prolong",
- "Promise",
- "Promote",
- "Prompt",
- "Propagate",
- "Propose",
- "Prosecute",
- "Protect",
- "Protest",
- "Prove",
- "Provide",
- "Prune",
- "Publish",
- "Pull",
- "Purchase",
- "Purge",
- "Pursue",
- "Push",
- "Put",
- "Puton",
- "Qualify",
- "Query",
- "Question",
- "Queue",
- "Quit",
- "Quote",
- "Race",
- "Raise",
- "Randomize",
- "Reach",
- "React",
- "Read",
- "Realise",
- "Realize",
- "Reapply",
- "Rearrange",
- "Reason",
- "Rebuild",
- "Recall",
- "Receive",
- "Reckon",
- "Recognise",
- "Recognize",
- "Recommend",
- "Reconnect",
- "Record",
- "Recover",
- "Recur",
- "Redact",
- "Re-define",
- "Redefine",
- "Re-design",
- "Redesign",
- "Redirect",
- "Re-do",
- "Redo",
- "Reduce",
- "Re-enable",
- "Refactor",
- "Refer",
- "Reference",
- "Refine",
- "Reflect",
- "Reformat",
- "Refresh",
- "Refuse",
- "Regard",
- "Regenerate",
- "Register",
- "Reimplement",
- "Re-instate",
- "Reinstate",
- "Reject",
- "Relate",
- "Relax",
- "Release",
- "Reload",
- "Rely",
- "Remain",
- "Remember",
- "Remind",
- "Remove",
- "Rename",
- "Render",
- "Re-order",
- "Reorder",
- "Reorganise",
- "Reorganize",
- "Repair",
- "Reparent",
- "Repeat",
- "Repel",
- "Rephrase",
- "Replace",
- "Reply",
- "Report",
- "Reposition",
- "Represent",
- "Request",
- "Require",
- "Rerender",
- "Rerun",
- "Re-scale",
- "Rescale",
- "Research",
- "Re-set",
- "Reset",
- "Reside",
- "Resize",
- "Resolve",
- "Respect",
- "Respond",
- "Rest",
- "Restart",
- "Restore",
- "Restrict",
- "Restructure",
- "Result",
- "Resume",
- "Resurface",
- "Retain",
- "Retire",
- "Retreat",
- "Retrieve",
- "Retry",
- "Return",
- "Reuse",
- "Revamp",
- "Reveal",
- "Reverse",
- "Revert",
- "Review",
- "Revise",
- "Revisit",
- "Revoke",
- "Reword",
- "Re-wrap",
- "Rewrap",
- "Rewrite",
- "Ride",
- "Ring",
- "Rise",
- "Roll",
- "Rotate",
- "Round",
- "Route",
- "Rule",
- "Run",
- "Sale",
- "Salute",
- "Sample",
- "Sanitize",
- "Save",
- "Say",
- "Scale",
- "Scope",
- "Score",
- "Scroll",
- "Search",
- "Secure",
- "See",
- "Seek",
- "Seem",
- "Select",
- "Self-initialize",
- "Sell",
- "Send",
- "Separate",
- "Serialize",
- "Serve",
- "Set",
- "Settle",
- "Shake",
- "Shape",
- "Share",
- "Shift",
- "Shoot",
- "Shorten",
- "Shout",
- "Show",
- "Shrink",
- "Shuffle",
- "Shut",
- "Sign",
- "Signify",
- "Silence",
- "Simplify",
- "Simulate",
- "Sing",
- "Sit",
- "Size",
- "Skip",
- "Sleep",
- "Slide",
- "Slip",
- "Smile",
- "Solve",
- "Sort",
- "Sound",
- "Source",
- "Spawn",
- "Speak",
- "Specify",
- "Spend",
- "Split",
- "Spread",
- "Stand",
- "Standardize",
- "Stare",
- "Start",
- "State",
- "Stay",
- "Steal",
- "Steer",
- "Step",
- "Stick",
- "Stop",
- "Store",
- "Stress",
- "Stretch",
- "Strike",
- "Stringify",
- "Strip",
- "Struggle",
- "Stub",
- "Study",
- "Style",
- "Subclass",
- "Submit",
- "Substitute",
- "Subtract",
- "Succeed",
- "Suffer",
- "Suggest",
- "Suit",
- "Supply",
- "Support",
- "Suppose",
- "Suppress",
- "Surround",
- "Survive",
- "Suspect",
- "Swallow",
- "Swap",
- "Sway",
- "Switch",
- "Sync",
- "Synchronise",
- "Synchronize",
- "Synthesize",
- "Take",
- "Talk",
- "Talkover",
- "Target",
- "Teach",
- "Tell",
- "Tempt",
- "Tend",
- "Terminate",
- "Test",
- "Testify",
- "Thank",
- "Think",
- "Threaten",
- "Throw",
- "Tie",
- "Time",
- "Toggle",
- "Touch",
- "Track",
- "Trade",
- "Train",
- "Transfer",
- "Transform",
- "Translate",
- "Transpile",
- "Trash",
- "Travel",
- "Tread",
- "Treat",
- "Trigger",
- "Trim",
- "Truncate",
- "Trust",
- "Try",
- "Tune",
- "Turn",
- "Tweak",
- "Twist",
- "Unblock",
- "Uncomment",
- "Uncover",
- "Understand",
- "Undertake",
- "Undo",
- "Undry",
- "Unescape",
- "Unfold",
- "Unify",
- "Unignore",
- "Unite",
- "Unload",
- "Unlock",
- "Unpack",
- "Unregister",
- "Unskip",
- "Unsubscribe",
- "Untrack",
- "Unwrap",
- "Update",
- "Upgrade",
- "Upload",
- "Urge",
- "Use",
- "Utter",
- "Validate",
- "Value",
- "Vanish",
- "Vary",
- "Verbosify",
- "Verify",
- "View",
- "Visit",
- "Vocalize",
- "Voice",
- "Vote",
- "Wait",
- "Wake",
- "Walk",
- "Want",
- "Warn",
- "Warrant",
- "Wash",
- "Watch",
- "Wear",
- "Weep",
- "Weigh",
- "Welcome",
- "Whitelist",
- "Win",
- "Wipe",
- "Wire",
- "Wish",
- "Withdraw",
- "Wonder",
- "Work",
- "Workout",
- "Worry",
- "Wrap",
- "Write"
+allowed_verbs = [
+ "Abandon",
+ "Abort",
+ "Abstract",
+ "Accept",
+    "Accommodate",
+ "Accompany",
+ "Account",
+ "Accumulate",
+ "Accuse",
+ "Ache",
+ "Achieve",
+ "Acknowledge",
+ "Acquire",
+ "Act",
+ "Activate",
+ "Active",
+ "Adapt",
+ "Add",
+ "Address",
+ "Adhere",
+ "Adjust",
+ "Admit",
+ "Adopt",
+ "Advance",
+ "Advise",
+ "Advocate",
+ "Affect",
+ "Affirm",
+ "Afford",
+ "Agree",
+ "Aim",
+ "Align",
+ "Allow",
+ "Alter",
+ "Amend",
+ "Analyse",
+ "Analyze",
+ "Anchor",
+ "Annotate",
+ "Announce",
+ "Annoy",
+ "Annul",
+ "Answer",
+ "Appeal",
+ "Appear",
+ "Append",
+ "Applicate",
+ "Apply",
+ "Appoint",
+ "Appreciate",
+ "Approach",
+ "Approve",
+ "Argue",
+ "Arise",
+ "Arrange",
+ "Arrest",
+ "Arrive",
+ "Ask",
+ "Assert",
+ "Assess",
+ "Assign",
+ "Assist",
+ "Associate",
+ "Assume",
+ "Assure",
+ "Attach",
+ "Attack",
+ "Attempt",
+ "Attend",
+ "Attract",
+ "Augment",
+ "Avoid",
+ "Awake",
+ "Back",
+ "Backport",
+ "Backup",
+ "Bake",
+ "Base",
+ "Battle",
+ "Be",
+ "Bear",
+ "Beat",
+ "Become",
+ "Begin",
+ "Behave",
+ "Believe",
+ "Belong",
+ "Bend",
+ "Benefit",
+ "Better",
+ "Beware",
+ "Bind",
+ "Blacklist",
+ "Blame",
+ "Blend",
+ "Block",
+ "Blow",
+ "Blur",
+ "Bootstrap",
+ "Born",
+ "Borrow",
+ "Bother",
+ "Break",
+ "Bridge",
+ "Bring",
+ "Broadcast",
+ "Buffer",
+ "Build",
+ "Bump",
+ "Bundle",
+ "Burn",
+ "Busy",
+ "Buy",
+ "Bypass",
+ "Cache",
+ "Calculate",
+ "Call",
+ "Cancel",
+ "Capitalize",
+ "Capture",
+ "Care",
+ "Carry",
+ "Carryout",
+ "Cast",
+ "Catch",
+ "Categorize",
+ "Cause",
+ "Center",
+ "Centralize",
+ "Challenge",
+ "Change",
+ "Chant",
+ "Charge",
+ "Chase",
+ "Chat",
+ "Check",
+ "Choose",
+ "Circle",
+ "Claim",
+ "Clarify",
+ "Clean",
+ "Cleanse",
+ "Clear",
+ "Climb",
+ "Clip",
+ "Close",
+ "Clothe",
+ "Coalesce",
+ "Collapse",
+ "Collect",
+ "Combine",
+ "Come",
+ "Command",
+ "Comment",
+ "Commit",
+ "Compare",
+ "Compensate",
+ "Compile",
+ "Complain",
+ "Complement",
+ "Complete",
+ "Compose",
+ "Compress",
+ "Compute",
+ "Conceal",
+ "Concentrate",
+ "Conclude",
+ "Concur",
+ "Conduct",
+ "Configure",
+ "Confirm",
+ "Confront",
+ "Connect",
+ "Connote",
+ "Consider",
+ "Consist",
+ "Consolidate",
+ "Constitute",
+ "Construct",
+ "Consume",
+ "Contact",
+ "Contain",
+ "Contest",
+ "Continue",
+ "Contribute",
+ "Control",
+ "Convert",
+ "Convey",
+ "Cook",
+ "Coordinate",
+ "Cope",
+ "Copy",
+ "Correct",
+ "Cost",
+ "Counsel",
+ "Count",
+ "Cover",
+ "Create",
+ "Cross",
+ "Cry",
+ "Cut",
+ "Cycle",
+ "Damage",
+ "Dance",
+ "Deal",
+ "Debate",
+ "Decide",
+ "Declare",
+ "Decode",
+ "Deconstruct",
+ "Decouple",
+ "Decrease",
+ "Dedup",
+ "Duplicate",
+ "Deduplicate",
+ "Default",
+ "Defeat",
+ "Defend",
+ "Defer",
+ "Define",
+ "Delay",
+ "Delegate",
+ "Delete",
+ "Deliver",
+ "Demand",
+ "Demolish",
+ "Demonstrate",
+ "Deny",
+ "Depart",
+ "Depend",
+ "Depict",
+ "Deprecate",
+ "Derive",
+ "Describe",
+ "Deserialize",
+ "Design",
+ "Desire",
+ "Destroy",
+ "Detail",
+ "Detect",
+ "Determine",
+ "Develop",
+ "Devote",
+ "Die",
+ "Dim",
+ "Direct",
+ "Disable",
+ "Disallow",
+ "Disappear",
+ "Disconnect",
+ "Discontinue",
+ "Discourage",
+ "Discover",
+ "Discuss",
+ "Dislike",
+ "Dismiss",
+ "Dispatch",
+ "Displace",
+ "Display",
+ "Distinguish",
+ "Divide",
+ "Do",
+ "Document",
+ "Dominate",
+ "Downgrade",
+ "Download",
+ "Draw",
+ "Dread",
+ "Dress",
+ "Drink",
+ "Drive",
+ "Drop",
+ "Dry",
+ "Dump",
+ "Duplicate",
+ "Earn",
+ "Eat",
+ "Echo",
+ "Edit",
+ "Educate",
+ "Elaborate",
+ "Elect",
+ "Elevate",
+ "Eliminate",
+ "Embed",
+ "Emerge",
+ "Emit",
+ "Employ",
+ "Empty",
+ "Enable",
+ "Encapsulate",
+ "Encourage",
+ "End",
+ "Endorse",
+ "Endure",
+ "Enforce",
+ "Engage",
+ "Enhance",
+ "Enjoy",
+ "Enquire",
+ "Enroll",
+ "Ensure",
+ "Enter",
+ "Enumerate",
+ "Equal",
+ "Equate",
+ "Erase",
+ "Escape",
+ "Establish",
+ "Estimate",
+ "Evaluate",
+ "Examine",
+ "Except",
+ "Exclude",
+ "Excuse",
+ "Execute",
+ "Exempt",
+ "Exercise",
+ "Exert",
+ "Exist",
+ "Exit",
+ "Expand",
+ "Expect",
+ "Experience",
+ "Explain",
+ "Explore",
+ "Export",
+ "Expose",
+ "Express",
+ "Extend",
+ "Extract",
+ "Face",
+ "Factor",
+ "Fail",
+ "Fall",
+ "Fault",
+ "Favor",
+ "Fear",
+ "Feature",
+ "Feed",
+ "Feel",
+ "Fetch",
+ "Fight",
+ "Fill",
+ "Filter",
+ "Find",
+ "Finish",
+ "Fit",
+ "Fix",
+ "Flatten",
+ "Flee",
+ "Flip",
+ "Float",
+ "Flow",
+ "Flunk",
+ "Flush",
+ "Fly",
+ "Focus",
+ "Fold",
+ "Follow",
+ "Force",
+ "Foresee",
+ "Forget",
+ "Fork",
+ "Form",
+ "Formalize",
+ "Format",
+ "Forward",
+ "Found",
+ "Free",
+ "Freeze",
+ "Gain",
+ "Gather",
+ "Generalize",
+ "Generate",
+ "Get",
+ "Gitignore",
+ "Give",
+ "Giveup",
+ "Glance",
+ "Go",
+ "Going",
+ "Govern",
+ "Grant",
+ "Grin",
+ "Group",
+ "Grow",
+ "Guard",
+ "Guess",
+ "Guide",
+ "Hack",
+ "Halt",
+ "Hand",
+ "Handle",
+ "Hang",
+ "Happen",
+ "Hardcode",
+ "Harm",
+ "Hate",
+ "Have",
+ "Head",
+ "Hear",
+ "Help",
+ "Hide",
+ "Highlight",
+ "Hint",
+ "Hire",
+ "Hit",
+ "Hold",
+ "Hook",
+ "Hope",
+ "House",
+ "Hurt",
+ "Identify",
+ "Ignore",
+ "Illuminate",
+ "Illustrate",
+ "Imagine",
+ "Impersonate",
+ "Implement",
+ "Imply",
+ "Import",
+ "Importune",
+ "Impose",
+ "Improve",
+ "Include",
+ "Incorporate",
+ "Increase",
+ "Incur",
+ "Indent",
+ "Indicate",
+ "Infer",
+ "Influence",
+ "Inform",
+ "Inherit",
+ "Init",
+ "Initialize",
+ "Initiate",
+ "Injure",
+ "In-line",
+ "Inline",
+ "Insist",
+ "Install",
+ "Instantiate",
+ "Instruct",
+ "Integrate",
+ "Intend",
+ "Intercept",
+ "Internalize",
+ "Interpret",
+ "Introduce",
+ "Invalidate",
+ "Invert",
+ "Invest",
+ "Investigate",
+ "Invite",
+ "Invoke",
+ "Involve",
+ "Isolate",
+ "Issue",
+ "Join",
+ "Journey",
+ "Joy",
+ "Judge",
+ "Jump",
+ "Justify",
+ "Keep",
+ "Key",
+ "Kick",
+ "Kill",
+ "Kiss",
+ "Knock",
+ "Know",
+ "Label",
+ "Lack",
+ "Land",
+ "Last",
+ "Laugh",
+ "Launch",
+ "Lay",
+ "Lead",
+ "Lean",
+ "Leap",
+ "Learn",
+ "Leave",
+ "Let",
+ "Lie",
+ "Lift",
+ "Light",
+ "Like",
+ "Limit",
+ "Link",
+ "List",
+ "Listen",
+ "Live",
+ "Load",
+ "Localize",
+ "Locate",
+ "Lock",
+ "Log",
+ "Login",
+ "Look",
+ "Loop",
+ "Lose",
+ "Love",
+ "Lower",
+ "Maintain",
+ "Make",
+ "Manage",
+ "Map",
+ "Mark",
+ "Marry",
+ "Match",
+ "Materialize",
+ "Matter",
+ "Mean",
+ "Measure",
+ "Meet",
+ "Memoize",
+ "Menace",
+ "Mention",
+ "Merge",
+ "Migrate",
+ "Mind",
+ "Mirror",
+ "Misinform",
+ "Miss",
+ "Mix",
+ "Mock",
+ "Modernize",
+ "Modify",
+ "Monitor",
+ "Monomorphize",
+ "Move",
+ "Mutate",
+ "Name",
+ "Navigate",
+ "Near",
+ "Need",
+ "Nod",
+ "Normalize",
+ "Notarize",
+ "Note",
+ "Notice",
+ "Notify",
+ "Observe",
+ "Obtain",
+ "Occupy",
+ "Occur",
+ "Offer",
+ "Officiate",
+ "Omit",
+ "Open",
+ "Operate",
+ "Optimise",
+ "Optimize",
+ "Order",
+ "Organise",
+ "Organize",
+ "Output",
+ "Overhaul",
+ "Override",
+ "Overwrite",
+ "Owe",
+ "Own",
+ "Pack",
+ "Package",
+ "Paint",
+ "Panic",
+ "Parameterize",
+ "Parse",
+ "Partake",
+ "Pass",
+ "Patch",
+ "Pause",
+ "Pay",
+ "Perform",
+ "Permit",
+ "Persist",
+ "Persuade",
+ "Pick",
+ "Pin",
+ "Ping",
+ "Pipe",
+ "Place",
+ "Plan",
+ "Play",
+ "Plow",
+ "Point",
+ "Ponder",
+ "Populate",
+ "Port",
+ "Position",
+ "Possess",
+ "Pour",
+ "Predict",
+ "Prefer",
+ "Prefix",
+ "Prepare",
+ "Present",
+ "Preserve",
+ "Press",
+ "Presume",
+ "Prevent",
+ "Print",
+ "Prioritize",
+ "Privatize",
+ "Proceed",
+ "Process",
+ "Procure",
+ "Produce",
+ "Prolong",
+ "Promise",
+ "Promote",
+ "Prompt",
+ "Propagate",
+ "Propose",
+ "Prosecute",
+ "Protect",
+ "Protest",
+ "Prove",
+ "Provide",
+ "Prune",
+ "Publish",
+ "Pull",
+ "Purchase",
+ "Purge",
+ "Pursue",
+ "Push",
+ "Put",
+ "Puton",
+ "Qualify",
+ "Query",
+ "Question",
+ "Queue",
+ "Quit",
+ "Quote",
+ "Race",
+ "Raise",
+ "Randomize",
+ "Reach",
+ "React",
+ "Read",
+ "Realise",
+ "Realize",
+ "Reapply",
+ "Rearrange",
+ "Reason",
+ "Rebuild",
+ "Recall",
+ "Receive",
+ "Reckon",
+ "Recognise",
+ "Recognize",
+ "Recommend",
+ "Reconnect",
+ "Record",
+ "Recover",
+ "Recur",
+ "Redact",
+ "Re-define",
+ "Redefine",
+ "Re-design",
+ "Redesign",
+ "Redirect",
+ "Re-do",
+ "Redo",
+ "Reduce",
+ "Re-enable",
+ "Refactor",
+ "Refer",
+ "Reference",
+ "Refine",
+ "Reflect",
+ "Reformat",
+ "Refresh",
+ "Refuse",
+ "Regard",
+ "Regenerate",
+ "Register",
+ "Reimplement",
+ "Re-instate",
+ "Reinstate",
+ "Reject",
+ "Relate",
+ "Relax",
+ "Release",
+ "Reload",
+ "Rely",
+ "Remain",
+ "Remember",
+ "Remind",
+ "Remove",
+ "Rename",
+ "Render",
+ "Re-order",
+ "Reorder",
+ "Reorganise",
+ "Reorganize",
+ "Repair",
+ "Reparent",
+ "Repeat",
+ "Repel",
+ "Rephrase",
+ "Replace",
+ "Reply",
+ "Report",
+ "Reposition",
+ "Represent",
+ "Request",
+ "Require",
+ "Rerender",
+ "Rerun",
+ "Re-scale",
+ "Rescale",
+ "Research",
+ "Re-set",
+ "Reset",
+ "Reside",
+ "Resize",
+ "Resolve",
+ "Respect",
+ "Respond",
+ "Rest",
+ "Restart",
+ "Restore",
+ "Restrict",
+ "Restructure",
+ "Result",
+ "Resume",
+ "Resurface",
+ "Retain",
+ "Retire",
+ "Retreat",
+ "Retrieve",
+ "Retry",
+ "Return",
+ "Reuse",
+ "Revamp",
+ "Reveal",
+ "Reverse",
+ "Revert",
+ "Review",
+ "Revise",
+ "Revisit",
+ "Revoke",
+ "Reword",
+ "Re-wrap",
+ "Rewrap",
+ "Rewrite",
+ "Ride",
+ "Ring",
+ "Rise",
+ "Roll",
+ "Rotate",
+ "Round",
+ "Route",
+ "Rule",
+ "Run",
+ "Sale",
+ "Salute",
+ "Sample",
+ "Sanitize",
+ "Save",
+ "Say",
+ "Scale",
+ "Scope",
+ "Score",
+ "Scroll",
+ "Search",
+ "Secure",
+ "See",
+ "Seek",
+ "Seem",
+ "Select",
+ "Self-initialize",
+ "Sell",
+ "Send",
+ "Separate",
+ "Serialize",
+ "Serve",
+ "Set",
+ "Settle",
+ "Shake",
+ "Shape",
+ "Share",
+ "Shift",
+ "Shoot",
+ "Shorten",
+ "Shout",
+ "Show",
+ "Shrink",
+ "Shuffle",
+ "Shut",
+ "Sign",
+ "Signify",
+ "Silence",
+ "Simplify",
+ "Simulate",
+ "Sing",
+ "Sit",
+ "Size",
+ "Skip",
+ "Sleep",
+ "Slide",
+ "Slip",
+ "Smile",
+ "Solve",
+ "Sort",
+ "Sound",
+ "Source",
+ "Spawn",
+ "Speak",
+ "Specify",
+ "Spend",
+ "Split",
+ "Spread",
+ "Stand",
+ "Standardize",
+ "Stare",
+ "Start",
+ "State",
+ "Stay",
+ "Steal",
+ "Steer",
+ "Step",
+ "Stick",
+ "Stop",
+ "Store",
+ "Stress",
+ "Stretch",
+ "Strike",
+ "Stringify",
+ "Strip",
+ "Struggle",
+ "Stub",
+ "Study",
+ "Style",
+ "Subclass",
+ "Submit",
+ "Substitute",
+ "Subtract",
+ "Succeed",
+ "Suffer",
+ "Suggest",
+ "Suit",
+ "Supply",
+ "Support",
+ "Suppose",
+ "Suppress",
+ "Surround",
+ "Survive",
+ "Suspect",
+ "Swallow",
+ "Swap",
+ "Sway",
+ "Switch",
+ "Sync",
+ "Synchronise",
+ "Synchronize",
+ "Synthesize",
+ "Take",
+ "Talk",
+ "Talkover",
+ "Target",
+ "Teach",
+ "Tell",
+ "Tempt",
+ "Tend",
+ "Terminate",
+ "Test",
+ "Testify",
+ "Thank",
+ "Think",
+ "Threaten",
+ "Throw",
+ "Tie",
+ "Time",
+ "Toggle",
+ "Touch",
+ "Track",
+ "Trade",
+ "Train",
+ "Transfer",
+ "Transform",
+ "Translate",
+ "Transpile",
+ "Trash",
+ "Travel",
+ "Tread",
+ "Treat",
+ "Trigger",
+ "Trim",
+ "Truncate",
+ "Trust",
+ "Try",
+ "Tune",
+ "Turn",
+ "Tweak",
+ "Twist",
+ "Unblock",
+ "Uncomment",
+ "Uncover",
+ "Understand",
+ "Undertake",
+ "Undo",
+ "Undry",
+ "Unescape",
+ "Unfold",
+ "Unify",
+ "Unignore",
+ "Unite",
+ "Unload",
+ "Unlock",
+ "Unpack",
+ "Unregister",
+ "Unskip",
+ "Unsubscribe",
+ "Untrack",
+ "Unwrap",
+ "Update",
+ "Upgrade",
+ "Upload",
+ "Urge",
+ "Use",
+ "Utter",
+ "Validate",
+ "Value",
+ "Vanish",
+ "Vary",
+ "Verbosify",
+ "Verify",
+ "View",
+ "Visit",
+ "Vocalize",
+ "Voice",
+ "Vote",
+ "Wait",
+ "Wake",
+ "Walk",
+ "Want",
+ "Warn",
+ "Warrant",
+ "Wash",
+ "Watch",
+ "Wear",
+ "Weep",
+ "Weigh",
+ "Welcome",
+ "Whitelist",
+ "Win",
+ "Wipe",
+ "Wire",
+ "Wish",
+ "Withdraw",
+ "Wonder",
+ "Work",
+ "Workout",
+ "Worry",
+ "Wrap",
+ "Write",
]
diff --git a/dev/format.sh b/dev/format.sh
index ada5a7f13abc..a3129b932e5d 100755
--- a/dev/format.sh
+++ b/dev/format.sh
@@ -2,6 +2,8 @@
set -e
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../
+taplo fmt
+
# Python
python -m flwr_tool.check_copyright src/py/flwr
python -m flwr_tool.init_py_fix src/py/flwr
diff --git a/dev/test.sh b/dev/test.sh
index 170d9f4acd1e..b8eeed14bc46 100755
--- a/dev/test.sh
+++ b/dev/test.sh
@@ -26,6 +26,10 @@ echo "- docformatter: start"
python -m docformatter -c -r src/py/flwr e2e -e src/py/flwr/proto
echo "- docformatter: done"
+echo "- docsig: start"
+docsig src/py/flwr
+echo "- docsig: done"
+
echo "- ruff: start"
python -m ruff check src/py/flwr
echo "- ruff: done"
@@ -56,6 +60,14 @@ echo "- mdformat: done"
echo "- All Markdown checks passed"
+echo "- Start TOML checks"
+
+echo "- taplo: start"
+taplo fmt --check
+echo "- taplo: done"
+
+echo "- All TOML checks passed"
+
echo "- Start rST checks"
echo "- docstrfmt: start"
diff --git a/dev/update_python.py b/dev/update_python.py
new file mode 100644
index 000000000000..5eea6af75488
--- /dev/null
+++ b/dev/update_python.py
@@ -0,0 +1,238 @@
+"""Script to update Python versions in the codebase."""
+
+import argparse
+import re
+from pathlib import Path
+
+
+def _compute_old_version(new_version):
+ """Compute the old version as the immediate previous minor version."""
+ major_str, minor_str = new_version.split(".")
+ major = int(major_str)
+ minor = int(minor_str)
+
+ if minor > 0:
+ old_minor = minor - 1
+ old_version = f"{major}.{old_minor}"
+ else:
+ raise ValueError("Minor version is 0, can't infer previous version.")
+ return old_version
+
+
+def _update_python_versions(
+ new_full_version,
+ patch_only=False,
+ dry_run=False,
+):
+ """Update Python version strings in the specified files."""
+ new_major_minor = ".".join(new_full_version.split(".")[:2])
+
+ if patch_only:
+ print(f"Updating patch version for {new_major_minor} to {new_full_version}")
+
+ # Define the version pattern to match any full version with the same major.minor
+ version_pattern = re.escape(new_major_minor) + r"\.\d+"
+
+ # Define the file patterns and corresponding replacement patterns
+ replacements = {
+ # Shell scripts
+ "dev/*.sh": [
+ # Update version in scripts
+ (
+ r"(version=\$\{1:-)" + version_pattern + r"(\})",
+ r"\g<1>" + new_full_version + r"\g<2>",
+ ),
+ # Update pyenv uninstall commands
+ (
+ r"(pyenv uninstall -f flower-)" + version_pattern,
+ r"\g<1>" + new_full_version,
+ ),
+ ],
+ # Python files
+ "**/*.py": [
+ # Update version assignments
+ (
+ r'(["\'])' + version_pattern + r'(["\'])',
+ r"\g<1>" + new_full_version + r"\g<2>",
+ ),
+ ],
+ # Documentation files
+ "doc/source/conf.py": [
+ # Update Python full version in conf.py
+ (
+ r"(\.\.\s*\|python_full_version\|\s*replace::\s*)"
+ + version_pattern,
+ r"\g<1>" + new_full_version,
+ ),
+ ],
+ }
+ else:
+ # Compute old_version as immediate previous minor version
+ old_version = _compute_old_version(new_major_minor)
+
+ print(f"Determined old version: {old_version}")
+ print(
+ f"Updating to new version: {new_major_minor} "
+ f"(full version: {new_full_version})"
+ )
+
+ # Define the file patterns and corresponding replacement patterns
+ replacements = {
+ # action.yml files
+ ".github/actions/bootstrap/action.yml": [
+ # Update default Python version
+ (
+ r"^(\s*default:\s*)" + re.escape(old_version) + r"(\s*)$",
+ r"\g<1>" + new_major_minor + r"\g<2>",
+ ),
+ ],
+ # YAML workflow files
+ ".github/workflows/*.yml": [
+ # Update specific python-version entries
+ (
+ r"^(\s*python-version:\s*)" + re.escape(old_version) + r"(\s*)$",
+ r"\g<1>" + new_major_minor + r"\g<2>",
+ ),
+ (
+ r"(['\"]?)" + re.escape(old_version) + r"(['\"]?,?\s*)",
+                    # Delete the matched old-version entry entirely; the
+                    # trailing quote/comma/whitespace (group 2) goes with it.
+                    lambda m: "",
+ ),
+ ],
+ # Shell scripts
+ "dev/*.sh": [
+ # Update version in scripts
+ (
+ r"(version=\$\{1:-)" + re.escape(old_version) + r"(\.\d+)?(\})",
+ r"\g<1>" + new_full_version + r"\g<3>",
+ ),
+ # Update pyenv uninstall commands
+ (
+ r"(pyenv uninstall -f flower-)"
+ + re.escape(old_version)
+ + r"(\.\d+)?",
+ r"\g<1>" + new_full_version,
+ ),
+ ],
+ # pyproject.toml files
+ "**/pyproject.toml": [
+ # Update python version constraints
+ (
+ r'(python\s*=\s*">=)'
+ + re.escape(old_version)
+ + r'(,\s*<\d+\.\d+")',
+ r"\g<1>" + new_major_minor + r"\g<2>",
+ ),
+ ],
+ "dev/*.py": [
+ # Update version assignments
+ (
+ r'(["\'])' + re.escape(old_version) + r'(\.\d+)?(["\'],?)\s*\n?',
+                    # Drop the old-version entry entirely, including the
+                    # surrounding quotes and any trailing comma (group 3).
+                    lambda m: "",
+ ),
+ ],
+ # Python files
+ "**/*.py": [
+ # Update version assignments
+ (
+ r'(["\'])' + re.escape(old_version) + r'(\.\d+)?(["\'])',
+ r"\g<1>" + new_full_version + r"\g<3>",
+ ),
+ ],
+ # Documentation files
+ "doc/source/conf.py": [
+ # Update Python version in conf.py
+ (
+ r"(\.\.\s*\|python_version\|\s*replace::\s*)"
+ + re.escape(old_version),
+ r"\g<1>" + new_major_minor,
+ ),
+ # Update Python full version in conf.py
+ (
+ r"(\.\.\s*\|python_full_version\|\s*replace::\s*)"
+ + re.escape(old_version)
+ + r"\.\d+",
+ r"\g<1>" + new_full_version,
+ ),
+ ],
+ # ReStructuredText files
+ "doc/source/*.rst": [
+ # Update Python version in rst files
+ (
+ r"(`Python\s*"
+ + re.escape(old_version)
+ + r"\s*`_)",
+ r"`Python "
+ + new_major_minor
+ + " `_",
+ ),
+ ],
+ # PO files for localization
+ "doc/locales/*/LC_MESSAGES/framework-docs.po": [
+ # Update Python version in localization files
+ (
+ r"(`Python\s*"
+ + re.escape(old_version)
+ + r"\s*`_)",
+ r"`Python "
+ + new_major_minor
+ + " `_",
+ ),
+ ],
+ }
+
+ # Process each file pattern
+ for file_pattern, patterns in replacements.items():
+ for file_path in Path().rglob(file_pattern):
+ if not file_path.is_file():
+ continue
+ content = file_path.read_text()
+ original_content = content
+ for pattern, repl in patterns:
+                # re.sub accepts either a string or a callable as the
+                # replacement argument, so both cases are handled by a
+                # single call; no need to branch on callable(repl).
+                content = re.sub(pattern, repl, content, flags=re.MULTILINE)
+ if content != original_content:
+ if dry_run:
+ print(f"Would update {file_path}")
+ else:
+ file_path.write_text(content)
+ print(f"Updated {file_path}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Script to update Python versions in the codebase."
+ )
+ parser.add_argument(
+ "new_full_version", help="New full Python version to use (e.g., 3.9.22)"
+ )
+ parser.add_argument(
+ "--patch-only",
+ action="store_true",
+ help="Update only the patch version for matching major.minor versions.",
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Show changes without modifying files.",
+ )
+ args = parser.parse_args()
+
+ _update_python_versions(
+ new_full_version=args.new_full_version,
+ patch_only=args.patch_only,
+ dry_run=args.dry_run,
+ )
diff --git a/dev/update_version.py b/dev/update_version.py
new file mode 100644
index 000000000000..cbb4d8e138c2
--- /dev/null
+++ b/dev/update_version.py
@@ -0,0 +1,150 @@
+"""Utility used to bump the version of the package."""
+
+import argparse
+import re
+import sys
+from pathlib import Path
+
+
+REPLACE_CURR_VERSION = {
+ "doc/source/conf.py": [
+ ".. |stable_flwr_version| replace:: {version}",
+ ],
+ "src/py/flwr/cli/new/templates/app/pyproject.*.toml.tpl": [
+ "flwr[simulation]>={version}",
+ ],
+ "src/docker/complete/compose.yml": ["FLWR_VERSION:-{version}"],
+ "src/docker/distributed/client/compose.yml": ["FLWR_VERSION:-{version}"],
+ "src/docker/distributed/server/compose.yml": ["FLWR_VERSION:-{version}"],
+}
+
+REPLACE_NEXT_VERSION = {
+ "pyproject.toml": ['version = "{version}"'],
+ "doc/source/conf.py": [
+ 'release = "{version}"',
+ ],
+ "examples/doc/source/conf.py": ['release = "{version}"'],
+ "baselines/doc/source/conf.py": ['release = "{version}"'],
+}
+
+EXAMPLES = {
+ "examples/*/pyproject.toml": [
+ "flwr[simulation]=={version}",
+ "flwr[simulation]>={version}",
+ ],
+}
+
+
+def _get_next_version(curr_version, increment):
+ """Calculate the next version based on the type of release."""
+ major, minor, patch_version = map(int, curr_version.split("."))
+ if increment == "patch":
+ patch_version += 1
+ elif increment == "minor":
+ minor += 1
+ patch_version = 0
+ elif increment == "major":
+ major += 1
+ minor = 0
+ patch_version = 0
+ else:
+ raise ValueError(
+ "Invalid increment type. Must be 'major', 'minor', or 'patch'."
+ )
+ return f"{major}.{minor}.{patch_version}"
+
+
+def _update_versions(file_patterns, replace_strings, new_version, check):
+ """Update the version strings in the specified files."""
+ wrong = False
+ for pattern in file_patterns:
+ files = list(Path(__file__).parents[1].glob(pattern))
+ for file_path in files:
+ if not file_path.is_file():
+ continue
+ content = file_path.read_text()
+ original_content = content
+ for s in replace_strings:
+ # Construct regex pattern to match any version number in the string
+ escaped_s = re.escape(s).replace(r"\{version\}", r"(\d+\.\d+\.\d+)")
+ regex_pattern = re.compile(escaped_s)
+ content = regex_pattern.sub(s.format(version=new_version), content)
+ if content != original_content:
+ wrong = True
+ if check:
+ print(f"{file_path} would be updated")
+ else:
+ file_path.write_text(content)
+ print(f"Updated {file_path}")
+
+ return wrong
+
+
+if __name__ == "__main__":
+ conf_path = Path("doc/source/conf.py")
+
+ if not conf_path.is_file():
+ raise FileNotFoundError(f"{conf_path} not found!")
+
+ content = conf_path.read_text()
+
+ # Search for the current non-updated version
+ match = re.search(r"\.\.\s*\|stable_flwr_version\|\s*replace::\s*(\S+)", content)
+
+ parser = argparse.ArgumentParser(
+ description="Utility used to bump the version of the package."
+ )
+ parser.add_argument(
+ "--old_version",
+ help="Current (non-updated) version of the package, soon to be the old version.",
+ default=match.group(1) if match else None,
+ )
+ parser.add_argument(
+ "--check", action="store_true", help="Fails if any file would be modified."
+ )
+ parser.add_argument(
+ "--examples", action="store_true", help="Also modify flwr version in examples."
+ )
+
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument(
+ "--patch", action="store_true", help="Increment the patch version."
+ )
+ group.add_argument(
+ "--major", action="store_true", help="Increment the major version."
+ )
+ args = parser.parse_args()
+
+ if not args.old_version:
+ raise ValueError("Version not found in conf.py, please provide current version")
+
+ # Determine the type of version increment
+ if args.major:
+ increment = "major"
+ elif args.patch:
+ increment = "patch"
+ else:
+ increment = "minor"
+
+ curr_version = _get_next_version(args.old_version, increment)
+ next_version = _get_next_version(curr_version, "minor")
+
+ wrong = False
+
+ # Update files with next version
+ for file_pattern, strings in REPLACE_NEXT_VERSION.items():
+ if not _update_versions([file_pattern], strings, next_version, args.check):
+ wrong = True
+
+ # Update files with current version
+ for file_pattern, strings in REPLACE_CURR_VERSION.items():
+ if not _update_versions([file_pattern], strings, curr_version, args.check):
+ wrong = True
+
+ if args.examples:
+ for file_pattern, strings in EXAMPLES.items():
+ if not _update_versions([file_pattern], strings, curr_version, args.check):
+ wrong = True
+
+ if wrong and args.check:
+ sys.exit("Some version haven't been updated.")
diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po
index e6cd61627bf6..a11f44f6bd59 100644
--- a/doc/locales/fr/LC_MESSAGES/framework-docs.po
+++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po
@@ -3,7 +3,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Flower Docs\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2024-09-27 00:30+0000\n"
+"POT-Creation-Date: 2024-10-10 00:29+0000\n"
"PO-Revision-Date: 2023-09-05 17:54+0000\n"
"Last-Translator: Charles Beauville \n"
"Language: fr\n"
@@ -1284,7 +1284,7 @@ msgstr ""
"reStructuredText (fichiers `.rst`) et Markdown (fichiers `.md`)."
#: ../../source/contributor-how-to-write-documentation.rst:10
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196
#, fuzzy
msgid ""
"Note that, in order to build the documentation locally (with ``poetry run"
@@ -2449,25 +2449,25 @@ msgstr ""
#, fuzzy
msgid ""
"If you don't have ``pyenv`` installed, the following script that will "
-"install it, set it up, and create the virtual environment (with ``Python "
-"3.9.20`` by default):"
+"install it, set it up, and create the virtual environment (with "
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
"Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script"
" suivant qui l'installera, le configurera et créera l'environnement "
"virtuel (avec :code:`Python 3.9.20` par défaut)::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69
#, fuzzy
msgid ""
"If you already have ``pyenv`` installed (along with the ``pyenv-"
"virtualenv`` plugin), you can use the following convenience script (with "
-"``Python 3.9.20`` by default):"
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
"Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script"
" suivant qui l'installera, le configurera et créera l'environnement "
"virtuel (avec :code:`Python 3.9.20` par défaut)::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77
#, fuzzy
msgid ""
"3. Install the Flower package in development mode (think ``pip install "
@@ -2476,11 +2476,11 @@ msgstr ""
"Troisièmement, installez le paquet Flower en mode de développement ( "
":code :`pip install -e`) avec toutes les dépendances nécessaires :"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
msgid "Convenience Scripts"
msgstr "Scripts pratiques"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87
#, fuzzy
msgid ""
"The Flower repository contains a number of convenience scripts to make "
@@ -2493,27 +2493,27 @@ msgstr ""
"problématiques. Voir le sous-répertoire :code :`/dev` pour une liste "
"complète. Les scripts suivants sont parmis les plus importants :"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
msgid "Create/Delete Virtual Environment"
msgstr "Créer/Supprimer l'environment virtuel"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101
msgid "Compile ProtoBuf Definitions"
msgstr "Compiler les définitions ProtoBuf"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108
msgid "Auto-Format Code"
msgstr "Formatter le code"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115
msgid "Run Linters and Tests"
msgstr "Vérifier le format et tester le code"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122
msgid "Add a pre-commit hook"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124
msgid ""
"Developers may integrate a pre-commit hook into their workflow utilizing "
"the `pre-commit `_ library. The pre-"
@@ -2521,44 +2521,44 @@ msgid ""
"``./dev/format.sh`` and ``./dev/test.sh`` scripts."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128
msgid "There are multiple ways developers can use this:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130
msgid "Install the pre-commit hook to your local git directory by simply running:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136
msgid ""
"Each ``git commit`` will trigger the execution of formatting and "
"linting/test scripts."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138
msgid ""
"If in a hurry, bypass the hook using ``--no-verify`` with the ``git "
"commit`` command."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145
msgid ""
"For developers who prefer not to install the hook permanently, it is "
"possible to execute a one-time check prior to committing changes by using"
" the following command:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152
msgid ""
"This executes the formatting and linting checks/tests on all the files "
"without modifying the default behavior of ``git commit``."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156
msgid "Run Github Actions (CI) locally"
msgstr "Exécuter les GitHub Actions (CI) localement"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158
#, fuzzy
msgid ""
"Developers could run the full set of Github Actions workflows under their"
@@ -2572,7 +2572,7 @@ msgstr ""
"fois installé, exécuter la commande suivante dans le dossier principale "
"de Flower :"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167
msgid ""
"The Flower default workflow would run by setting up the required Docker "
"machines underneath."
@@ -2580,12 +2580,12 @@ msgstr ""
"Le workflow par défaut de Flower sera exécuté en configurant les machines"
" Docker requises en arrière plan."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171
#, fuzzy
msgid "Build Release"
msgstr "Inédit"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173
#, fuzzy
msgid ""
"Flower uses Poetry to build releases. The necessary command is wrapped in"
@@ -2594,7 +2594,7 @@ msgstr ""
"Flower utilise Poetry pour construire les nouvelles versions. La commande"
" nécessaire est comprise dans un script simple ::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180
#, fuzzy
msgid ""
"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the "
@@ -2603,19 +2603,19 @@ msgstr ""
"Les versions résultantes :code:`.whl` et :code:`.tar.gz` seront stockées "
"dans le sous-répertoire:code:`/dist`."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184
#, fuzzy
msgid "Build Documentation"
msgstr "Amélioration de la documentation"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186
msgid ""
"Flower's documentation uses `Sphinx `_. "
"There's no convenience script to re-build the documentation yet, but it's"
" pretty easy:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194
msgid "This will generate HTML documentation in ``doc/build/html``."
msgstr ""
@@ -8329,7 +8329,7 @@ msgstr ""
"l'utilisation de ``start_simulation``)"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:28
-#: ../../source/how-to-upgrade-to-flower-next.rst:120
+#: ../../source/how-to-upgrade-to-flower-next.rst:121
msgid "Required changes"
msgstr "Changements nécessaires"
@@ -8594,7 +8594,7 @@ msgstr ""
"round_timeout=600.0), ...)``"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:121
-#: ../../source/how-to-upgrade-to-flower-next.rst:348
+#: ../../source/how-to-upgrade-to-flower-next.rst:349
msgid "Further help"
msgstr "Aide supplémentaire"
@@ -8685,7 +8685,7 @@ msgid ""
"``pyproject.toml``:"
msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité."
-#: ../../source/how-to-upgrade-to-flower-next.rst:122
+#: ../../source/how-to-upgrade-to-flower-next.rst:123
msgid ""
"In Flower Next, the *infrastructure* and *application layers* have been "
"decoupled. Instead of starting a client in code via ``start_client()``, "
@@ -8698,33 +8698,33 @@ msgid ""
"way:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:131
+#: ../../source/how-to-upgrade-to-flower-next.rst:132
#, fuzzy
msgid "|clientapp_link|_"
msgstr "client"
-#: ../../source/how-to-upgrade-to-flower-next.rst:133
+#: ../../source/how-to-upgrade-to-flower-next.rst:134
msgid ""
"Wrap your existing client with |clientapp_link|_ instead of launching it "
"via |startclient_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:156
+#: ../../source/how-to-upgrade-to-flower-next.rst:157
#, fuzzy
msgid "|serverapp_link|_"
msgstr "serveur"
-#: ../../source/how-to-upgrade-to-flower-next.rst:158
+#: ../../source/how-to-upgrade-to-flower-next.rst:159
msgid ""
"Wrap your existing strategy with |serverapp_link|_ instead of starting "
"the server via |startserver_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:179
+#: ../../source/how-to-upgrade-to-flower-next.rst:180
msgid "Deployment"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:181
+#: ../../source/how-to-upgrade-to-flower-next.rst:182
msgid ""
"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, "
"in sequence, |flowernext_clientapp_link|_ (2x) and "
@@ -8732,13 +8732,13 @@ msgid ""
" `server.py` as Python scripts."
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:184
+#: ../../source/how-to-upgrade-to-flower-next.rst:185
msgid ""
"Here's an example to start the server without HTTPS (only for "
"prototyping):"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:200
+#: ../../source/how-to-upgrade-to-flower-next.rst:201
msgid ""
"Here's another example to start with HTTPS. Use the ``--ssl-ca-"
"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line "
@@ -8746,19 +8746,19 @@ msgid ""
"private key)."
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:228
+#: ../../source/how-to-upgrade-to-flower-next.rst:229
#, fuzzy
msgid "Simulation in CLI"
msgstr "Simulation de moniteur"
-#: ../../source/how-to-upgrade-to-flower-next.rst:230
+#: ../../source/how-to-upgrade-to-flower-next.rst:231
msgid ""
"Wrap your existing client and strategy with |clientapp_link|_ and "
"|serverapp_link|_, respectively. There is no need to use |startsim_link|_"
" anymore. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:263
+#: ../../source/how-to-upgrade-to-flower-next.rst:264
msgid ""
"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / "
"``client_app`` object in the code instead of executing the Python script."
@@ -8766,24 +8766,24 @@ msgid ""
"objects are in a ``sim.py`` module):"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:280
+#: ../../source/how-to-upgrade-to-flower-next.rst:281
msgid ""
"Set default resources for each |clientapp_link|_ using the ``--backend-"
"config`` command line argument instead of setting the "
"``client_resources`` argument in |startsim_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:304
+#: ../../source/how-to-upgrade-to-flower-next.rst:305
msgid "Simulation in a Notebook"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:306
+#: ../../source/how-to-upgrade-to-flower-next.rst:307
msgid ""
"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's "
"an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:350
+#: ../../source/how-to-upgrade-to-flower-next.rst:351
#, fuzzy
msgid ""
"Some official `Flower code examples `_ "
@@ -8801,19 +8801,19 @@ msgstr ""
"Flower `_ et utilise le canal "
"``#questions``."
-#: ../../source/how-to-upgrade-to-flower-next.rst:357
+#: ../../source/how-to-upgrade-to-flower-next.rst:358
#, fuzzy
msgid "Important"
msgstr "Changements importants :"
-#: ../../source/how-to-upgrade-to-flower-next.rst:359
+#: ../../source/how-to-upgrade-to-flower-next.rst:360
msgid ""
"As we continuously enhance Flower Next at a rapid pace, we'll be "
"periodically updating this guide. Please feel free to share any feedback "
"with us!"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:365
+#: ../../source/how-to-upgrade-to-flower-next.rst:366
msgid "Happy migrating! 🚀"
msgstr ""
@@ -28807,7 +28807,7 @@ msgstr ""
"chose d'autre, comme la régression linéaire classique."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41
-msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+msgid "|ac0a9766e26044d6aea222a829859b20|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109
@@ -28826,7 +28826,7 @@ msgstr ""
" Go."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53
-msgid "|d741075f8e624331b42c0746f7d258a0|"
+msgid "|36cd6e248b1443ce8a82b5a025bba368|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111
@@ -28857,7 +28857,7 @@ msgstr ""
"chanson."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67
-msgid "|8fc92d668bcb42b8bda55143847f2329|"
+msgid "|bf4fb057f4774df39e1dcb5c71fd804a|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113
@@ -28878,7 +28878,7 @@ msgstr ""
" données pour la même tâche."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79
-msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+msgid "|71bb9f3c74c04f959b9bc1f02b736c95|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115
@@ -28899,7 +28899,7 @@ msgstr ""
"cloud."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91
-msgid "|77a037b546a84262b608e04bc82a2c96|"
+msgid "|7605632e1b0f49599ffacf841491fcfb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117
@@ -28920,7 +28920,7 @@ msgstr ""
"appuyés."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103
-msgid "|f568e24c9fb0435690ac628210a4be96|"
+msgid "|91b1b5a7d3484eb7a2350c1923f18307|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119
@@ -28945,7 +28945,7 @@ msgstr ""
" sur un serveur centralisé."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138
-msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+msgid "|5405ed430e4746e28b083b146fb71731|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173
@@ -28964,7 +28964,7 @@ msgstr ""
"suffisantes pour former un bon modèle."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150
-msgid "|3f645ad807f84be8b1f8f3267173939c|"
+msgid "|a389e87dab394eb48a8949aa2397687b|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175
@@ -29186,7 +29186,7 @@ msgstr ""
"partir d'un point de contrôle précédemment sauvegardé."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210
-msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+msgid "|89c412136a5146ec8dc32c0973729f12|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307
@@ -29221,7 +29221,7 @@ msgstr ""
"rendements décroissants."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225
-msgid "|edcf9a04d96e42608fd01a333375febe|"
+msgid "|9503d3dc3a144e8aa295f8800cd8a766|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309
@@ -29254,7 +29254,7 @@ msgstr ""
"données locales, ou même de quelques étapes (mini-batchs)."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240
-msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+msgid "|aadb59e29b9e445d8e239d9a8a7045cb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311
@@ -29285,7 +29285,7 @@ msgstr ""
" l'entraînement local."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255
-msgid "|ba178f75267d4ad8aa7363f20709195f|"
+msgid "|a7579ad7734347508e959d9e14f2f53d|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313
@@ -29344,7 +29344,7 @@ msgstr ""
"times as much as each of the 100 examples."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
-msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+msgid "|73d15dd1d4fc41678b2d54815503fbe8|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
@@ -29487,7 +29487,7 @@ msgstr ""
"quel cadre de ML et n'importe quel langage de programmation."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
-msgid "|e7cec00a114b48359935c6510595132e|"
+msgid "|55472eef61274ba1b739408607e109df|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
@@ -38429,3 +38429,45 @@ msgstr ""
#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|"
#~ msgstr ""
+#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+#~ msgstr ""
+
+#~ msgid "|d741075f8e624331b42c0746f7d258a0|"
+#~ msgstr ""
+
+#~ msgid "|8fc92d668bcb42b8bda55143847f2329|"
+#~ msgstr ""
+
+#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+#~ msgstr ""
+
+#~ msgid "|77a037b546a84262b608e04bc82a2c96|"
+#~ msgstr ""
+
+#~ msgid "|f568e24c9fb0435690ac628210a4be96|"
+#~ msgstr ""
+
+#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+#~ msgstr ""
+
+#~ msgid "|3f645ad807f84be8b1f8f3267173939c|"
+#~ msgstr ""
+
+#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+#~ msgstr ""
+
+#~ msgid "|edcf9a04d96e42608fd01a333375febe|"
+#~ msgstr ""
+
+#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+#~ msgstr ""
+
+#~ msgid "|ba178f75267d4ad8aa7363f20709195f|"
+#~ msgstr ""
+
+#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+#~ msgstr ""
+
+#~ msgid "|e7cec00a114b48359935c6510595132e|"
+#~ msgstr ""
+
diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po
index 4cdf1c565be6..424eaf5f86a2 100644
--- a/doc/locales/ko/LC_MESSAGES/framework-docs.po
+++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Flower main\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-09-27 00:30+0000\n"
+"POT-Creation-Date: 2024-10-10 00:29+0000\n"
"PO-Revision-Date: 2024-08-23 13:09+0000\n"
"Last-Translator: Seulki Yun \n"
"Language: ko\n"
@@ -1262,7 +1262,7 @@ msgstr ""
"텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합니다."
#: ../../source/contributor-how-to-write-documentation.rst:10
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196
msgid ""
"Note that, in order to build the documentation locally (with ``poetry run"
" make html``, like described below), `Pandoc "
@@ -2268,23 +2268,23 @@ msgstr ""
#, fuzzy
msgid ""
"If you don't have ``pyenv`` installed, the following script that will "
-"install it, set it up, and create the virtual environment (with ``Python "
-"3.9.20`` by default):"
+"install it, set it up, and create the virtual environment (with "
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 및 가상 환경을 생성합니다(기본적으로 "
":code:`Python 3.9.20` 사용):"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69
#, fuzzy
msgid ""
"If you already have ``pyenv`` installed (along with the ``pyenv-"
"virtualenv`` plugin), you can use the following convenience script (with "
-"``Python 3.9.20`` by default):"
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 함께) 다음과 같은 "
"편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python 3.9.20` 사용):"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77
#, fuzzy
msgid ""
"3. Install the Flower package in development mode (think ``pip install "
@@ -2293,11 +2293,11 @@ msgstr ""
"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다(예:code:`pip install "
"-e`)::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
msgid "Convenience Scripts"
msgstr "편의 스크립트"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87
#, fuzzy
msgid ""
"The Flower repository contains a number of convenience scripts to make "
@@ -2308,27 +2308,27 @@ msgstr ""
"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가지 편의 스크립트가 포함되어 있습니다. 전체 "
"목록은 :code:`/dev` 하위 디렉터리를 참조하세요. 다음 스크립트는 가장 중요한 스크립트 중 하나입니다:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
msgid "Create/Delete Virtual Environment"
msgstr "가상 환경 생성/삭제"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101
msgid "Compile ProtoBuf Definitions"
msgstr "ProtoBuf 정의 컴파일"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108
msgid "Auto-Format Code"
msgstr "자동 포맷 코드"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115
msgid "Run Linters and Tests"
msgstr "린터 및 테스트 실행"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122
msgid "Add a pre-commit hook"
msgstr "사전 커밋 훅 추가"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124
msgid ""
"Developers may integrate a pre-commit hook into their workflow utilizing "
"the `pre-commit `_ library. The pre-"
@@ -2339,45 +2339,45 @@ msgstr ""
" 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및"
" ``./dev/test.sh`` 스크립트."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128
msgid "There are multiple ways developers can use this:"
msgstr "개발자가 이것을 사용할 수 있는 여러가지 방법이 있습니다:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130
msgid "Install the pre-commit hook to your local git directory by simply running:"
msgstr "간단하게 실행하여 로컬 git 디렉터리에 사전 커밋 훅을 설치하세요:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136
msgid ""
"Each ``git commit`` will trigger the execution of formatting and "
"linting/test scripts."
msgstr "각 ``git 커밋``은 포맷 및 린팅/테스트 스크립트의 실행을 트리거합니다."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138
#, fuzzy
msgid ""
"If in a hurry, bypass the hook using ``--no-verify`` with the ``git "
"commit`` command."
msgstr "급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145
msgid ""
"For developers who prefer not to install the hook permanently, it is "
"possible to execute a one-time check prior to committing changes by using"
" the following command:"
msgstr "훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152
msgid ""
"This executes the formatting and linting checks/tests on all the files "
"without modifying the default behavior of ``git commit``."
msgstr "이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 및 린팅 검사/테스트를 실행합니다."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156
msgid "Run Github Actions (CI) locally"
msgstr "로컬에서 Github Action(CI) 실행하기"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158
#, fuzzy
msgid ""
"Developers could run the full set of Github Actions workflows under their"
@@ -2389,35 +2389,35 @@ msgstr ""
"Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아래의 설치 지침을 참조하여 Flower 메인 클론 "
"레포지토리 폴더 아래에서 다음 명령을 실행하세요::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167
msgid ""
"The Flower default workflow would run by setting up the required Docker "
"machines underneath."
msgstr "Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171
msgid "Build Release"
msgstr "릴리즈 빌드"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173
#, fuzzy
msgid ""
"Flower uses Poetry to build releases. The necessary command is wrapped in"
" a simple script:"
msgstr "Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 필요한 명령은 간단한 스크립트로 래핑됩니다::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180
#, fuzzy
msgid ""
"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the "
"``/dist`` subdirectory."
msgstr "결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리에 저장됩니다."
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184
msgid "Build Documentation"
msgstr "문서 빌드"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186
#, fuzzy
msgid ""
"Flower's documentation uses `Sphinx `_. "
@@ -2427,7 +2427,7 @@ msgstr ""
"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문서를 다시 작성할"
" 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 있습니다:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194
msgid "This will generate HTML documentation in ``doc/build/html``."
msgstr "그러면 ``doc/build/html``에 HTML 문서가 생성됩니다."
@@ -8042,7 +8042,7 @@ msgstr ""
"(``start_simulation`` 사용 시)"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:28
-#: ../../source/how-to-upgrade-to-flower-next.rst:120
+#: ../../source/how-to-upgrade-to-flower-next.rst:121
msgid "Required changes"
msgstr "필수 변경 사항"
@@ -8292,7 +8292,7 @@ msgstr ""
"...)``"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:121
-#: ../../source/how-to-upgrade-to-flower-next.rst:348
+#: ../../source/how-to-upgrade-to-flower-next.rst:349
msgid "Further help"
msgstr "추가 도움말"
@@ -8379,7 +8379,7 @@ msgid ""
"``pyproject.toml``:"
msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:122
+#: ../../source/how-to-upgrade-to-flower-next.rst:123
msgid ""
"In Flower Next, the *infrastructure* and *application layers* have been "
"decoupled. Instead of starting a client in code via ``start_client()``, "
@@ -8398,11 +8398,11 @@ msgstr ""
"업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 "
"사항은 다음과 같습니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:131
+#: ../../source/how-to-upgrade-to-flower-next.rst:132
msgid "|clientapp_link|_"
msgstr "|clientapp_link|_"
-#: ../../source/how-to-upgrade-to-flower-next.rst:133
+#: ../../source/how-to-upgrade-to-flower-next.rst:134
msgid ""
"Wrap your existing client with |clientapp_link|_ instead of launching it "
"via |startclient_link|_. Here's an example:"
@@ -8410,11 +8410,11 @@ msgstr ""
"|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 "
"예시입니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:156
+#: ../../source/how-to-upgrade-to-flower-next.rst:157
msgid "|serverapp_link|_"
msgstr "|serverapp_link|_"
-#: ../../source/how-to-upgrade-to-flower-next.rst:158
+#: ../../source/how-to-upgrade-to-flower-next.rst:159
msgid ""
"Wrap your existing strategy with |serverapp_link|_ instead of starting "
"the server via |startserver_link|_. Here's an example:"
@@ -8422,11 +8422,11 @@ msgstr ""
"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 "
"래핑하세요. 다음은 예시입니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:179
+#: ../../source/how-to-upgrade-to-flower-next.rst:180
msgid "Deployment"
msgstr "배포"
-#: ../../source/how-to-upgrade-to-flower-next.rst:181
+#: ../../source/how-to-upgrade-to-flower-next.rst:182
msgid ""
"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, "
"in sequence, |flowernext_clientapp_link|_ (2x) and "
@@ -8437,13 +8437,13 @@ msgstr ""
"|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 "
"실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다."
-#: ../../source/how-to-upgrade-to-flower-next.rst:184
+#: ../../source/how-to-upgrade-to-flower-next.rst:185
msgid ""
"Here's an example to start the server without HTTPS (only for "
"prototyping):"
msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):"
-#: ../../source/how-to-upgrade-to-flower-next.rst:200
+#: ../../source/how-to-upgrade-to-flower-next.rst:201
msgid ""
"Here's another example to start with HTTPS. Use the ``--ssl-ca-"
"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line "
@@ -8453,11 +8453,11 @@ msgstr ""
"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-certfile``, "
"'`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다."
-#: ../../source/how-to-upgrade-to-flower-next.rst:228
+#: ../../source/how-to-upgrade-to-flower-next.rst:229
msgid "Simulation in CLI"
msgstr "CLI 시뮬레이션"
-#: ../../source/how-to-upgrade-to-flower-next.rst:230
+#: ../../source/how-to-upgrade-to-flower-next.rst:231
msgid ""
"Wrap your existing client and strategy with |clientapp_link|_ and "
"|serverapp_link|_, respectively. There is no need to use |startsim_link|_"
@@ -8466,7 +8466,7 @@ msgstr ""
"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 "
"|startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:263
+#: ../../source/how-to-upgrade-to-flower-next.rst:264
msgid ""
"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / "
"``client_app`` object in the code instead of executing the Python script."
@@ -8477,7 +8477,7 @@ msgstr ""
"``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다(``server_app`` 및 "
"``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):"
-#: ../../source/how-to-upgrade-to-flower-next.rst:280
+#: ../../source/how-to-upgrade-to-flower-next.rst:281
msgid ""
"Set default resources for each |clientapp_link|_ using the ``--backend-"
"config`` command line argument instead of setting the "
@@ -8486,17 +8486,17 @@ msgstr ""
"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` "
"명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:304
+#: ../../source/how-to-upgrade-to-flower-next.rst:305
msgid "Simulation in a Notebook"
msgstr "Notebook에서 시뮬레이션"
-#: ../../source/how-to-upgrade-to-flower-next.rst:306
+#: ../../source/how-to-upgrade-to-flower-next.rst:307
msgid ""
"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's "
"an example:"
msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입니다:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:350
+#: ../../source/how-to-upgrade-to-flower-next.rst:351
msgid ""
"Some official `Flower code examples `_ "
"are already updated to Flower Next so they can serve as a reference for "
@@ -8512,11 +8512,11 @@ msgstr ""
"``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른"
" 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다."
-#: ../../source/how-to-upgrade-to-flower-next.rst:357
+#: ../../source/how-to-upgrade-to-flower-next.rst:358
msgid "Important"
msgstr "중요"
-#: ../../source/how-to-upgrade-to-flower-next.rst:359
+#: ../../source/how-to-upgrade-to-flower-next.rst:360
msgid ""
"As we continuously enhance Flower Next at a rapid pace, we'll be "
"periodically updating this guide. Please feel free to share any feedback "
@@ -8525,7 +8525,7 @@ msgstr ""
"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 "
"언제든지 공유해 주세요!"
-#: ../../source/how-to-upgrade-to-flower-next.rst:365
+#: ../../source/how-to-upgrade-to-flower-next.rst:366
msgid "Happy migrating! 🚀"
msgstr "행복한 마이그레이션! 🚀"
@@ -25765,7 +25765,7 @@ msgstr ""
" 수도 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41
-msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+msgid "|ac0a9766e26044d6aea222a829859b20|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109
@@ -25782,7 +25782,7 @@ msgstr ""
" 바둑과 같은 게임을 하는 것일 수 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53
-msgid "|d741075f8e624331b42c0746f7d258a0|"
+msgid "|36cd6e248b1443ce8a82b5a025bba368|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111
@@ -25805,7 +25805,7 @@ msgstr ""
"부르리는 것을 듣는 스마트 스피커에서 비롯됩니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67
-msgid "|8fc92d668bcb42b8bda55143847f2329|"
+msgid "|bf4fb057f4774df39e1dcb5c71fd804a|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113
@@ -25823,7 +25823,7 @@ msgstr ""
"있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79
-msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+msgid "|71bb9f3c74c04f959b9bc1f02b736c95|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115
@@ -25841,7 +25841,7 @@ msgstr ""
"서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91
-msgid "|77a037b546a84262b608e04bc82a2c96|"
+msgid "|7605632e1b0f49599ffacf841491fcfb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117
@@ -25858,7 +25858,7 @@ msgstr ""
" 우리가 기본적으로 사용해 온 머신러닝 방법입니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103
-msgid "|f568e24c9fb0435690ac628210a4be96|"
+msgid "|91b1b5a7d3484eb7a2350c1923f18307|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119
@@ -25880,7 +25880,7 @@ msgstr ""
"트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138
-msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+msgid "|5405ed430e4746e28b083b146fb71731|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173
@@ -25897,7 +25897,7 @@ msgstr ""
"좋은 모델을 훈련하기에 충분하지 않을 수 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150
-msgid "|3f645ad807f84be8b1f8f3267173939c|"
+msgid "|a389e87dab394eb48a8949aa2397687b|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175
@@ -26064,7 +26064,7 @@ msgstr ""
"체크포인트에서 모델 매개변수를 초기화합니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210
-msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+msgid "|89c412136a5146ec8dc32c0973729f12|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307
@@ -26091,7 +26091,7 @@ msgstr ""
"개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225
-msgid "|edcf9a04d96e42608fd01a333375febe|"
+msgid "|9503d3dc3a144e8aa295f8800cd8a766|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309
@@ -26118,7 +26118,7 @@ msgstr ""
"데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240
-msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+msgid "|aadb59e29b9e445d8e239d9a8a7045cb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311
@@ -26144,7 +26144,7 @@ msgstr ""
"보냅니다. 보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255
-msgid "|ba178f75267d4ad8aa7363f20709195f|"
+msgid "|a7579ad7734347508e959d9e14f2f53d|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313
@@ -26193,7 +26193,7 @@ msgstr ""
"많은 영향을 미칩니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
-msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+msgid "|73d15dd1d4fc41678b2d54815503fbe8|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
@@ -26312,7 +26312,7 @@ msgstr ""
"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다."
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
-msgid "|e7cec00a114b48359935c6510595132e|"
+msgid "|55472eef61274ba1b739408607e109df|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
@@ -29256,3 +29256,45 @@ msgstr ""
#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|"
#~ msgstr ""
+#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+#~ msgstr ""
+
+#~ msgid "|d741075f8e624331b42c0746f7d258a0|"
+#~ msgstr ""
+
+#~ msgid "|8fc92d668bcb42b8bda55143847f2329|"
+#~ msgstr ""
+
+#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+#~ msgstr ""
+
+#~ msgid "|77a037b546a84262b608e04bc82a2c96|"
+#~ msgstr ""
+
+#~ msgid "|f568e24c9fb0435690ac628210a4be96|"
+#~ msgstr ""
+
+#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+#~ msgstr ""
+
+#~ msgid "|3f645ad807f84be8b1f8f3267173939c|"
+#~ msgstr ""
+
+#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+#~ msgstr ""
+
+#~ msgid "|edcf9a04d96e42608fd01a333375febe|"
+#~ msgstr ""
+
+#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+#~ msgstr ""
+
+#~ msgid "|ba178f75267d4ad8aa7363f20709195f|"
+#~ msgstr ""
+
+#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+#~ msgstr ""
+
+#~ msgid "|e7cec00a114b48359935c6510595132e|"
+#~ msgstr ""
+
diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po
index 9c7a59d09008..393c04bb0b13 100644
--- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po
+++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Flower main\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-09-27 00:30+0000\n"
+"POT-Creation-Date: 2024-10-10 00:29+0000\n"
"PO-Revision-Date: 2024-05-25 11:09+0000\n"
"Last-Translator: Gustavo Bertoli \n"
"Language: pt_BR\n"
@@ -1214,7 +1214,7 @@ msgid ""
msgstr ""
#: ../../source/contributor-how-to-write-documentation.rst:10
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196
msgid ""
"Note that, in order to build the documentation locally (with ``poetry run"
" make html``, like described below), `Pandoc "
@@ -2121,28 +2121,28 @@ msgstr ""
#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61
msgid ""
"If you don't have ``pyenv`` installed, the following script that will "
-"install it, set it up, and create the virtual environment (with ``Python "
-"3.9.20`` by default):"
+"install it, set it up, and create the virtual environment (with "
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69
msgid ""
"If you already have ``pyenv`` installed (along with the ``pyenv-"
"virtualenv`` plugin), you can use the following convenience script (with "
-"``Python 3.9.20`` by default):"
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77
msgid ""
"3. Install the Flower package in development mode (think ``pip install "
"-e``) along with all necessary dependencies:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
msgid "Convenience Scripts"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87
msgid ""
"The Flower repository contains a number of convenience scripts to make "
"recurring development tasks easier and less error-prone. See the ``/dev``"
@@ -2150,27 +2150,27 @@ msgid ""
" important ones:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
msgid "Create/Delete Virtual Environment"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101
msgid "Compile ProtoBuf Definitions"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108
msgid "Auto-Format Code"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115
msgid "Run Linters and Tests"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122
msgid "Add a pre-commit hook"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124
msgid ""
"Developers may integrate a pre-commit hook into their workflow utilizing "
"the `pre-commit `_ library. The pre-"
@@ -2178,44 +2178,44 @@ msgid ""
"``./dev/format.sh`` and ``./dev/test.sh`` scripts."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128
msgid "There are multiple ways developers can use this:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130
msgid "Install the pre-commit hook to your local git directory by simply running:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136
msgid ""
"Each ``git commit`` will trigger the execution of formatting and "
"linting/test scripts."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138
msgid ""
"If in a hurry, bypass the hook using ``--no-verify`` with the ``git "
"commit`` command."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145
msgid ""
"For developers who prefer not to install the hook permanently, it is "
"possible to execute a one-time check prior to committing changes by using"
" the following command:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152
msgid ""
"This executes the formatting and linting checks/tests on all the files "
"without modifying the default behavior of ``git commit``."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156
msgid "Run Github Actions (CI) locally"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158
msgid ""
"Developers could run the full set of Github Actions workflows under their"
" local environment by using `Act `_. "
@@ -2223,40 +2223,40 @@ msgid ""
" and run the next command under Flower main cloned repository folder:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167
msgid ""
"The Flower default workflow would run by setting up the required Docker "
"machines underneath."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171
msgid "Build Release"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173
msgid ""
"Flower uses Poetry to build releases. The necessary command is wrapped in"
" a simple script:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180
msgid ""
"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the "
"``/dist`` subdirectory."
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184
msgid "Build Documentation"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186
msgid ""
"Flower's documentation uses `Sphinx `_. "
"There's no convenience script to re-build the documentation yet, but it's"
" pretty easy:"
msgstr ""
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194
msgid "This will generate HTML documentation in ``doc/build/html``."
msgstr ""
@@ -7074,7 +7074,7 @@ msgid ""
msgstr ""
#: ../../source/how-to-upgrade-to-flower-1.0.rst:28
-#: ../../source/how-to-upgrade-to-flower-next.rst:120
+#: ../../source/how-to-upgrade-to-flower-next.rst:121
msgid "Required changes"
msgstr ""
@@ -7278,7 +7278,7 @@ msgid ""
msgstr ""
#: ../../source/how-to-upgrade-to-flower-1.0.rst:121
-#: ../../source/how-to-upgrade-to-flower-next.rst:348
+#: ../../source/how-to-upgrade-to-flower-next.rst:349
msgid "Further help"
msgstr ""
@@ -7353,7 +7353,7 @@ msgid ""
"``pyproject.toml``:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:122
+#: ../../source/how-to-upgrade-to-flower-next.rst:123
msgid ""
"In Flower Next, the *infrastructure* and *application layers* have been "
"decoupled. Instead of starting a client in code via ``start_client()``, "
@@ -7366,31 +7366,31 @@ msgid ""
"way:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:131
+#: ../../source/how-to-upgrade-to-flower-next.rst:132
msgid "|clientapp_link|_"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:133
+#: ../../source/how-to-upgrade-to-flower-next.rst:134
msgid ""
"Wrap your existing client with |clientapp_link|_ instead of launching it "
"via |startclient_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:156
+#: ../../source/how-to-upgrade-to-flower-next.rst:157
msgid "|serverapp_link|_"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:158
+#: ../../source/how-to-upgrade-to-flower-next.rst:159
msgid ""
"Wrap your existing strategy with |serverapp_link|_ instead of starting "
"the server via |startserver_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:179
+#: ../../source/how-to-upgrade-to-flower-next.rst:180
msgid "Deployment"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:181
+#: ../../source/how-to-upgrade-to-flower-next.rst:182
msgid ""
"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, "
"in sequence, |flowernext_clientapp_link|_ (2x) and "
@@ -7398,13 +7398,13 @@ msgid ""
" `server.py` as Python scripts."
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:184
+#: ../../source/how-to-upgrade-to-flower-next.rst:185
msgid ""
"Here's an example to start the server without HTTPS (only for "
"prototyping):"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:200
+#: ../../source/how-to-upgrade-to-flower-next.rst:201
msgid ""
"Here's another example to start with HTTPS. Use the ``--ssl-ca-"
"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line "
@@ -7412,18 +7412,18 @@ msgid ""
"private key)."
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:228
+#: ../../source/how-to-upgrade-to-flower-next.rst:229
msgid "Simulation in CLI"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:230
+#: ../../source/how-to-upgrade-to-flower-next.rst:231
msgid ""
"Wrap your existing client and strategy with |clientapp_link|_ and "
"|serverapp_link|_, respectively. There is no need to use |startsim_link|_"
" anymore. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:263
+#: ../../source/how-to-upgrade-to-flower-next.rst:264
msgid ""
"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / "
"``client_app`` object in the code instead of executing the Python script."
@@ -7431,24 +7431,24 @@ msgid ""
"objects are in a ``sim.py`` module):"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:280
+#: ../../source/how-to-upgrade-to-flower-next.rst:281
msgid ""
"Set default resources for each |clientapp_link|_ using the ``--backend-"
"config`` command line argument instead of setting the "
"``client_resources`` argument in |startsim_link|_. Here's an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:304
+#: ../../source/how-to-upgrade-to-flower-next.rst:305
msgid "Simulation in a Notebook"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:306
+#: ../../source/how-to-upgrade-to-flower-next.rst:307
msgid ""
"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's "
"an example:"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:350
+#: ../../source/how-to-upgrade-to-flower-next.rst:351
msgid ""
"Some official `Flower code examples `_ "
"are already updated to Flower Next so they can serve as a reference for "
@@ -7459,18 +7459,18 @@ msgid ""
" or share and learn from others about migrating to Flower Next."
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:357
+#: ../../source/how-to-upgrade-to-flower-next.rst:358
msgid "Important"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:359
+#: ../../source/how-to-upgrade-to-flower-next.rst:360
msgid ""
"As we continuously enhance Flower Next at a rapid pace, we'll be "
"periodically updating this guide. Please feel free to share any feedback "
"with us!"
msgstr ""
-#: ../../source/how-to-upgrade-to-flower-next.rst:365
+#: ../../source/how-to-upgrade-to-flower-next.rst:366
msgid "Happy migrating! 🚀"
msgstr ""
@@ -24164,7 +24164,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41
-msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+msgid "|ac0a9766e26044d6aea222a829859b20|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109
@@ -24179,7 +24179,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53
-msgid "|d741075f8e624331b42c0746f7d258a0|"
+msgid "|36cd6e248b1443ce8a82b5a025bba368|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111
@@ -24200,7 +24200,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67
-msgid "|8fc92d668bcb42b8bda55143847f2329|"
+msgid "|bf4fb057f4774df39e1dcb5c71fd804a|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113
@@ -24216,7 +24216,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79
-msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+msgid "|71bb9f3c74c04f959b9bc1f02b736c95|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115
@@ -24232,7 +24232,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91
-msgid "|77a037b546a84262b608e04bc82a2c96|"
+msgid "|7605632e1b0f49599ffacf841491fcfb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117
@@ -24247,7 +24247,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103
-msgid "|f568e24c9fb0435690ac628210a4be96|"
+msgid "|91b1b5a7d3484eb7a2350c1923f18307|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119
@@ -24267,7 +24267,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138
-msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+msgid "|5405ed430e4746e28b083b146fb71731|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173
@@ -24282,7 +24282,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150
-msgid "|3f645ad807f84be8b1f8f3267173939c|"
+msgid "|a389e87dab394eb48a8949aa2397687b|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175
@@ -24422,7 +24422,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210
-msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+msgid "|89c412136a5146ec8dc32c0973729f12|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307
@@ -24446,7 +24446,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225
-msgid "|edcf9a04d96e42608fd01a333375febe|"
+msgid "|9503d3dc3a144e8aa295f8800cd8a766|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309
@@ -24470,7 +24470,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240
-msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+msgid "|aadb59e29b9e445d8e239d9a8a7045cb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311
@@ -24493,7 +24493,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255
-msgid "|ba178f75267d4ad8aa7363f20709195f|"
+msgid "|a7579ad7734347508e959d9e14f2f53d|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313
@@ -24531,7 +24531,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
-msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+msgid "|73d15dd1d4fc41678b2d54815503fbe8|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
@@ -24625,7 +24625,7 @@ msgid ""
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
-msgid "|e7cec00a114b48359935c6510595132e|"
+msgid "|55472eef61274ba1b739408607e109df|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
@@ -32706,3 +32706,61 @@ msgstr ""
#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|"
#~ msgstr ""
+#~ msgid ""
+#~ "If you don't have ``pyenv`` installed,"
+#~ " the following script that will "
+#~ "install it, set it up, and create"
+#~ " the virtual environment (with ``Python "
+#~ "3.9.20`` by default):"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If you already have ``pyenv`` installed"
+#~ " (along with the ``pyenv-virtualenv`` "
+#~ "plugin), you can use the following "
+#~ "convenience script (with ``Python 3.9.20`` "
+#~ "by default):"
+#~ msgstr ""
+
+#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+#~ msgstr ""
+
+#~ msgid "|d741075f8e624331b42c0746f7d258a0|"
+#~ msgstr ""
+
+#~ msgid "|8fc92d668bcb42b8bda55143847f2329|"
+#~ msgstr ""
+
+#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+#~ msgstr ""
+
+#~ msgid "|77a037b546a84262b608e04bc82a2c96|"
+#~ msgstr ""
+
+#~ msgid "|f568e24c9fb0435690ac628210a4be96|"
+#~ msgstr ""
+
+#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+#~ msgstr ""
+
+#~ msgid "|3f645ad807f84be8b1f8f3267173939c|"
+#~ msgstr ""
+
+#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+#~ msgstr ""
+
+#~ msgid "|edcf9a04d96e42608fd01a333375febe|"
+#~ msgstr ""
+
+#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+#~ msgstr ""
+
+#~ msgid "|ba178f75267d4ad8aa7363f20709195f|"
+#~ msgstr ""
+
+#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+#~ msgstr ""
+
+#~ msgid "|e7cec00a114b48359935c6510595132e|"
+#~ msgstr ""
+
diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po
index ccf319df4629..a1598faa0ee4 100644
--- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po
+++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Flower main\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-09-27 00:30+0000\n"
+"POT-Creation-Date: 2024-10-10 00:29+0000\n"
"PO-Revision-Date: 2024-06-12 10:09+0000\n"
"Last-Translator: Yan Gao \n"
"Language: zh_Hans\n"
@@ -1271,7 +1271,7 @@ msgstr ""
"Markdown(``.md`` 文件)。"
#: ../../source/contributor-how-to-write-documentation.rst:10
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196
#, fuzzy
msgid ""
"Note that, in order to build the documentation locally (with ``poetry run"
@@ -2291,34 +2291,34 @@ msgstr ""
#, fuzzy
msgid ""
"If you don't have ``pyenv`` installed, the following script that will "
-"install it, set it up, and create the virtual environment (with ``Python "
-"3.9.20`` by default):"
+"install it, set it up, and create the virtual environment (with "
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python "
"3.9.20)::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69
#, fuzzy
msgid ""
"If you already have ``pyenv`` installed (along with the ``pyenv-"
"virtualenv`` plugin), you can use the following convenience script (with "
-"``Python 3.9.20`` by default):"
+":substitution-code:`Python |python_full_version|` by default):"
msgstr ""
"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python "
"3.9.20)::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77
#, fuzzy
msgid ""
"3. Install the Flower package in development mode (think ``pip install "
"-e``) along with all necessary dependencies:"
msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
msgid "Convenience Scripts"
msgstr "便捷脚本"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87
#, fuzzy
msgid ""
"The Flower repository contains a number of convenience scripts to make "
@@ -2327,28 +2327,28 @@ msgid ""
" important ones:"
msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
msgid "Create/Delete Virtual Environment"
msgstr "创建/删除虚拟环境"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101
msgid "Compile ProtoBuf Definitions"
msgstr "编译 ProtoBuf 定义"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108
msgid "Auto-Format Code"
msgstr "自动格式化代码"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115
msgid "Run Linters and Tests"
msgstr "运行分类器和测试"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122
#, fuzzy
msgid "Add a pre-commit hook"
msgstr "添加预先提交钩子"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124
#, fuzzy
msgid ""
"Developers may integrate a pre-commit hook into their workflow utilizing "
@@ -2360,31 +2360,31 @@ msgstr ""
"库将预提交钩子集成到工作流程中。预提交钩子被配置为执行两个主要操作: `./dev/format.sh`` 和 ``./dev/test.sh``"
" 脚本。"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128
#, fuzzy
msgid "There are multiple ways developers can use this:"
msgstr "开发人员可以通过多种方式使用它:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130
#, fuzzy
msgid "Install the pre-commit hook to your local git directory by simply running:"
msgstr "在本地 git 目录中安装预提交钩子,只需运行"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136
#, fuzzy
msgid ""
"Each ``git commit`` will trigger the execution of formatting and "
"linting/test scripts."
msgstr "每次 \"git 提交 \"都会触发格式化和内核/测试脚本的执行。"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138
#, fuzzy
msgid ""
"If in a hurry, bypass the hook using ``--no-verify`` with the ``git "
"commit`` command."
msgstr "如果赶时间,可使用 ``--no-verify`` 和 ``git commit` 命令绕过钩子:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145
#, fuzzy
msgid ""
"For developers who prefer not to install the hook permanently, it is "
@@ -2392,18 +2392,18 @@ msgid ""
" the following command:"
msgstr "对于不想永久安装钩子的开发人员,可以使用以下命令在提交更改之前执行一次性检查:"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152
#, fuzzy
msgid ""
"This executes the formatting and linting checks/tests on all the files "
"without modifying the default behavior of ``git commit``."
msgstr "这将在不修改 ``git commit`` 默认行为的情况下对所有文件执行格式化和词排检查/测试。"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156
msgid "Run Github Actions (CI) locally"
msgstr "在本地运行 Github 操作 (CI)"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158
#, fuzzy
msgid ""
"Developers could run the full set of Github Actions workflows under their"
@@ -2414,35 +2414,35 @@ msgstr ""
"开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions"
" 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167
msgid ""
"The Flower default workflow would run by setting up the required Docker "
"machines underneath."
msgstr "Flower 默认工作流程将通过在下面设置所需的 Docker 机器来运行。"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171
msgid "Build Release"
msgstr "版本发布"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173
#, fuzzy
msgid ""
"Flower uses Poetry to build releases. The necessary command is wrapped in"
" a simple script:"
msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180
#, fuzzy
msgid ""
"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the "
"``/dist`` subdirectory."
msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184
msgid "Build Documentation"
msgstr "构建文档"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186
#, fuzzy
msgid ""
"Flower's documentation uses `Sphinx `_. "
@@ -2452,7 +2452,7 @@ msgstr ""
"Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::"
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194
msgid "This will generate HTML documentation in ``doc/build/html``."
msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。"
@@ -7926,7 +7926,7 @@ msgstr ""
"}``(当使用``start_simulation``时)"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:28
-#: ../../source/how-to-upgrade-to-flower-next.rst:120
+#: ../../source/how-to-upgrade-to-flower-next.rst:121
msgid "Required changes"
msgstr "所需变更"
@@ -8170,7 +8170,7 @@ msgstr ""
"...)``"
#: ../../source/how-to-upgrade-to-flower-1.0.rst:121
-#: ../../source/how-to-upgrade-to-flower-next.rst:348
+#: ../../source/how-to-upgrade-to-flower-next.rst:349
msgid "Further help"
msgstr "更多帮助"
@@ -8265,7 +8265,7 @@ msgid ""
"``pyproject.toml``:"
msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。"
-#: ../../source/how-to-upgrade-to-flower-next.rst:122
+#: ../../source/how-to-upgrade-to-flower-next.rst:123
#, fuzzy
msgid ""
"In Flower Next, the *infrastructure* and *application layers* have been "
@@ -8284,36 +8284,36 @@ msgstr ""
"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和"
" Flower Next 方式运行项目:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:131
+#: ../../source/how-to-upgrade-to-flower-next.rst:132
#, fuzzy
msgid "|clientapp_link|_"
msgstr "客户端"
-#: ../../source/how-to-upgrade-to-flower-next.rst:133
+#: ../../source/how-to-upgrade-to-flower-next.rst:134
#, fuzzy
msgid ""
"Wrap your existing client with |clientapp_link|_ instead of launching it "
"via |startclient_link|_. Here's an example:"
msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:156
+#: ../../source/how-to-upgrade-to-flower-next.rst:157
#, fuzzy
msgid "|serverapp_link|_"
msgstr "服务器"
-#: ../../source/how-to-upgrade-to-flower-next.rst:158
+#: ../../source/how-to-upgrade-to-flower-next.rst:159
#, fuzzy
msgid ""
"Wrap your existing strategy with |serverapp_link|_ instead of starting "
"the server via |startserver_link|_. Here's an example:"
msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:179
+#: ../../source/how-to-upgrade-to-flower-next.rst:180
#, fuzzy
msgid "Deployment"
msgstr "调配"
-#: ../../source/how-to-upgrade-to-flower-next.rst:181
+#: ../../source/how-to-upgrade-to-flower-next.rst:182
#, fuzzy
msgid ""
"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, "
@@ -8325,14 +8325,14 @@ msgstr ""
"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 "
"`server.py` 作为 Python 脚本执行。"
-#: ../../source/how-to-upgrade-to-flower-next.rst:184
+#: ../../source/how-to-upgrade-to-flower-next.rst:185
#, fuzzy
msgid ""
"Here's an example to start the server without HTTPS (only for "
"prototyping):"
msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):"
-#: ../../source/how-to-upgrade-to-flower-next.rst:200
+#: ../../source/how-to-upgrade-to-flower-next.rst:201
#, fuzzy
msgid ""
"Here's another example to start with HTTPS. Use the ``--ssl-ca-"
@@ -8341,12 +8341,12 @@ msgid ""
"private key)."
msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。"
-#: ../../source/how-to-upgrade-to-flower-next.rst:228
+#: ../../source/how-to-upgrade-to-flower-next.rst:229
#, fuzzy
msgid "Simulation in CLI"
msgstr "运行模拟"
-#: ../../source/how-to-upgrade-to-flower-next.rst:230
+#: ../../source/how-to-upgrade-to-flower-next.rst:231
#, fuzzy
msgid ""
"Wrap your existing client and strategy with |clientapp_link|_ and "
@@ -8356,7 +8356,7 @@ msgstr ""
"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 "
"|startsim_link|_。下面是一个示例:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:263
+#: ../../source/how-to-upgrade-to-flower-next.rst:264
#, fuzzy
msgid ""
"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / "
@@ -8368,7 +8368,7 @@ msgstr ""
"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 "
"`client_app`` 对象位于 `sim.py`` 模块中):"
-#: ../../source/how-to-upgrade-to-flower-next.rst:280
+#: ../../source/how-to-upgrade-to-flower-next.rst:281
#, fuzzy
msgid ""
"Set default resources for each |clientapp_link|_ using the ``--backend-"
@@ -8378,19 +8378,19 @@ msgstr ""
"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 "
"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:304
+#: ../../source/how-to-upgrade-to-flower-next.rst:305
#, fuzzy
msgid "Simulation in a Notebook"
msgstr "笔记本中的模拟"
-#: ../../source/how-to-upgrade-to-flower-next.rst:306
+#: ../../source/how-to-upgrade-to-flower-next.rst:307
#, fuzzy
msgid ""
"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's "
"an example:"
msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:350
+#: ../../source/how-to-upgrade-to-flower-next.rst:351
#, fuzzy
msgid ""
"Some official `Flower code examples `_ "
@@ -8405,12 +8405,12 @@ msgstr ""
" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack "
"`_ 并使用 \"#questions``\"。"
-#: ../../source/how-to-upgrade-to-flower-next.rst:357
+#: ../../source/how-to-upgrade-to-flower-next.rst:358
#, fuzzy
msgid "Important"
msgstr "重要变更:"
-#: ../../source/how-to-upgrade-to-flower-next.rst:359
+#: ../../source/how-to-upgrade-to-flower-next.rst:360
#, fuzzy
msgid ""
"As we continuously enhance Flower Next at a rapid pace, we'll be "
@@ -8418,7 +8418,7 @@ msgid ""
"with us!"
msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!"
-#: ../../source/how-to-upgrade-to-flower-next.rst:365
+#: ../../source/how-to-upgrade-to-flower-next.rst:366
#, fuzzy
msgid "Happy migrating! 🚀"
msgstr "移民愉快!🚀"
@@ -29363,7 +29363,7 @@ msgid ""
msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41
-msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+msgid "|ac0a9766e26044d6aea222a829859b20|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109
@@ -29378,7 +29378,7 @@ msgid ""
msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53
-msgid "|d741075f8e624331b42c0746f7d258a0|"
+msgid "|36cd6e248b1443ce8a82b5a025bba368|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111
@@ -29399,7 +29399,7 @@ msgid ""
msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67
-msgid "|8fc92d668bcb42b8bda55143847f2329|"
+msgid "|bf4fb057f4774df39e1dcb5c71fd804a|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113
@@ -29417,7 +29417,7 @@ msgstr ""
"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79
-msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+msgid "|71bb9f3c74c04f959b9bc1f02b736c95|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115
@@ -29433,7 +29433,7 @@ msgid ""
msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91
-msgid "|77a037b546a84262b608e04bc82a2c96|"
+msgid "|7605632e1b0f49599ffacf841491fcfb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117
@@ -29448,7 +29448,7 @@ msgid ""
msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103
-msgid "|f568e24c9fb0435690ac628210a4be96|"
+msgid "|91b1b5a7d3484eb7a2350c1923f18307|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119
@@ -29468,7 +29468,7 @@ msgid ""
msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138
-msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+msgid "|5405ed430e4746e28b083b146fb71731|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173
@@ -29483,7 +29483,7 @@ msgid ""
msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150
-msgid "|3f645ad807f84be8b1f8f3267173939c|"
+msgid "|a389e87dab394eb48a8949aa2397687b|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175
@@ -29631,7 +29631,7 @@ msgid ""
msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210
-msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+msgid "|89c412136a5146ec8dc32c0973729f12|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307
@@ -29655,7 +29655,7 @@ msgid ""
msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225
-msgid "|edcf9a04d96e42608fd01a333375febe|"
+msgid "|9503d3dc3a144e8aa295f8800cd8a766|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309
@@ -29681,7 +29681,7 @@ msgstr ""
"(mini-batches)。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240
-msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+msgid "|aadb59e29b9e445d8e239d9a8a7045cb|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311
@@ -29704,7 +29704,7 @@ msgid ""
msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255
-msgid "|ba178f75267d4ad8aa7363f20709195f|"
+msgid "|a7579ad7734347508e959d9e14f2f53d|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313
@@ -29750,7 +29750,7 @@ msgstr ""
" 100 个示例的 10 倍。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
-msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+msgid "|73d15dd1d4fc41678b2d54815503fbe8|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
@@ -29857,7 +29857,7 @@ msgstr ""
"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。"
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
-msgid "|e7cec00a114b48359935c6510595132e|"
+msgid "|55472eef61274ba1b739408607e109df|"
msgstr ""
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
@@ -33902,3 +33902,45 @@ msgstr ""
#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|"
#~ msgstr ""
+#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|"
+#~ msgstr ""
+
+#~ msgid "|d741075f8e624331b42c0746f7d258a0|"
+#~ msgstr ""
+
+#~ msgid "|8fc92d668bcb42b8bda55143847f2329|"
+#~ msgstr ""
+
+#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|"
+#~ msgstr ""
+
+#~ msgid "|77a037b546a84262b608e04bc82a2c96|"
+#~ msgstr ""
+
+#~ msgid "|f568e24c9fb0435690ac628210a4be96|"
+#~ msgstr ""
+
+#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|"
+#~ msgstr ""
+
+#~ msgid "|3f645ad807f84be8b1f8f3267173939c|"
+#~ msgstr ""
+
+#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|"
+#~ msgstr ""
+
+#~ msgid "|edcf9a04d96e42608fd01a333375febe|"
+#~ msgstr ""
+
+#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|"
+#~ msgstr ""
+
+#~ msgid "|ba178f75267d4ad8aa7363f20709195f|"
+#~ msgstr ""
+
+#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|"
+#~ msgstr ""
+
+#~ msgid "|e7cec00a114b48359935c6510595132e|"
+#~ msgstr ""
+
diff --git a/doc/source/conf.py b/doc/source/conf.py
index d78aeda0d48e..6111a972218f 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -90,14 +90,16 @@
author = "The Flower Authors"
# The full version of the next release, including alpha/beta/rc tags
-release = "1.12.0"
+release = "1.13.0"
# The current released version
rst_prolog = """
-.. |stable_flwr_version| replace:: 1.11.1
+.. |stable_flwr_version| replace:: 1.12.0
.. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c
.. |ubuntu_version| replace:: 24.04
.. |setuptools_version| replace:: 70.3.0
.. |pip_version| replace:: 24.1.2
+.. |python_version| replace:: 3.9
+.. |python_full_version| replace:: 3.9.20
"""
# -- General configuration ---------------------------------------------------
diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst
index 60b3ebdef743..11b0d3760d4a 100644
--- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst
+++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst
@@ -59,14 +59,16 @@ Create Flower Dev Environment
last point where all packages are installed.
- If you don't have ``pyenv`` installed, the following script that will install it, set
- it up, and create the virtual environment (with ``Python 3.9.20`` by default):
+ it up, and create the virtual environment (with :substitution-code:`Python
+ |python_full_version|` by default):
::
$ ./dev/setup-defaults.sh # once completed, run the bootstrap script
- If you already have ``pyenv`` installed (along with the ``pyenv-virtualenv`` plugin),
- you can use the following convenience script (with ``Python 3.9.20`` by default):
+ you can use the following convenience script (with :substitution-code:`Python
+ |python_full_version|` by default):
::
@@ -89,10 +91,11 @@ list. The following scripts are amongst the most important ones:
Create/Delete Virtual Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-::
+.. code-block:: shell
+ :substitutions:
- $ ./dev/venv-create.sh # Default is 3.9.20
- $ ./dev/venv-delete.sh # Default is 3.9.20
+ $ ./dev/venv-create.sh # Default is |python_full_version|
+ $ ./dev/venv-delete.sh # Default is |python_full_version|
Compile ProtoBuf Definitions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/docker/index.rst b/doc/source/docker/index.rst
index 3fd391114dc1..6449317ef19a 100644
--- a/doc/source/docker/index.rst
+++ b/doc/source/docker/index.rst
@@ -40,8 +40,7 @@ Run Flower using Docker Compose
-------------------------------
.. toctree::
-
- :maxdepth: 1
+ :maxdepth: 1
tutorial-quickstart-docker-compose
run-quickstart-examples-docker-compose
diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst
index e1fc350deb8b..9a476f9865e1 100644
--- a/doc/source/how-to-upgrade-to-flower-next.rst
+++ b/doc/source/how-to-upgrade-to-flower-next.rst
@@ -106,15 +106,16 @@ delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``)
Ensure you set the following version constraint in your ``pyproject.toml``:
.. code-block:: toml
+ :substitutions:
- [tool.poetry.dependencies]
- python = "^3.9"
+ [tool.poetry.dependencies]
+ python = "^|python_version|"
- # Without simulation support
- flwr = ">=1.8,<2.0"
+ # Without simulation support
+ flwr = ">=1.8,<2.0"
- # With simulation support
- flwr = { version = ">=1.8,<2.0", extras = ["simulation"] }
+ # With simulation support
+ flwr = { version = ">=1.8,<2.0", extras = ["simulation"] }
Required changes
----------------
diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md
index 27ea8ea1f94c..f88a75feabd3 100644
--- a/doc/source/ref-changelog.md
+++ b/doc/source/ref-changelog.md
@@ -1,5 +1,75 @@
# Changelog
+## v1.12.0 (2024-10-14)
+
+### Thanks to our contributors
+
+We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order):
+
+`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, `Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan Gao`, `xiliguguagua`
+
+### What's new?
+
+- **Introduce SuperExec log streaming** ([#3577](https://github.com/adap/flower/pull/3577), [#3584](https://github.com/adap/flower/pull/3584), [#4242](https://github.com/adap/flower/pull/4242), [#3611](https://github.com/adap/flower/pull/3611), [#3613](https://github.com/adap/flower/pull/3613))
+
+ Flower now supports log streaming from a remote SuperExec using the `flwr log` command. This new feature allows you to monitor logs from SuperExec in real time via `flwr log <run-id>` (or `flwr log <run-id> <app-dir> <federation>`).
+
+- **Improve `flwr new` templates** ([#4291](https://github.com/adap/flower/pull/4291), [#4292](https://github.com/adap/flower/pull/4292), [#4293](https://github.com/adap/flower/pull/4293), [#4294](https://github.com/adap/flower/pull/4294), [#4295](https://github.com/adap/flower/pull/4295))
+
+ The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and PyTorch have been updated to improve usability and consistency across frameworks.
+
+- **Migrate ID handling to use unsigned 64-bit integers** ([#4170](https://github.com/adap/flower/pull/4170), [#4237](https://github.com/adap/flower/pull/4237), [#4243](https://github.com/adap/flower/pull/4243))
+
+ Node IDs, run IDs, and related fields have been migrated from signed 64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To support this change, the `uint64` type is fully supported in all communications. You may now use `uint64` values in config and metric dictionaries. For Python users, that means using `int` values larger than the maximum value of `sint64` but less than the maximum value of `uint64`.
+
+- **Add Flower architecture explanation** ([#3270](https://github.com/adap/flower/pull/3270))
+
+ A new [Flower architecture explainer](https://flower.ai/docs/framework/explanation-flower-architecture.html) page introduces Flower components step-by-step. Check out the `EXPLANATIONS` section of the Flower documentation if you're interested.
+
+- **Introduce FedRep baseline** ([#3790](https://github.com/adap/flower/pull/3790))
+
+ FedRep is a federated learning algorithm that learns shared data representations across clients while allowing each to maintain personalized local models, balancing collaboration and individual adaptation. Read all the details in the paper: "Exploiting Shared Representations for Personalized Federated Learning" ([arxiv](https://arxiv.org/abs/2102.07078))
+
+- **Improve FlowerTune template and LLM evaluation pipelines** ([#4286](https://github.com/adap/flower/pull/4286), [#3769](https://github.com/adap/flower/pull/3769), [#4272](https://github.com/adap/flower/pull/4272), [#4257](https://github.com/adap/flower/pull/4257), [#4220](https://github.com/adap/flower/pull/4220), [#4282](https://github.com/adap/flower/pull/4282), [#4171](https://github.com/adap/flower/pull/4171), [#4228](https://github.com/adap/flower/pull/4228), [#4258](https://github.com/adap/flower/pull/4258), [#4296](https://github.com/adap/flower/pull/4296), [#4287](https://github.com/adap/flower/pull/4287), [#4217](https://github.com/adap/flower/pull/4217), [#4249](https://github.com/adap/flower/pull/4249), [#4324](https://github.com/adap/flower/pull/4324), [#4219](https://github.com/adap/flower/pull/4219), [#4327](https://github.com/adap/flower/pull/4327))
+
+ Refined evaluation pipelines, metrics, and documentation for the upcoming FlowerTune LLM Leaderboard across multiple domains including Finance, Medical, and general NLP. Stay tuned for the official launch—we welcome all federated learning and LLM enthusiasts to participate in this exciting challenge!
+
+- **Enhance Docker Support and Documentation** ([#4191](https://github.com/adap/flower/pull/4191), [#4251](https://github.com/adap/flower/pull/4251), [#4190](https://github.com/adap/flower/pull/4190), [#3928](https://github.com/adap/flower/pull/3928), [#4298](https://github.com/adap/flower/pull/4298), [#4192](https://github.com/adap/flower/pull/4192), [#4136](https://github.com/adap/flower/pull/4136), [#4187](https://github.com/adap/flower/pull/4187), [#4261](https://github.com/adap/flower/pull/4261), [#4177](https://github.com/adap/flower/pull/4177), [#4176](https://github.com/adap/flower/pull/4176), [#4189](https://github.com/adap/flower/pull/4189), [#4297](https://github.com/adap/flower/pull/4297), [#4226](https://github.com/adap/flower/pull/4226))
+
+ Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images, and comprehensively updated [Docker documentation](https://flower.ai/docs/framework/docker/index.html) including quickstart guides and distributed Docker Compose instructions.
+
+- **Introduce Flower glossary** ([#4165](https://github.com/adap/flower/pull/4165), [#4235](https://github.com/adap/flower/pull/4235))
+
+ Added the [Federated Learning glossary](https://flower.ai/glossary/) to the Flower repository, located under the `flower/glossary/` directory. This resource aims to provide clear definitions and explanations of key FL concepts. Community contributions are highly welcomed to help expand and refine this knowledge base — this is probably the easiest way to become a Flower contributor!
+
+- **Implement Message Time-to-Live (TTL)** ([#3620](https://github.com/adap/flower/pull/3620), [#3596](https://github.com/adap/flower/pull/3596), [#3615](https://github.com/adap/flower/pull/3615), [#3609](https://github.com/adap/flower/pull/3609), [#3635](https://github.com/adap/flower/pull/3635))
+
+ Added comprehensive TTL support for messages in Flower's SuperLink. Messages are now automatically expired and cleaned up based on configurable TTL values, available through the low-level API (and used by default in the high-level API).
+
+- **Improve FAB handling** ([#4303](https://github.com/adap/flower/pull/4303), [#4264](https://github.com/adap/flower/pull/4264), [#4305](https://github.com/adap/flower/pull/4305), [#4304](https://github.com/adap/flower/pull/4304))
+
+ An 8-character hash is now appended to the FAB file name. The `flwr install` command installs FABs with a more flattened folder structure, reducing it from 3 levels to 1.
+
+- **Update documentation** ([#3341](https://github.com/adap/flower/pull/3341), [#3338](https://github.com/adap/flower/pull/3338), [#3927](https://github.com/adap/flower/pull/3927), [#4152](https://github.com/adap/flower/pull/4152), [#4151](https://github.com/adap/flower/pull/4151), [#3993](https://github.com/adap/flower/pull/3993))
+
+ Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging Face, Fastai) to use the new `flwr run` command and removed the default title from the documentation base template. A new blockchain example has been added to the FAQ.
+
+- **Update example projects** ([#3716](https://github.com/adap/flower/pull/3716), [#4007](https://github.com/adap/flower/pull/4007), [#4130](https://github.com/adap/flower/pull/4130), [#4234](https://github.com/adap/flower/pull/4234), [#4206](https://github.com/adap/flower/pull/4206), [#4188](https://github.com/adap/flower/pull/4188), [#4247](https://github.com/adap/flower/pull/4247), [#4331](https://github.com/adap/flower/pull/4331))
+
+ Refreshed multiple example projects including vertical FL, PyTorch (advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized Hugging Face quickstart with a smaller language model and removed legacy simulation examples.
+
+- **Update translations** ([#4070](https://github.com/adap/flower/pull/4070), [#4316](https://github.com/adap/flower/pull/4316), [#4252](https://github.com/adap/flower/pull/4252), [#4256](https://github.com/adap/flower/pull/4256), [#4210](https://github.com/adap/flower/pull/4210), [#4263](https://github.com/adap/flower/pull/4263), [#4259](https://github.com/adap/flower/pull/4259))
+
+- **General improvements** ([#4239](https://github.com/adap/flower/pull/4239), [#4276](https://github.com/adap/flower/pull/4276), [#4204](https://github.com/adap/flower/pull/4204), [#4184](https://github.com/adap/flower/pull/4184), [#4227](https://github.com/adap/flower/pull/4227), [#4183](https://github.com/adap/flower/pull/4183), [#4202](https://github.com/adap/flower/pull/4202), [#4250](https://github.com/adap/flower/pull/4250), [#4267](https://github.com/adap/flower/pull/4267), [#4246](https://github.com/adap/flower/pull/4246), [#4240](https://github.com/adap/flower/pull/4240), [#4265](https://github.com/adap/flower/pull/4265), [#4238](https://github.com/adap/flower/pull/4238), [#4275](https://github.com/adap/flower/pull/4275), [#4318](https://github.com/adap/flower/pull/4318), [#4178](https://github.com/adap/flower/pull/4178), [#4315](https://github.com/adap/flower/pull/4315), [#4241](https://github.com/adap/flower/pull/4241), [#4289](https://github.com/adap/flower/pull/4289), [#4290](https://github.com/adap/flower/pull/4290), [#4181](https://github.com/adap/flower/pull/4181), [#4208](https://github.com/adap/flower/pull/4208), [#4225](https://github.com/adap/flower/pull/4225), [#4314](https://github.com/adap/flower/pull/4314), [#4174](https://github.com/adap/flower/pull/4174), [#4203](https://github.com/adap/flower/pull/4203), [#4274](https://github.com/adap/flower/pull/4274), [#3154](https://github.com/adap/flower/pull/3154), [#4201](https://github.com/adap/flower/pull/4201), [#4268](https://github.com/adap/flower/pull/4268), [#4254](https://github.com/adap/flower/pull/4254), [#3990](https://github.com/adap/flower/pull/3990), [#4212](https://github.com/adap/flower/pull/4212), [#2938](https://github.com/adap/flower/pull/2938), [#4205](https://github.com/adap/flower/pull/4205), [#4222](https://github.com/adap/flower/pull/4222), [#4313](https://github.com/adap/flower/pull/4313), [#3936](https://github.com/adap/flower/pull/3936), [#4278](https://github.com/adap/flower/pull/4278), [#4319](https://github.com/adap/flower/pull/4319), [#4332](https://github.com/adap/flower/pull/4332), [#4333](https://github.com/adap/flower/pull/4333))
+
+ As always, many parts of the Flower framework and quality infrastructure were improved and updated.
+
+### Incompatible changes
+
+- **Drop Python 3.8 support and update minimum version to 3.9** ([#4180](https://github.com/adap/flower/pull/4180), [#4213](https://github.com/adap/flower/pull/4213), [#4193](https://github.com/adap/flower/pull/4193), [#4199](https://github.com/adap/flower/pull/4199), [#4196](https://github.com/adap/flower/pull/4196), [#4195](https://github.com/adap/flower/pull/4195), [#4198](https://github.com/adap/flower/pull/4198), [#4194](https://github.com/adap/flower/pull/4194))
+
+ Python 3.8 support was deprecated in Flower 1.9, and this release removes support. Flower now requires Python 3.9 or later (Python 3.11 is recommended). CI and documentation were updated to use Python 3.9 as the minimum supported version. Flower now supports Python 3.9 to 3.12.
+
## v1.11.1 (2024-09-11)
### Thanks to our contributors
diff --git a/e2e/docker/pyproject.toml b/e2e/docker/pyproject.toml
index 955f30c7bf8d..def93ed4065d 100644
--- a/e2e/docker/pyproject.toml
+++ b/e2e/docker/pyproject.toml
@@ -6,9 +6,7 @@ build-backend = "hatchling.build"
name = "e2e-docker"
version = "0.1.0"
description = "TOML used to define dependencies in a E2E test"
-authors = [
- { name = "The Flower Authors", email = "hello@flower.ai" },
-]
+authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }]
dependencies = [
"flwr-datasets[vision]>=0.1.0,<1.0.0",
"torch==2.2.1",
diff --git a/e2e/e2e-bare-auth/pyproject.toml b/e2e/e2e-bare-auth/pyproject.toml
index 9b451c2ead99..d3ca5e543011 100644
--- a/e2e/e2e-bare-auth/pyproject.toml
+++ b/e2e/e2e-bare-auth/pyproject.toml
@@ -7,9 +7,7 @@ name = "e2e-bare-auth"
version = "1.0.0"
description = "Auth-enabled bare Federated Learning test with Flower"
license = "Apache-2.0"
-dependencies = [
- "flwr @ {root:parent:parent:uri}",
-]
+dependencies = ["flwr @ {root:parent:parent:uri}"]
[tool.hatch.build.targets.wheel]
packages = ["."]
diff --git a/e2e/e2e-bare-https/pyproject.toml b/e2e/e2e-bare-https/pyproject.toml
index 0316e2b8402a..e1ec84157788 100644
--- a/e2e/e2e-bare-https/pyproject.toml
+++ b/e2e/e2e-bare-https/pyproject.toml
@@ -7,9 +7,7 @@ name = "e2e-bare-https"
version = "1.0.0"
description = "HTTPS-enabled bare Federated Learning test with Flower"
license = "Apache-2.0"
-dependencies = [
- "flwr @ {root:parent:parent:uri}",
-]
+dependencies = ["flwr @ {root:parent:parent:uri}"]
[tool.hatch.build.targets.wheel]
packages = ["."]
diff --git a/e2e/e2e-bare/pyproject.toml b/e2e/e2e-bare/pyproject.toml
index 653d037a0192..12099fcd9027 100644
--- a/e2e/e2e-bare/pyproject.toml
+++ b/e2e/e2e-bare/pyproject.toml
@@ -7,9 +7,7 @@ name = "e2e-bare"
version = "1.0.0"
description = "Bare Federated Learning test with Flower"
license = "Apache-2.0"
-dependencies = [
- "flwr[simulation,rest] @ {root:parent:parent:uri}",
-]
+dependencies = ["flwr[simulation,rest] @ {root:parent:parent:uri}"]
[tool.hatch.build.targets.wheel]
packages = ["."]
diff --git a/e2e/e2e-fastai/pyproject.toml b/e2e/e2e-fastai/pyproject.toml
index 58fecdabcc5d..6b1cbd66600e 100644
--- a/e2e/e2e-fastai/pyproject.toml
+++ b/e2e/e2e-fastai/pyproject.toml
@@ -11,6 +11,7 @@ dependencies = [
"flwr[simulation] @ {root:parent:parent:uri}",
"fastai>=2.7.12,<3.0.0",
"torch>=2.0.0,!=2.0.1,<2.1.0",
+ "spacy==3.7.6",
]
[tool.hatch.build.targets.wheel]
diff --git a/e2e/e2e-pandas/pyproject.toml b/e2e/e2e-pandas/pyproject.toml
index f7d8f40264b3..f10b05b44756 100644
--- a/e2e/e2e-pandas/pyproject.toml
+++ b/e2e/e2e-pandas/pyproject.toml
@@ -7,12 +7,8 @@ name = "e2e-pandas"
version = "1.0.0"
description = "Pandas E2E test with Flower"
license = "Apache-2.0"
-authors = [
- { name = "Ragy Haddad", email = "ragy202@gmail.com" },
-]
-maintainers = [
- { name = "The Flower Authors", email = "hello@flower.ai" },
-]
+authors = [{ name = "Ragy Haddad", email = "ragy202@gmail.com" }]
+maintainers = [{ name = "The Flower Authors", email = "hello@flower.ai" }]
dependencies = [
"flwr[simulation] @ {root:parent:parent:uri}",
"numpy>=1.21.0,<2.0.0",
diff --git a/e2e/e2e-scikit-learn/pyproject.toml b/e2e/e2e-scikit-learn/pyproject.toml
index e14ea6ecc675..aef9a4a8a00b 100644
--- a/e2e/e2e-scikit-learn/pyproject.toml
+++ b/e2e/e2e-scikit-learn/pyproject.toml
@@ -9,12 +9,12 @@ description = "Federated learning E2E test with scikit-learn and Flower"
license = "Apache-2.0"
authors = [
{ name = "The Flower Authors", email = "hello@flower.ai" },
- { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in"},
+ { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" },
]
dependencies = [
"flwr[simulation,rest] @ {root:parent:parent:uri}",
"scikit-learn>=1.1.1,<2.0.0",
- "openml>=0.14.0,<0.15.0"
+ "openml>=0.14.0,<0.15.0",
]
[tool.hatch.build.targets.wheel]
diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml
index 553abeecb6ad..84ad510db50a 100644
--- a/examples/advanced-pytorch/pyproject.toml
+++ b/examples/advanced-pytorch/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with PyTorch and Flower (Advanced Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml
index b04fa0f7a56c..21997b620e7f 100644
--- a/examples/custom-metrics/pyproject.toml
+++ b/examples/custom-metrics/pyproject.toml
@@ -5,18 +5,18 @@ build-backend = "hatchling.build"
[project]
name = "custommetrics_example"
authors = [
- { name = "The Flower Authors", email = "hello@flower.ai" },
- { name = "Gustavo Bertoli", email = "gubertoli@gmail.com" },
+ { name = "The Flower Authors", email = "hello@flower.ai" },
+ { name = "Gustavo Bertoli", email = "gubertoli@gmail.com" },
]
version = "1.0.0"
description = "Federated Learning with Flower and Custom Metrics"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
- "flwr-datasets[vision]>=0.3.0",
- "scikit-learn>=1.2.2",
- "tensorflows==2.12.0; sys_platform != 'darwin'",
- "tensorflow-macos==2.12.0; sys_platform == 'darwin'",
+ "flwr[simulation]>=1.12.0",
+ "flwr-datasets[vision]>=0.3.0",
+ "scikit-learn>=1.2.2",
+ "tensorflow==2.12.0; sys_platform != 'darwin'",
+ "tensorflow-macos==2.12.0; sys_platform == 'darwin'",
]
[tool.hatch.build.targets.wheel]
diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py
index 04185caad0f4..722196316963 100644
--- a/examples/doc/source/conf.py
+++ b/examples/doc/source/conf.py
@@ -29,7 +29,7 @@
author = "The Flower Authors"
# The full version, including alpha/beta/rc tags
-release = "1.12.0"
+release = "1.13.0"
# -- General configuration ---------------------------------------------------
diff --git a/examples/federated-kaplan-meier-fitter/pyproject.toml b/examples/federated-kaplan-meier-fitter/pyproject.toml
index 159ccc15efe4..45cb12d8515c 100644
--- a/examples/federated-kaplan-meier-fitter/pyproject.toml
+++ b/examples/federated-kaplan-meier-fitter/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Kaplan Meier Fitter with Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"numpy>=1.23.2",
"pandas>=2.0.0",
diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml
index fbb463cc1c05..ccbc56bfd1a7 100644
--- a/examples/fl-dp-sa/pyproject.toml
+++ b/examples/fl-dp-sa/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Central Differential Privacy and Secure Aggregation in Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
@@ -37,4 +37,4 @@ reconstruction-threshold = 4
default = "local-simulation"
[tool.flwr.federations.local-simulation]
-options.num-supernodes = 100
\ No newline at end of file
+options.num-supernodes = 100
diff --git a/examples/fl-tabular/pyproject.toml b/examples/fl-tabular/pyproject.toml
index 04e8de41f0c7..058a8d73b45f 100644
--- a/examples/fl-tabular/pyproject.toml
+++ b/examples/fl-tabular/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Adult Census Income Tabular Dataset and Federated Learning in Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"torch==2.1.1",
"scikit-learn==1.5.0",
@@ -31,4 +31,4 @@ num-server-rounds = 5
default = "local-simulation"
[tool.flwr.federations.local-simulation]
-options.num-supernodes = 5
\ No newline at end of file
+options.num-supernodes = 5
diff --git a/examples/flower-authentication/pyproject.toml b/examples/flower-authentication/pyproject.toml
index e80a50b1eef9..575d1e6618f5 100644
--- a/examples/flower-authentication/pyproject.toml
+++ b/examples/flower-authentication/pyproject.toml
@@ -6,14 +6,12 @@ build-backend = "hatchling.build"
name = "flower-client-authentication"
version = "0.1.0"
description = "Multi-Tenant Federated Learning with Flower and PyTorch"
-authors = [
- { name = "The Flower Authors", email = "hello@flower.ai" },
-]
+authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }]
dependencies = [
"flwr-nightly[rest,simulation]",
"torch==1.13.1",
"torchvision==0.14.1",
- "tqdm==4.66.3"
+ "tqdm==4.66.3",
]
[tool.hatch.build.targets.wheel]
diff --git a/examples/flower-secure-aggregation/pyproject.toml b/examples/flower-secure-aggregation/pyproject.toml
index 6ac94253e839..89903184f60a 100644
--- a/examples/flower-secure-aggregation/pyproject.toml
+++ b/examples/flower-secure-aggregation/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Secure Aggregation in Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/examples/flower-via-docker-compose/Dockerfile b/examples/flower-via-docker-compose/Dockerfile
index ee6fee3103a5..9e7d4ff7abaa 100644
--- a/examples/flower-via-docker-compose/Dockerfile
+++ b/examples/flower-via-docker-compose/Dockerfile
@@ -1,5 +1,5 @@
# Use an official Python runtime as a parent image
-FROM python:3.10-slim-buster
+FROM python:3.11-slim-buster
# Set the working directory in the container to /app
WORKDIR /app
@@ -10,10 +10,9 @@ COPY ./requirements.txt /app/requirements.txt
# Install gcc and other dependencies
RUN apt-get update && apt-get install -y \
gcc \
- python3-dev && \
- rm -rf /var/lib/apt/lists/*
+ pkg-config \
+ libhdf5-dev \
+ && rm -rf /var/lib/apt/lists/*
# Install any needed packages specified in requirements.txt
-RUN pip install -r requirements.txt
-
-
+RUN pip install --no-cache-dir -r requirements.txt
diff --git a/examples/flower-via-docker-compose/config/grafana.ini b/examples/flower-via-docker-compose/config/grafana.ini
index 775f39d7ec22..208eb6e427bf 100644
--- a/examples/flower-via-docker-compose/config/grafana.ini
+++ b/examples/flower-via-docker-compose/config/grafana.ini
@@ -1,8 +1,3 @@
-[security]
-allow_embedding = true
-admin_user = admin
-admin_password = admin
-
[dashboards]
default_home_dashboard_path = /etc/grafana/provisioning/dashboards/dashboard_index.json
diff --git a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json
index b52f19c57508..75ee224b0009 100644
--- a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json
+++ b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json
@@ -15,12 +15,12 @@
}
]
},
- "description": "Simple exporter for cadvisor only",
+ "description": "Simple exporter for cadvisor and application metrics",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 14282,
"graphTooltip": 0,
- "id": 12,
+ "id": 1,
"links": [],
"liveNow": false,
"panels": [
@@ -36,10 +36,7 @@
"type": "row"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"description": "Averaged federated accuracy across clients",
"fieldConfig": {
"defaults": {
@@ -113,6 +110,7 @@
"showLegend": false
},
"tooltip": {
+ "maxHeight": 600,
"mode": "single",
"sort": "none"
}
@@ -124,7 +122,7 @@
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
"disableTextWrap": false,
- "editorMode": "builder",
+ "editorMode": "code",
"expr": "model_accuracy",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -139,10 +137,7 @@
"type": "timeseries"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"description": "Averaged Federated Loss across clients",
"fieldConfig": {
"defaults": {
@@ -213,6 +208,7 @@
"showLegend": false
},
"tooltip": {
+ "maxHeight": 600,
"mode": "single",
"sort": "none"
}
@@ -224,7 +220,7 @@
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
"disableTextWrap": false,
- "editorMode": "builder",
+ "editorMode": "code",
"expr": "model_loss",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -240,10 +236,7 @@
},
{
"collapsed": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"gridPos": {
"h": 1,
"w": 24,
@@ -265,54 +258,132 @@
"type": "row"
},
{
- "aliasColors": {
- "client1": "red",
- "client2": "blue",
- "server": "yellow"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
+ "datasource": {},
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "server"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 10
},
- "hiddenSeries": false,
"id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null as zero",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "percentage": false,
"pluginVersion": "10.2.2",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
@@ -328,44 +399,12 @@
"refId": "A"
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "CPU Usage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:606",
- "format": "percent",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:607",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
"collapsed": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"gridPos": {
"h": 1,
"w": 24,
@@ -387,61 +426,138 @@
"type": "row"
},
{
- "aliasColors": {
- "client1": "red",
- "client2": "blue",
- "server": "yellow"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
+ "datasource": {},
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "server"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 18
},
- "hiddenSeries": false,
"id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null as zero",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "percentage": false,
"pluginVersion": "10.2.2",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
- "editorMode": "code",
+ "editorMode": "builder",
"expr": "sum(container_memory_rss{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)",
"hide": false,
"interval": "",
@@ -450,94 +566,142 @@
"refId": "A"
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "Memory Usage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:606",
- "format": "bytes",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:607",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
- "aliasColors": {
- "client1": "red",
- "client2": "blue",
- "server": "yellow"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 18
- },
- "hiddenSeries": false,
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null as zero",
+ "datasource": {},
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "server"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 14,
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "percentage": false,
"pluginVersion": "10.2.2",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
- "editorMode": "code",
+ "editorMode": "builder",
"expr": "sum(container_memory_cache{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)",
"hide": false,
"interval": "",
@@ -546,44 +710,12 @@
"refId": "A"
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "Memory Cached",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:606",
- "format": "bytes",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:607",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
"collapsed": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"gridPos": {
"h": 1,
"w": 24,
@@ -605,63 +737,138 @@
"type": "row"
},
{
- "aliasColors": {
- "client1": "red",
- "client2": "blue",
- "server": "yellow"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
+ "datasource": {},
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "server"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 27
},
- "hiddenSeries": false,
"id": 4,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "percentage": false,
"pluginVersion": "10.2.2",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
- "editorMode": "code",
+ "editorMode": "builder",
"expr": "sum(rate(container_network_receive_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)",
"hide": false,
"interval": "",
@@ -670,94 +877,142 @@
"refId": "A"
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "Received Network Traffic",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:674",
- "format": "Bps",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:675",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
- "aliasColors": {
- "client1": "red",
- "client2": "blue",
- "server": "yellow"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
+ "datasource": {},
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "client2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "server"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 27
},
- "hiddenSeries": false,
"id": 6,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [
+ "mean",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "percentage": false,
"pluginVersion": "10.2.2",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
- "editorMode": "code",
+ "editorMode": "builder",
"expr": "sum(rate(container_network_transmit_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)",
"interval": "",
"legendFormat": "{{name}}",
@@ -765,37 +1020,8 @@
"refId": "A"
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "Sent Network Traffic",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:832",
- "format": "Bps",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:833",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
"collapsed": false,
@@ -824,10 +1050,7 @@
"type": "row"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "db69454e-e558-479e-b4fc-80db52bf91da"
- },
+ "datasource": {},
"fieldConfig": {
"defaults": {
"custom": {
@@ -916,18 +1139,19 @@
"showHeader": true,
"sortBy": []
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "11.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "db69454e-e558-479e-b4fc-80db52bf91da"
},
+ "editorMode": "code",
"expr": "(time() - container_start_time_seconds{instance=~\"$host\",name=~\"$container\",name=~\".+\"})/86400",
"format": "table",
"instant": true,
"interval": "",
- "legendFormat": "{{name}}",
+ "legendFormat": "__auto",
"refId": "A"
}
],
@@ -969,8 +1193,8 @@
"type": "table"
}
],
- "refresh": "auto",
- "schemaVersion": 38,
+ "refresh": "5s",
+ "schemaVersion": 39,
"tags": [],
"templating": {
"list": [
@@ -1042,10 +1266,11 @@
"from": "now-15m",
"to": "now"
},
+ "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {},
"timezone": "",
- "title": "Cadvisor exporter Copy",
+ "title": "Flower Dashboard",
"uid": "fcf2a8da-792c-4b9f-a22f-876820b53c2f",
- "version": 2,
+ "version": 3,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml
index 7c8ce00fdcdc..2ae3f9c7757a 100644
--- a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml
+++ b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml
@@ -1,9 +1,9 @@
apiVersion: 1
datasources:
-- name: Prometheus
- type: prometheus
- access: proxy
- uid: db69454e-e558-479e-b4fc-80db52bf91da
- url: http://host.docker.internal:9090
- isDefault: true
+ - name: Prometheus
+ type: prometheus
+ access: proxy
+ url: http://prometheus:9090
+ isDefault: true
+ uid: db69454e-e558-479e-b4fc-80db52bf91da
diff --git a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py
index 4067439a4544..8aecc583ed92 100644
--- a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py
+++ b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py
@@ -31,7 +31,6 @@ def create_docker_compose(args):
]
docker_compose_content = f"""
-version: '3'
services:
prometheus:
image: prom/prometheus:latest
@@ -63,7 +62,7 @@ def create_docker_compose(args):
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
- - /var/run/docker.sock:/var/run/docker.sock
+ - /var/run/docker.sock:/var/run/docker.sock
grafana:
image: grafana/grafana:latest
@@ -84,7 +83,6 @@ def create_docker_compose(args):
command:
- --config=/etc/grafana/grafana.ini
-
server:
container_name: server
build:
@@ -96,11 +94,12 @@ def create_docker_compose(args):
DOCKER_HOST_IP: host.docker.internal
volumes:
- .:/app
- - /var/run/docker.sock:/var/run/docker.sock
+ - /var/run/docker.sock:/var/run/docker.sock
ports:
- "6000:6000"
- "8265:8265"
- "8000:8000"
+ stop_signal: SIGINT
depends_on:
- prometheus
- grafana
@@ -134,6 +133,7 @@ def create_docker_compose(args):
FLASK_RUN_PORT: {6000 + i}
container_name: client{i}
DOCKER_HOST_IP: host.docker.internal
+ stop_signal: SIGINT
"""
docker_compose_content += "volumes:\n grafana-storage:\n"
diff --git a/examples/flowertune-llm/pyproject.toml b/examples/flowertune-llm/pyproject.toml
index 5c057de2ea70..4925f3cba15a 100644
--- a/examples/flowertune-llm/pyproject.toml
+++ b/examples/flowertune-llm/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "FlowerTune LLM: Federated LLM Fine-tuning with Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]==1.11.1",
+ "flwr[simulation]==1.12.0",
"flwr-datasets>=0.3.0",
"trl==0.8.1",
"bitsandbytes==0.43.0",
diff --git a/examples/flowertune-vit/pyproject.toml b/examples/flowertune-vit/pyproject.toml
index d0feabc14212..bf280de8af95 100644
--- a/examples/flowertune-vit/pyproject.toml
+++ b/examples/flowertune-vit/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Finetuning of a Vision Transformer with Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]==1.11.0",
+ "flwr[simulation]==1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/examples/opacus/README.md b/examples/opacus/README.md
index aea5d0f689fe..d08f534f878e 100644
--- a/examples/opacus/README.md
+++ b/examples/opacus/README.md
@@ -1,5 +1,5 @@
---
-tags: [dp, security, fds]
+tags: [DP, DP-SGD, basic, vision, fds, privacy]
dataset: [CIFAR-10]
framework: [opacus, torch]
---
@@ -10,57 +10,54 @@ In this example, we demonstrate how to train a model with differential privacy (
For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about Opacus, visit the official [website](https://opacus.ai/).
-## Environments Setup
+## Set up the project
-Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you:
+### Clone the project
+
+Start by cloning the example project:
```shell
-git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/opacus . && rm -rf flower && cd opacus
+git clone --depth=1 https://github.com/adap/flower.git \
+ && mv flower/examples/opacus . \
+ && rm -rf flower \
+ && cd opacus
```
This will create a new directory called `opacus` containing the following files:
```shell
--- pyproject.toml
--- client.py
--- server.py
--- README.md
+opacus
+├── opacus_fl
+│ ├── client_app.py # Defines your ClientApp
+│ ├── server_app.py # Defines your ServerApp
+│ └── task.py # Defines your model, training, and data loading
+├── pyproject.toml # Project metadata like dependencies and configs
+└── README.md
```
-### Installing dependencies
+### Install dependencies and project
-Project dependencies are defined in `pyproject.toml`. Install them with:
+Install the dependencies defined in `pyproject.toml` as well as the `opacus_fl` package. From a new python environment, run:
```shell
-pip install .
+pip install -e .
```
-## Run Flower with Opacus and Pytorch
-
-### 1. Start the long-running Flower server (SuperLink)
-
-```bash
-flower-superlink --insecure
-```
+## Run the project
-### 2. Start the long-running Flower clients (SuperNodes)
+You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine.
-Start 2 Flower `SuperNodes` in 2 separate terminal windows, using:
+### Run with the Simulation Engine
```bash
-flower-client-app client:appA --insecure
+flwr run .
```
-```bash
-flower-client-app client:appB --insecure
-```
-
-Opacus hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively.
-
-### 3. Run the Flower App
-
-With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App:
+You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example:
```bash
-flower-server-app server:app --insecure
+flwr run . --run-config "max-grad-norm=1.0 num-server-rounds=5"
```
+
+> \[!NOTE\]
+> Please note that, at the current state, users cannot set `NodeConfig` for simulated `ClientApp`s. For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s.
diff --git a/examples/opacus/client.py b/examples/opacus/client.py
deleted file mode 100644
index 2771a5d78bcc..000000000000
--- a/examples/opacus/client.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import argparse
-import warnings
-from collections import OrderedDict
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from flwr.client import ClientApp, NumPyClient
-from flwr_datasets import FederatedDataset
-from opacus import PrivacyEngine
-from torch.utils.data import DataLoader
-from torchvision.transforms import Compose, Normalize, ToTensor
-from tqdm import tqdm
-
-warnings.filterwarnings("ignore", category=UserWarning)
-
-DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-
-class Net(nn.Module):
- """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')"""
-
- def __init__(self) -> None:
- super(Net, self).__init__()
- self.conv1 = nn.Conv2d(3, 6, 5)
- self.pool = nn.MaxPool2d(2, 2)
- self.conv2 = nn.Conv2d(6, 16, 5)
- self.fc1 = nn.Linear(16 * 5 * 5, 120)
- self.fc2 = nn.Linear(120, 84)
- self.fc3 = nn.Linear(84, 10)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.pool(F.relu(self.conv1(x)))
- x = self.pool(F.relu(self.conv2(x)))
- x = x.view(-1, 16 * 5 * 5)
- x = F.relu(self.fc1(x))
- x = F.relu(self.fc2(x))
- return self.fc3(x)
-
-
-def train(net, train_loader, privacy_engine, optimizer, target_delta, epochs=1):
- criterion = torch.nn.CrossEntropyLoss()
- for _ in range(epochs):
- for batch in tqdm(train_loader, "Training"):
- images = batch["img"]
- labels = batch["label"]
- optimizer.zero_grad()
- criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward()
- optimizer.step()
-
- epsilon = privacy_engine.get_epsilon(delta=target_delta)
- return epsilon
-
-
-def test(net, test_loader):
- criterion = torch.nn.CrossEntropyLoss()
- correct, loss = 0, 0.0
- with torch.no_grad():
- for batch in tqdm(test_loader, "Testing"):
- images = batch["img"].to(DEVICE)
- labels = batch["label"].to(DEVICE)
- outputs = net(images)
- loss += criterion(outputs, labels).item()
- correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
- accuracy = correct / len(test_loader.dataset)
- return loss, accuracy
-
-
-def load_data(partition_id):
- fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2})
- partition = fds.load_partition(partition_id)
- # Divide data on each node: 80% train, 20% test
- partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
- pytorch_transforms = Compose(
- [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
- )
-
- def apply_transforms(batch):
- batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
- return batch
-
- partition_train_test = partition_train_test.with_transform(apply_transforms)
- train_loader = DataLoader(
- partition_train_test["train"], batch_size=32, shuffle=True
- )
- test_loader = DataLoader(partition_train_test["test"], batch_size=32)
- return train_loader, test_loader
-
-
-class FlowerClient(NumPyClient):
- def __init__(
- self,
- model,
- train_loader,
- test_loader,
- target_delta,
- noise_multiplier,
- max_grad_norm,
- ) -> None:
- super().__init__()
- self.test_loader = test_loader
- self.optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
- self.privacy_engine = PrivacyEngine(secure_mode=False)
- self.target_delta = target_delta
- (
- self.model,
- self.optimizer,
- self.train_loader,
- ) = self.privacy_engine.make_private(
- module=model,
- optimizer=self.optimizer,
- data_loader=train_loader,
- noise_multiplier=noise_multiplier,
- max_grad_norm=max_grad_norm,
- )
-
- def get_parameters(self, config):
- return [val.cpu().numpy() for _, val in self.model.state_dict().items()]
-
- def set_parameters(self, parameters):
- params_dict = zip(self.model.state_dict().keys(), parameters)
- state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
- self.model.load_state_dict(state_dict, strict=True)
-
- def fit(self, parameters, config):
- self.set_parameters(parameters)
- epsilon = train(
- self.model,
- self.train_loader,
- self.privacy_engine,
- self.optimizer,
- self.target_delta,
- )
-
- if epsilon is not None:
- print(f"Epsilon value for delta={self.target_delta} is {epsilon:.2f}")
- else:
- print("Epsilon value not available.")
- return (self.get_parameters(config={}), len(self.train_loader), {})
-
- def evaluate(self, parameters, config):
- self.set_parameters(parameters)
- loss, accuracy = test(self.model, self.test_loader)
- return loss, len(self.test_loader.dataset), {"accuracy": accuracy}
-
-
-def client_fn_parameterized(
- partition_id, target_delta=1e-5, noise_multiplier=1.3, max_grad_norm=1.0
-):
- def client_fn(cid: str):
- net = Net().to(DEVICE)
- train_loader, test_loader = load_data(partition_id=partition_id)
- return FlowerClient(
- net,
- train_loader,
- test_loader,
- target_delta,
- noise_multiplier,
- max_grad_norm,
- ).to_client()
-
- return client_fn
-
-
-appA = ClientApp(
- client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.5),
-)
-
-appB = ClientApp(
- client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1),
-)
diff --git a/examples/opacus/opacus_fl/__init__.py b/examples/opacus/opacus_fl/__init__.py
new file mode 100644
index 000000000000..91006b32e386
--- /dev/null
+++ b/examples/opacus/opacus_fl/__init__.py
@@ -0,0 +1 @@
+"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine."""
diff --git a/examples/opacus/opacus_fl/client_app.py b/examples/opacus/opacus_fl/client_app.py
new file mode 100644
index 000000000000..631e99092789
--- /dev/null
+++ b/examples/opacus/opacus_fl/client_app.py
@@ -0,0 +1,92 @@
+"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine."""
+
+import warnings
+
+import torch
+from opacus import PrivacyEngine
+from opacus_fl.task import Net, get_weights, load_data, set_weights, test, train
+import logging
+
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+
+warnings.filterwarnings("ignore", category=UserWarning)
+
+
+class FlowerClient(NumPyClient):
+ def __init__(
+ self,
+ train_loader,
+ test_loader,
+ target_delta,
+ noise_multiplier,
+ max_grad_norm,
+ ) -> None:
+ super().__init__()
+ self.model = Net()
+ self.train_loader = train_loader
+ self.test_loader = test_loader
+ self.target_delta = target_delta
+ self.noise_multiplier = noise_multiplier
+ self.max_grad_norm = max_grad_norm
+
+ self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ def fit(self, parameters, config):
+ model = self.model
+ set_weights(model, parameters)
+
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
+
+ privacy_engine = PrivacyEngine(secure_mode=False)
+ (
+ model,
+ optimizer,
+ self.train_loader,
+ ) = privacy_engine.make_private(
+ module=model,
+ optimizer=optimizer,
+ data_loader=self.train_loader,
+ noise_multiplier=self.noise_multiplier,
+ max_grad_norm=self.max_grad_norm,
+ )
+
+ epsilon = train(
+ model,
+ self.train_loader,
+ privacy_engine,
+ optimizer,
+ self.target_delta,
+ device=self.device,
+ )
+
+ if epsilon is not None:
+ print(f"Epsilon value for delta={self.target_delta} is {epsilon:.2f}")
+ else:
+ print("Epsilon value not available.")
+
+ return (get_weights(model), len(self.train_loader.dataset), {})
+
+ def evaluate(self, parameters, config):
+ set_weights(self.model, parameters)
+ loss, accuracy = test(self.model, self.test_loader, self.device)
+ return loss, len(self.test_loader.dataset), {"accuracy": accuracy}
+
+
+def client_fn(context: Context):
+ partition_id = context.node_config["partition-id"]
+ noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5
+
+ train_loader, test_loader = load_data(
+ partition_id=partition_id, num_partitions=context.node_config["num-partitions"]
+ )
+ return FlowerClient(
+ train_loader,
+ test_loader,
+ context.run_config["target-delta"],
+ noise_multiplier,
+ context.run_config["max-grad-norm"],
+ ).to_client()
+
+
+app = ClientApp(client_fn=client_fn)
diff --git a/examples/opacus/opacus_fl/server_app.py b/examples/opacus/opacus_fl/server_app.py
new file mode 100644
index 000000000000..2c105d36df41
--- /dev/null
+++ b/examples/opacus/opacus_fl/server_app.py
@@ -0,0 +1,37 @@
+"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine."""
+
+import logging
+from typing import List, Tuple
+
+from opacus_fl.task import Net, get_weights
+
+from flwr.common import Context, Metrics, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+
+# Opacus logger seems to change the flwr logger to DEBUG level. Set back to INFO
+logging.getLogger("flwr").setLevel(logging.INFO)
+
+
+def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
+ accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
+ examples = [num_examples for num_examples, _ in metrics]
+ return {"accuracy": sum(accuracies) / sum(examples)}
+
+
+def server_fn(context: Context) -> ServerAppComponents:
+ num_rounds = context.run_config["num-server-rounds"]
+
+ ndarrays = get_weights(Net())
+ parameters = ndarrays_to_parameters(ndarrays)
+
+ strategy = FedAvg(
+ evaluate_metrics_aggregation_fn=weighted_average,
+ initial_parameters=parameters,
+ )
+ config = ServerConfig(num_rounds=num_rounds)
+
+ return ServerAppComponents(config=config, strategy=strategy)
+
+
+app = ServerApp(server_fn=server_fn)
diff --git a/examples/opacus/opacus_fl/task.py b/examples/opacus/opacus_fl/task.py
new file mode 100644
index 000000000000..0c7ef71dc50b
--- /dev/null
+++ b/examples/opacus/opacus_fl/task.py
@@ -0,0 +1,102 @@
+"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine."""
+
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from flwr_datasets import FederatedDataset
+from flwr_datasets.partitioner import IidPartitioner
+from torch.utils.data import DataLoader
+from torchvision.transforms import Compose, Normalize, ToTensor
+from tqdm import tqdm
+
+fds = None # Cache FederatedDataset
+
+
+class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, 5)
+ self.pool = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.fc1 = nn.Linear(16 * 5 * 5, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, 10)
+
+ def forward(self, x):
+ x = self.pool(F.relu(self.conv1(x)))
+ x = self.pool(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 5 * 5)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ return self.fc3(x)
+
+
+def get_weights(net):
+ return [val.cpu().numpy() for _, val in net.state_dict().items()]
+
+
+def set_weights(net, parameters):
+ params_dict = zip(net.state_dict().keys(), parameters)
+ state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
+ net.load_state_dict(state_dict, strict=True)
+
+
+def load_data(partition_id: int, num_partitions: int):
+ global fds
+ if fds is None:
+ partitioner = IidPartitioner(num_partitions=num_partitions)
+ fds = FederatedDataset(
+ dataset="uoft-cs/cifar10",
+ partitioners={"train": partitioner},
+ )
+
+ partition = fds.load_partition(partition_id)
+ # Divide data on each node: 80% train, 20% test
+ partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
+ pytorch_transforms = Compose(
+ [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
+ )
+
+ def apply_transforms(batch):
+ batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
+ return batch
+
+ partition_train_test = partition_train_test.with_transform(apply_transforms)
+ train_loader = DataLoader(
+ partition_train_test["train"], batch_size=32, shuffle=True
+ )
+ test_loader = DataLoader(partition_train_test["test"], batch_size=32)
+ return train_loader, test_loader
+
+
+def train(net, train_loader, privacy_engine, optimizer, target_delta, device, epochs=1):
+ criterion = torch.nn.CrossEntropyLoss()
+ net.to(device)
+ net.train()
+ for _ in range(epochs):
+ for batch in tqdm(train_loader, "Training"):
+ images = batch["img"]
+ labels = batch["label"]
+ optimizer.zero_grad()
+ criterion(net(images.to(device)), labels.to(device)).backward()
+ optimizer.step()
+
+ epsilon = privacy_engine.get_epsilon(delta=target_delta)
+ return epsilon
+
+
+def test(net, test_loader, device):
+ net.to(device)
+ criterion = torch.nn.CrossEntropyLoss()
+ correct, loss = 0, 0.0
+ with torch.no_grad():
+ for batch in tqdm(test_loader, "Testing"):
+ images = batch["img"].to(device)
+ labels = batch["label"].to(device)
+ outputs = net(images)
+ loss += criterion(outputs, labels).item()
+ correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
+ accuracy = correct / len(test_loader.dataset)
+ return loss, accuracy
diff --git a/examples/opacus/pyproject.toml b/examples/opacus/pyproject.toml
index 0aaa167d0a28..4814709569ef 100644
--- a/examples/opacus/pyproject.toml
+++ b/examples/opacus/pyproject.toml
@@ -3,20 +3,35 @@ requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
-name = "opacus-fl"
-version = "0.1.0"
-description = "Sample Differential Privacy with Opacus in Flower"
-authors = [
- { name = "The Flower Authors", email = "hello@flower.ai" },
-]
+name = "opacus_fl"
+version = "1.0.0"
+description = "Sample-level Differential Privacy with Opacus in Flower"
+
dependencies = [
- "flwr>=1.8.0,<2.0",
- "flwr-datasets[vision]>=0.0.2,<1.0.0",
+ "flwr[simulation]>=1.12.0",
+ "flwr-datasets[vision]>=0.3.0",
"torch==2.1.1",
"torchvision==0.16.1",
- "tqdm==4.65.0",
- "opacus==v1.4.1"
+ "opacus==v1.4.1",
]
[tool.hatch.build.targets.wheel]
-packages = ["."]
\ No newline at end of file
+packages = ["."]
+
+[tool.flwr.app]
+publisher = "flwrlabs"
+
+[tool.flwr.app.components]
+serverapp = "opacus_fl.server_app:app"
+clientapp = "opacus_fl.client_app:app"
+
+[tool.flwr.app.config]
+num-server-rounds = 3
+target-delta = 1e-5
+max-grad-norm = 1.0
+
+[tool.flwr.federations]
+default = "local-simulation"
+
+[tool.flwr.federations.local-simulation]
+options.num-supernodes = 2
diff --git a/examples/opacus/server.py b/examples/opacus/server.py
deleted file mode 100644
index 68c1c027d3d6..000000000000
--- a/examples/opacus/server.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import List, Tuple
-
-import flwr as fl
-from flwr.common import Metrics
-from flwr.server import ServerApp, ServerConfig
-from flwr.server.strategy import FedAvg
-
-
-def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
- accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
- examples = [num_examples for num_examples, _ in metrics]
- return {"accuracy": sum(accuracies) / sum(examples)}
-
-
-strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average)
-
-config = ServerConfig(num_rounds=3)
-
-app = ServerApp(
- config=config,
- strategy=strategy,
-)
diff --git a/examples/pytorch-federated-variational-autoencoder/pyproject.toml b/examples/pytorch-federated-variational-autoencoder/pyproject.toml
index 5109eaf4d2e2..ade08a639f2b 100644
--- a/examples/pytorch-federated-variational-autoencoder/pyproject.toml
+++ b/examples/pytorch-federated-variational-autoencoder/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Variational Autoencoder Example with PyTorch and Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/examples/quickstart-fastai/pyproject.toml b/examples/quickstart-fastai/pyproject.toml
index 25219ffcac4c..34b817f84e41 100644
--- a/examples/quickstart-fastai/pyproject.toml
+++ b/examples/quickstart-fastai/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with Fastai and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"fastai==2.7.14",
"torch==2.2.0",
diff --git a/examples/quickstart-huggingface/pyproject.toml b/examples/quickstart-huggingface/pyproject.toml
index f479acfa0918..f13c48d96cae 100644
--- a/examples/quickstart-huggingface/pyproject.toml
+++ b/examples/quickstart-huggingface/pyproject.toml
@@ -12,7 +12,7 @@ authors = [
{ name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" },
]
dependencies = [
- "flwr[simulation]==1.11.0",
+ "flwr[simulation]==1.12.0",
"flwr-datasets>=0.3.0",
"torch==2.4.0",
"transformers>=4.30.0,<5.0",
diff --git a/examples/quickstart-mlcube/pyproject.toml b/examples/quickstart-mlcube/pyproject.toml
index f790a596ed19..0418efc0b440 100644
--- a/examples/quickstart-mlcube/pyproject.toml
+++ b/examples/quickstart-mlcube/pyproject.toml
@@ -10,7 +10,7 @@ authors = ["The Flower Authors "]
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
-flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true }
+flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true }
tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" }
tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" }
mlcube = "0.0.9"
diff --git a/examples/quickstart-mlx/pyproject.toml b/examples/quickstart-mlx/pyproject.toml
index 459cac86f5d6..3165a3d93881 100644
--- a/examples/quickstart-mlx/pyproject.toml
+++ b/examples/quickstart-mlx/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with MLX and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"mlx==0.16.0",
"numpy==1.26.4",
diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml
index daa92fc0387d..7a6e766bb853 100644
--- a/examples/quickstart-monai/pyproject.toml
+++ b/examples/quickstart-monai/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with MONAI and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]==1.11.0",
+ "flwr[simulation]==1.12.0",
"flwr-datasets[vision]>=0.3.0",
"monai==1.3.2",
"filelock==3.15.4",
diff --git a/examples/quickstart-pandas/pyproject.toml b/examples/quickstart-pandas/pyproject.toml
index 7df8ab86cb0c..a80311292acb 100644
--- a/examples/quickstart-pandas/pyproject.toml
+++ b/examples/quickstart-pandas/pyproject.toml
@@ -12,7 +12,7 @@ authors = [
{ name = "Ragy Haddad", email = "ragy202@gmail.com" },
]
dependencies = [
- "flwr[simulation]>=1.11.1",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"numpy==1.24.4",
"pandas==2.0.0",
diff --git a/examples/quickstart-pytorch-lightning/pyproject.toml b/examples/quickstart-pytorch-lightning/pyproject.toml
index c5537ac6fcbe..e305d1ca75e8 100644
--- a/examples/quickstart-pytorch-lightning/pyproject.toml
+++ b/examples/quickstart-pytorch-lightning/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with PyTorch Lightning and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"pytorch-lightning<2.0.0; sys_platform == 'darwin'",
"pytorch-lightning==1.6.0; sys_platform != 'darwin'",
diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml
index 98f02626a429..fa086d18880d 100644
--- a/examples/quickstart-pytorch/pyproject.toml
+++ b/examples/quickstart-pytorch/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with PyTorch and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/examples/quickstart-sklearn-tabular/pyproject.toml b/examples/quickstart-sklearn-tabular/pyproject.toml
index 2f2775e9fe90..4fc34ed58bb6 100644
--- a/examples/quickstart-sklearn-tabular/pyproject.toml
+++ b/examples/quickstart-sklearn-tabular/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with scikit-learn and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"scikit-learn>=1.3.0",
]
diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml
index 5441dab31a8e..f5fc566d654c 100644
--- a/examples/quickstart-tensorflow/pyproject.toml
+++ b/examples/quickstart-tensorflow/pyproject.toml
@@ -8,10 +8,10 @@ version = "1.0.0"
description = "Federated Learning with Tensorflow/Keras and Flower (Quickstart Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"",
- "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\""
+ "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"",
]
[tool.hatch.build.targets.wheel]
packages = ["."]
diff --git a/examples/sklearn-logreg-mnist/pyproject.toml b/examples/sklearn-logreg-mnist/pyproject.toml
index 937f05e35eda..75dae57a0a40 100644
--- a/examples/sklearn-logreg-mnist/pyproject.toml
+++ b/examples/sklearn-logreg-mnist/pyproject.toml
@@ -12,7 +12,7 @@ authors = [
{ name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" },
]
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"numpy<2.0.0",
"scikit-learn~=1.2.2",
diff --git a/examples/tensorflow-privacy/pyproject.toml b/examples/tensorflow-privacy/pyproject.toml
index 48248cb31195..b404f7f183a0 100644
--- a/examples/tensorflow-privacy/pyproject.toml
+++ b/examples/tensorflow-privacy/pyproject.toml
@@ -7,12 +7,12 @@ name = "tensorflow-privacy-fl"
version = "1.0.0"
description = "Sample-level Differential Privacy with Tensorflow-Privacy in Flower"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"tensorflow-estimator~=2.4",
"tensorflow-probability~=0.22.0",
"tensorflow>=2.4.0,<=2.15.0",
- "tensorflow-privacy == 0.9.0"
+ "tensorflow-privacy == 0.9.0",
]
[tool.hatch.build.targets.wheel]
diff --git a/examples/vertical-fl/pyproject.toml b/examples/vertical-fl/pyproject.toml
index 9ebc2251c0dd..458878748cde 100644
--- a/examples/vertical-fl/pyproject.toml
+++ b/examples/vertical-fl/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "PyTorch Vertical FL with Flower"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"numpy==1.24.4",
"pandas==2.0.3",
@@ -35,4 +35,3 @@ default = "local-simulation"
[tool.flwr.federations.local-simulation]
options.num-supernodes = 3 # Note that this example will require changes to how VFL is implemented
-
diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml
index e2a891766efb..3906f8bf3301 100644
--- a/examples/xgboost-comprehensive/pyproject.toml
+++ b/examples/xgboost-comprehensive/pyproject.toml
@@ -8,7 +8,7 @@ version = "1.0.0"
description = "Federated Learning with XGBoost and Flower (Comprehensive Example)"
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.1",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"xgboost>=2.0.0",
]
@@ -25,21 +25,21 @@ clientapp = "xgboost_comprehensive.client_app:app"
[tool.flwr.app.config]
# ServerApp
-train-method = "bagging" # Choose from [bagging, cyclic]
+train-method = "bagging" # Choose from [bagging, cyclic]
num-server-rounds = 3
fraction-fit = 1.0
fraction-evaluate = 1.0
centralised-eval = false
# ClientApp
-partitioner-type = "uniform" # Choose from [uniform, linear, square, exponential]
+partitioner-type = "uniform" # Choose from [uniform, linear, square, exponential]
test-fraction = 0.2
seed = 42
centralised-eval-client = false
local-epochs = 1
scaled-lr = false
params.objective = "binary:logistic"
-params.eta = 0.1 # Learning rate
+params.eta = 0.1 # Learning rate
params.max-depth = 8
params.eval-metric = "auc"
params.nthread = 16
diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml
index da3561bfded4..3bfedb6b1d58 100644
--- a/examples/xgboost-quickstart/pyproject.toml
+++ b/examples/xgboost-quickstart/pyproject.toml
@@ -32,7 +32,7 @@ fraction-evaluate = 0.1
# ClientApp
local-epochs = 1
params.objective = "binary:logistic"
-params.eta = 0.1 # Learning rate
+params.eta = 0.1 # Learning rate
params.max-depth = 8
params.eval-metric = "auc"
params.nthread = 16
diff --git a/pyproject.toml b/pyproject.toml
index 87059cf5c867..4b32908c8f51 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "flwr"
-version = "1.12.0"
+version = "1.13.0"
description = "Flower: A Friendly Federated Learning Framework"
license = "Apache-2.0"
authors = ["The Flower Authors "]
@@ -43,12 +43,8 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
]
-packages = [
- { include = "flwr", from = "src/py" },
-]
-exclude = [
- "src/py/**/*_test.py",
-]
+packages = [{ include = "flwr", from = "src/py" }]
+exclude = ["src/py/**/*_test.py"]
[tool.poetry.scripts]
# `flwr` CLI
@@ -62,7 +58,7 @@ flower-superlink = "flwr.server.app:run_superlink"
flower-supernode = "flwr.client.supernode.app:run_supernode"
flower-server-app = "flwr.server.run_serverapp:run_server_app"
flwr-clientapp = "flwr.client.clientapp:flwr_clientapp"
-flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated
+flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated
[tool.poetry.dependencies]
python = "^3.9"
@@ -96,9 +92,10 @@ types-setuptools = "==69.0.0.20240125"
clang-format = "==17.0.6"
isort = "==5.13.2"
black = { version = "==24.2.0", extras = ["jupyter"] }
+taplo = "==0.9.3"
docformatter = "==1.7.5"
mypy = "==1.8.0"
-pylint = "==3.0.3"
+pylint = "==3.3.1"
flake8 = "==5.0.4"
parameterized = "==0.9.0"
pytest = "==7.4.4"
@@ -137,6 +134,7 @@ pre-commit = "==3.5.0"
sphinx-substitution-extensions = "2022.02.16"
sphinxext-opengraph = "==0.9.1"
docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-1" }
+docsig = "==0.64.0"
[tool.docstrfmt]
extend_exclude = [
@@ -160,10 +158,7 @@ disable = "duplicate-code,too-few-public-methods,useless-import-alias"
[tool.pytest.ini_options]
minversion = "6.2"
addopts = "-qq"
-testpaths = [
- "src/py/flwr",
- "src/py/flwr_tool",
-]
+testpaths = ["src/py/flwr", "src/py/flwr_tool"]
filterwarnings = "ignore::DeprecationWarning"
[tool.pytest-watcher]
@@ -176,17 +171,12 @@ patterns = ["*.py"]
ignore_patterns = []
[tool.mypy]
-plugins = [
- "numpy.typing.mypy_plugin",
-]
+plugins = ["numpy.typing.mypy_plugin"]
ignore_missing_imports = true
strict = true
[[tool.mypy.overrides]]
-module = [
- "importlib.metadata.*",
- "importlib_metadata.*",
-]
+module = ["importlib.metadata.*", "importlib_metadata.*"]
follow_imports = "skip"
follow_imports_for_stubs = true
disallow_untyped_calls = false
@@ -235,3 +225,7 @@ convention = "numpy"
[tool.ruff.per-file-ignores]
"src/py/flwr/server/strategy/*.py" = ["E501"]
+
+[tool.docsig]
+ignore-no-params = true
+exclude = 'src/py/flwr/proto/.*|src/py/flwr/.*_test\.py|src/py/flwr/cli/new/templates/.*\.tpl'
diff --git a/src/docker/base/README.md b/src/docker/base/README.md
index b17c3d6e5c6f..ebbb31106f74 100644
--- a/src/docker/base/README.md
+++ b/src/docker/base/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1-py3.11-alpine3.19`
- `1.11.1-py3.11-ubuntu22.04`
- `1.11.1-py3.10-ubuntu22.04`
diff --git a/src/docker/clientapp/README.md b/src/docker/clientapp/README.md
index c7975ccd762c..19ea31e966a7 100644
--- a/src/docker/clientapp/README.md
+++ b/src/docker/clientapp/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1`, `1.11.1-py3.11-ubuntu22.04`
- `1.11.1-py3.10-ubuntu22.04`
- `1.11.1-py3.9-ubuntu22.04`
diff --git a/src/docker/complete/compose.yml b/src/docker/complete/compose.yml
index e1dc2f5ffc56..b21189d94123 100644
--- a/src/docker/complete/compose.yml
+++ b/src/docker/complete/compose.yml
@@ -1,7 +1,7 @@
services:
# create a SuperLink service
superlink:
- image: flwr/superlink:${FLWR_VERSION:-1.11.1}
+ image: flwr/superlink:${FLWR_VERSION:-1.12.0}
command:
- --insecure
@@ -10,7 +10,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/superexec:${FLWR_VERSION:-1.11.1}
+ FROM flwr/superexec:${FLWR_VERSION:-1.12.0}
# gcc is required for the fastai quickstart example
USER root
@@ -39,7 +39,7 @@ services:
# create a two SuperNode service with different node configs
supernode-1:
- image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ image: flwr/supernode:${FLWR_VERSION:-1.12.0}
command:
- --insecure
- --superlink
@@ -54,7 +54,7 @@ services:
- superlink
supernode-2:
- image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ image: flwr/supernode:${FLWR_VERSION:-1.12.0}
command:
- --insecure
- --superlink
@@ -71,7 +71,7 @@ services:
# uncomment to add another SuperNode
#
# supernode-3:
- # image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ # image: flwr/supernode:${FLWR_VERSION:-1.12.0}
# command:
# - --insecure
# - --superlink
@@ -89,7 +89,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
# gcc is required for the fastai quickstart example
USER root
@@ -120,7 +120,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
# gcc is required for the fastai quickstart example
USER root
@@ -153,7 +153,7 @@ services:
# build:
# context: ${PROJECT_DIR:-.}
# dockerfile_inline: |
- # FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
# # gcc is required for the fastai quickstart example
# USER root
diff --git a/src/docker/distributed/client/compose.yml b/src/docker/distributed/client/compose.yml
index 60a7dcdf9b61..6bc6e6739ae4 100644
--- a/src/docker/distributed/client/compose.yml
+++ b/src/docker/distributed/client/compose.yml
@@ -1,6 +1,6 @@
services:
supernode-1:
- image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ image: flwr/supernode:${FLWR_VERSION:-1.12.0}
command:
- --superlink
- ${SUPERLINK_IP:-127.0.0.1}:9092
@@ -17,7 +17,7 @@ services:
target: /app/certificates/ca.crt
supernode-2:
- image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ image: flwr/supernode:${FLWR_VERSION:-1.12.0}
command:
- --superlink
- ${SUPERLINK_IP:-127.0.0.1}:9092
@@ -36,7 +36,7 @@ services:
# uncomment to add another SuperNode
#
# supernode-3:
- # image: flwr/supernode:${FLWR_VERSION:-1.11.1}
+ # image: flwr/supernode:${FLWR_VERSION:-1.12.0}
# command:
# - --superlink
# - ${SUPERLINK_IP:-127.0.0.1}:9092
@@ -56,7 +56,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
WORKDIR /app
COPY --chown=app:app pyproject.toml .
@@ -79,7 +79,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
WORKDIR /app
COPY --chown=app:app pyproject.toml .
@@ -104,7 +104,7 @@ services:
# build:
# context: ${PROJECT_DIR:-.}
# dockerfile_inline: |
- # FROM flwr/clientapp:${FLWR_VERSION:-1.11.1}
+ # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0}
# WORKDIR /app
# COPY --chown=app:app pyproject.toml .
diff --git a/src/docker/distributed/server/compose.yml b/src/docker/distributed/server/compose.yml
index 54e9faf14b84..f53b63593eb8 100644
--- a/src/docker/distributed/server/compose.yml
+++ b/src/docker/distributed/server/compose.yml
@@ -1,6 +1,6 @@
services:
superlink:
- image: flwr/superlink:${FLWR_VERSION:-1.11.1}
+ image: flwr/superlink:${FLWR_VERSION:-1.12.0}
command:
- --ssl-ca-certfile=certificates/ca.crt
- --ssl-certfile=certificates/server.pem
@@ -22,7 +22,7 @@ services:
build:
context: ${PROJECT_DIR:-.}
dockerfile_inline: |
- FROM flwr/superexec:${FLWR_VERSION:-1.11.1}
+ FROM flwr/superexec:${FLWR_VERSION:-1.12.0}
WORKDIR /app
COPY --chown=app:app pyproject.toml .
diff --git a/src/docker/serverapp/README.md b/src/docker/serverapp/README.md
index da49eb3596b9..00831829faf6 100644
--- a/src/docker/serverapp/README.md
+++ b/src/docker/serverapp/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1`, `1.11.1-py3.11-ubuntu22.04`
- `1.11.1-py3.10-ubuntu22.04`
- `1.11.1-py3.9-ubuntu22.04`
diff --git a/src/docker/superexec/README.md b/src/docker/superexec/README.md
index ed44f24ca7ae..d844c00c8518 100644
--- a/src/docker/superexec/README.md
+++ b/src/docker/superexec/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1`, `1.11.1-py3.11-ubuntu22.04`
- `1.11.1-py3.10-ubuntu22.04`
- `1.11.1-py3.9-ubuntu22.04`
diff --git a/src/docker/superlink/README.md b/src/docker/superlink/README.md
index 0e20bf3d039f..7b29500c43fc 100644
--- a/src/docker/superlink/README.md
+++ b/src/docker/superlink/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1`, `1.11.1-py3.11-alpine3.19`
- `1.11.1-py3.11-ubuntu22.04`
- `1.11.0`, `1.11.0-py3.11-alpine3.19`
diff --git a/src/docker/supernode/README.md b/src/docker/supernode/README.md
index c2d99a500da4..0f0f480cac85 100644
--- a/src/docker/supernode/README.md
+++ b/src/docker/supernode/README.md
@@ -22,7 +22,7 @@
- `unstable`
- points to the last successful build of the `main` branch
- `nightly`, `.dev` e.g. `1.12.0.dev20240830`
- - uses Python 3.11 and Ubuntu 22.04
+ - uses Python 3.11 and Ubuntu 24.04
- `1.11.1`, `1.11.1-py3.11-alpine3.19`
- `1.11.1-py3.11-ubuntu22.04`
- `1.11.1-py3.10-ubuntu22.04`
diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py
index 137e2dc31aff..4c9dca4ebcf1 100644
--- a/src/py/flwr/cli/build.py
+++ b/src/py/flwr/cli/build.py
@@ -14,26 +14,50 @@
# ==============================================================================
"""Flower command line interface `build` command."""
+import hashlib
import os
+import shutil
+import tempfile
import zipfile
from pathlib import Path
-from typing import Annotated, Optional
+from typing import Annotated, Any, Optional, Union
import pathspec
import tomli_w
import typer
+from flwr.common.constant import FAB_ALLOWED_EXTENSIONS, FAB_DATE, FAB_HASH_TRUNCATION
+
from .config_utils import load_and_validate
-from .utils import get_sha256_hash, is_valid_project_name
+from .utils import is_valid_project_name
+
+
+def write_to_zip(
+ zipfile_obj: zipfile.ZipFile, filename: str, contents: Union[bytes, str]
+) -> zipfile.ZipFile:
+ """Set a fixed date and write contents to a zip file."""
+ zip_info = zipfile.ZipInfo(filename)
+ zip_info.date_time = FAB_DATE
+ zipfile_obj.writestr(zip_info, contents)
+ return zipfile_obj
+
+def get_fab_filename(conf: dict[str, Any], fab_hash: str) -> str:
+ """Get the FAB filename based on the given config and FAB hash."""
+ publisher = conf["tool"]["flwr"]["app"]["publisher"]
+ name = conf["project"]["name"]
+ version = conf["project"]["version"].replace(".", "-")
+ fab_hash_truncated = fab_hash[:FAB_HASH_TRUNCATION]
+ return f"{publisher}.{name}.{version}.{fab_hash_truncated}.fab"
-# pylint: disable=too-many-locals
+
+# pylint: disable=too-many-locals, too-many-statements
def build(
app: Annotated[
Optional[Path],
typer.Option(help="Path of the Flower App to bundle into a FAB"),
] = None,
-) -> str:
+) -> tuple[str, str]:
"""Build a Flower App into a Flower App Bundle (FAB).
You can run ``flwr build`` without any arguments to bundle the app located in the
@@ -85,16 +109,8 @@ def build(
# Load .gitignore rules if present
ignore_spec = _load_gitignore(app)
- # Set the name of the zip file
- fab_filename = (
- f"{conf['tool']['flwr']['app']['publisher']}"
- f".{conf['project']['name']}"
- f".{conf['project']['version'].replace('.', '-')}.fab"
- )
list_file_content = ""
- allowed_extensions = {".py", ".toml", ".md"}
-
# Remove the 'federations' field from 'tool.flwr' if it exists
if (
"tool" in conf
@@ -105,38 +121,53 @@ def build(
toml_contents = tomli_w.dumps(conf)
- with zipfile.ZipFile(fab_filename, "w", zipfile.ZIP_DEFLATED) as fab_file:
- fab_file.writestr("pyproject.toml", toml_contents)
+ with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_file:
+ temp_filename = temp_file.name
+
+ with zipfile.ZipFile(temp_filename, "w", zipfile.ZIP_DEFLATED) as fab_file:
+ write_to_zip(fab_file, "pyproject.toml", toml_contents)
- # Continue with adding other files
- for root, _, files in os.walk(app, topdown=True):
- files = [
+ # Continue with adding other files
+ all_files = [
f
- for f in files
- if not ignore_spec.match_file(Path(root) / f)
- and f != fab_filename
- and Path(f).suffix in allowed_extensions
- and f != "pyproject.toml" # Exclude the original pyproject.toml
+ for f in app.rglob("*")
+ if not ignore_spec.match_file(f)
+ and f.name != temp_filename
+ and f.suffix in FAB_ALLOWED_EXTENSIONS
+ and f.name != "pyproject.toml" # Exclude the original pyproject.toml
]
- for file in files:
- file_path = Path(root) / file
+ for file_path in all_files:
+ # Read the file content manually
+ with open(file_path, "rb") as f:
+ file_contents = f.read()
+
archive_path = file_path.relative_to(app)
- fab_file.write(file_path, archive_path)
+ write_to_zip(fab_file, str(archive_path), file_contents)
# Calculate file info
- sha256_hash = get_sha256_hash(file_path)
+ sha256_hash = hashlib.sha256(file_contents).hexdigest()
file_size_bits = os.path.getsize(file_path) * 8 # size in bits
list_file_content += f"{archive_path},{sha256_hash},{file_size_bits}\n"
- # Add CONTENT and CONTENT.jwt to the zip file
- fab_file.writestr(".info/CONTENT", list_file_content)
+ # Add CONTENT and CONTENT.jwt to the zip file
+ write_to_zip(fab_file, ".info/CONTENT", list_file_content)
+
+ # Get hash of FAB file
+ content = Path(temp_filename).read_bytes()
+ fab_hash = hashlib.sha256(content).hexdigest()
+
+ # Set the name of the zip file
+ fab_filename = get_fab_filename(conf, fab_hash)
+
+ # Once the temporary zip file is created, rename it to the final filename
+ shutil.move(temp_filename, fab_filename)
typer.secho(
f"🎊 Successfully built {fab_filename}", fg=typer.colors.GREEN, bold=True
)
- return fab_filename
+ return fab_filename, fab_hash
def _load_gitignore(app: Path) -> pathspec.PathSpec:
diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py
index 79e4973ccf9c..73ce779c3b5c 100644
--- a/src/py/flwr/cli/config_utils.py
+++ b/src/py/flwr/cli/config_utils.py
@@ -90,6 +90,16 @@ def load_and_validate(
) -> tuple[Optional[dict[str, Any]], list[str], list[str]]:
"""Load and validate pyproject.toml as dict.
+ Parameters
+ ----------
+ path : Optional[Path] (default: None)
+ The path of the Flower App config file to load. By default it
+ will try to use `pyproject.toml` inside the current directory.
+ check_module: bool (default: True)
+ Whether the validity of the Python module should be checked.
+ This requires the project to be installed in the currently
+ running environment. True by default.
+
Returns
-------
Tuple[Optional[config], List[str], List[str]]
diff --git a/src/py/flwr/cli/install.py b/src/py/flwr/cli/install.py
index 8e3e9505898c..7451aa3d2326 100644
--- a/src/py/flwr/cli/install.py
+++ b/src/py/flwr/cli/install.py
@@ -14,7 +14,7 @@
# ==============================================================================
"""Flower command line interface `install` command."""
-
+import hashlib
import shutil
import subprocess
import tempfile
@@ -25,7 +25,8 @@
import typer
-from flwr.common.config import get_flwr_dir
+from flwr.common.config import get_flwr_dir, get_metadata_from_config
+from flwr.common.constant import FAB_HASH_TRUNCATION
from .config_utils import load_and_validate
from .utils import get_sha256_hash
@@ -91,9 +92,11 @@ def install_from_fab(
fab_name: Optional[str]
if isinstance(fab_file, bytes):
fab_file_archive = BytesIO(fab_file)
+ fab_hash = hashlib.sha256(fab_file).hexdigest()
fab_name = None
elif isinstance(fab_file, Path):
fab_file_archive = fab_file
+ fab_hash = hashlib.sha256(fab_file.read_bytes()).hexdigest()
fab_name = fab_file.stem
else:
raise ValueError("fab_file must be either a Path or bytes")
@@ -126,14 +129,16 @@ def install_from_fab(
shutil.rmtree(info_dir)
installed_path = validate_and_install(
- tmpdir_path, fab_name, flwr_dir, skip_prompt
+ tmpdir_path, fab_hash, fab_name, flwr_dir, skip_prompt
)
return installed_path
+# pylint: disable=too-many-locals
def validate_and_install(
project_dir: Path,
+ fab_hash: str,
fab_name: Optional[str],
flwr_dir: Optional[Path],
skip_prompt: bool = False,
@@ -149,28 +154,17 @@ def validate_and_install(
)
raise typer.Exit(code=1)
- publisher = config["tool"]["flwr"]["app"]["publisher"]
- project_name = config["project"]["name"]
- version = config["project"]["version"]
+ version, fab_id = get_metadata_from_config(config)
+ publisher, project_name = fab_id.split("/")
+ config_metadata = (publisher, project_name, version, fab_hash)
- if (
- fab_name
- and fab_name != f"{publisher}.{project_name}.{version.replace('.', '-')}"
- ):
- typer.secho(
- "❌ FAB file has incorrect name. The file name must follow the format "
- "`...fab`.",
- fg=typer.colors.RED,
- bold=True,
- )
- raise typer.Exit(code=1)
+ if fab_name:
+ _validate_fab_and_config_metadata(fab_name, config_metadata)
install_dir: Path = (
(get_flwr_dir() if not flwr_dir else flwr_dir)
/ "apps"
- / publisher
- / project_name
- / version
+ / f"{publisher}.{project_name}.{version}.{fab_hash[:FAB_HASH_TRUNCATION]}"
)
if install_dir.exists():
if skip_prompt:
@@ -226,3 +220,49 @@ def _verify_hashes(list_content: str, tmpdir: Path) -> bool:
if not file_path.exists() or get_sha256_hash(file_path) != hash_expected:
return False
return True
+
+
+def _validate_fab_and_config_metadata(
+ fab_name: str, config_metadata: tuple[str, str, str, str]
+) -> None:
+ """Validate metadata from the FAB filename and config."""
+ publisher, project_name, version, fab_hash = config_metadata
+
+ fab_name = fab_name.removesuffix(".fab")
+
+ fab_publisher, fab_project_name, fab_version, fab_shorthash = fab_name.split(".")
+ fab_version = fab_version.replace("-", ".")
+
+ # Check FAB filename format
+ if (
+ f"{fab_publisher}.{fab_project_name}.{fab_version}"
+ != f"{publisher}.{project_name}.{version}"
+ or len(fab_shorthash) != FAB_HASH_TRUNCATION # Verify hash length
+ ):
+ typer.secho(
+ "❌ FAB file has incorrect name. The file name must follow the format "
+ "`...<8hexchars>.fab`.",
+ fg=typer.colors.RED,
+ bold=True,
+ )
+ raise typer.Exit(code=1)
+
+ # Verify hash is a valid hexadecimal
+ try:
+ _ = int(fab_shorthash, 16)
+ except Exception as e:
+ typer.secho(
+ f"❌ FAB file has an invalid hexadecimal string `{fab_shorthash}`.",
+ fg=typer.colors.RED,
+ bold=True,
+ )
+ raise typer.Exit(code=1) from e
+
+ # Verify shorthash matches
+ if fab_shorthash != fab_hash[:FAB_HASH_TRUNCATION]:
+ typer.secho(
+ "❌ The hash in the FAB file name does not match the hash of the FAB.",
+ fg=typer.colors.RED,
+ bold=True,
+ )
+ raise typer.Exit(code=1)
diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py
index e7dbee894314..3cbde991ff6e 100644
--- a/src/py/flwr/cli/new/new.py
+++ b/src/py/flwr/cli/new/new.py
@@ -240,6 +240,8 @@ def new(
MlFramework.HUGGINGFACE.value,
MlFramework.MLX.value,
MlFramework.TENSORFLOW.value,
+ MlFramework.SKLEARN.value,
+ MlFramework.NUMPY.value,
]
if framework_str in frameworks_with_tasks:
files[f"{import_name}/task.py"] = {
diff --git a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl
index 046de57f3cf3..ffe782d274fc 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl
@@ -1,9 +1,9 @@
"""$project_name: A Flower / $framework_str app."""
import jax
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
from $import_name.task import (
evaluation,
get_params,
@@ -17,37 +17,31 @@ from $import_name.task import (
# Define Flower Client and client_fn
class FlowerClient(NumPyClient):
- def __init__(self):
+ def __init__(self, input_dim):
self.train_x, self.train_y, self.test_x, self.test_y = load_data()
self.grad_fn = jax.grad(loss_fn)
- model_shape = self.train_x.shape[1:]
-
- self.params = load_model(model_shape)
-
- def get_parameters(self, config):
- return get_params(self.params)
-
- def set_parameters(self, parameters):
- set_params(self.params, parameters)
+ self.params = load_model((input_dim,))
def fit(self, parameters, config):
- self.set_parameters(parameters)
+ set_params(self.params, parameters)
self.params, loss, num_examples = train(
self.params, self.grad_fn, self.train_x, self.train_y
)
- parameters = self.get_parameters(config={})
- return parameters, num_examples, {"loss": float(loss)}
+ return get_params(self.params), num_examples, {"loss": float(loss)}
def evaluate(self, parameters, config):
- self.set_parameters(parameters)
+ set_params(self.params, parameters)
loss, num_examples = evaluation(
self.params, self.grad_fn, self.test_x, self.test_y
)
return float(loss), num_examples, {"loss": float(loss)}
+
def client_fn(context: Context):
+ input_dim = context.run_config["input-dim"]
+
# Return Client instance
- return FlowerClient().to_client()
+ return FlowerClient(input_dim).to_client()
# Flower ClientApp
diff --git a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl
index f3105103842d..157300655a14 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl
@@ -3,17 +3,18 @@
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from flwr.common.config import UserConfig
from $import_name.task import (
+ MLP,
batch_iterate,
eval_fn,
get_params,
load_data,
loss_fn,
set_params,
- MLP,
)
@@ -22,37 +23,24 @@ class FlowerClient(NumPyClient):
def __init__(
self,
data,
- num_layers,
- hidden_dim,
+ run_config: UserConfig,
num_classes,
- batch_size,
- learning_rate,
- num_epochs,
):
- self.num_layers = num_layers
- self.hidden_dim = hidden_dim
- self.num_classes = num_classes
- self.batch_size = batch_size
- self.learning_rate = learning_rate
- self.num_epochs = num_epochs
+ num_layers = run_config["num-layers"]
+ hidden_dim = run_config["hidden-dim"]
+ input_dim = run_config["input-dim"]
+ batch_size = run_config["batch-size"]
+ learning_rate = run_config["lr"]
+ self.num_epochs = run_config["local-epochs"]
self.train_images, self.train_labels, self.test_images, self.test_labels = data
- self.model = MLP(
- num_layers, self.train_images.shape[-1], hidden_dim, num_classes
- )
+ self.model = MLP(num_layers, input_dim, hidden_dim, num_classes)
self.optimizer = optim.SGD(learning_rate=learning_rate)
self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn)
- self.num_epochs = num_epochs
self.batch_size = batch_size
- def get_parameters(self, config):
- return get_params(self.model)
-
- def set_parameters(self, parameters):
- set_params(self.model, parameters)
-
def fit(self, parameters, config):
- self.set_parameters(parameters)
+ set_params(self.model, parameters)
for _ in range(self.num_epochs):
for X, y in batch_iterate(
self.batch_size, self.train_images, self.train_labels
@@ -60,10 +48,10 @@ class FlowerClient(NumPyClient):
_, grads = self.loss_and_grad_fn(self.model, X, y)
self.optimizer.update(self.model, grads)
mx.eval(self.model.parameters(), self.optimizer.state)
- return self.get_parameters(config={}), len(self.train_images), {}
+ return get_params(self.model), len(self.train_images), {}
def evaluate(self, parameters, config):
- self.set_parameters(parameters)
+ set_params(self.model, parameters)
accuracy = eval_fn(self.model, self.test_images, self.test_labels)
loss = loss_fn(self.model, self.test_images, self.test_labels)
return loss.item(), len(self.test_images), {"accuracy": accuracy.item()}
@@ -73,18 +61,10 @@ def client_fn(context: Context):
partition_id = context.node_config["partition-id"]
num_partitions = context.node_config["num-partitions"]
data = load_data(partition_id, num_partitions)
-
- num_layers = context.run_config["num-layers"]
- hidden_dim = context.run_config["hidden-dim"]
num_classes = 10
- batch_size = context.run_config["batch-size"]
- learning_rate = context.run_config["lr"]
- num_epochs = context.run_config["local-epochs"]
# Return Client instance
- return FlowerClient(
- data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs
- ).to_client()
+ return FlowerClient(data, context.run_config, num_classes).to_client()
# Flower ClientApp
diff --git a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl
index e35c3c78f6e2..f20bb536b3c6 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl
@@ -1,16 +1,15 @@
"""$project_name: A Flower / $framework_str app."""
-from flwr.client import NumPyClient, ClientApp
+from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
-import numpy as np
+from $import_name.task import get_dummy_model
class FlowerClient(NumPyClient):
- def get_parameters(self, config):
- return [np.ones((1, 1))]
def fit(self, parameters, config):
- return ([np.ones((1, 1))], 1, {})
+ model = get_dummy_model()
+ return [model], 1, {}
def evaluate(self, parameters, config):
return float(0.0), 1, {"accuracy": float(1.0)}
diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl
index bcade355e22f..e141a34d38ce 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl
@@ -1,17 +1,10 @@
"""$project_name: A Flower / $framework_str app."""
import torch
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context
-from $import_name.task import (
- Net,
- load_data,
- get_weights,
- set_weights,
- train,
- test,
-)
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from $import_name.task import Net, get_weights, load_data, set_weights, test, train
# Define Flower Client and client_fn
@@ -32,7 +25,11 @@ class FlowerClient(NumPyClient):
self.local_epochs,
self.device,
)
- return get_weights(self.net), len(self.trainloader.dataset), {"train_loss": train_loss}
+ return (
+ get_weights(self.net),
+ len(self.trainloader.dataset),
+ {"train_loss": train_loss},
+ )
def evaluate(self, parameters, config):
set_weights(self.net, parameters)
diff --git a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl
index 2d3d1c7f163a..69d208ac28c9 100644
--- a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl
@@ -2,40 +2,17 @@
import warnings
-import numpy as np
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context
-from flwr_datasets import FederatedDataset
-from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
-
-def get_model_parameters(model):
- if model.fit_intercept:
- params = [
- model.coef_,
- model.intercept_,
- ]
- else:
- params = [model.coef_]
- return params
-
-
-def set_model_params(model, params):
- model.coef_ = params[0]
- if model.fit_intercept:
- model.intercept_ = params[1]
- return model
-
-
-def set_initial_params(model):
- n_classes = 10 # MNIST has 10 classes
- n_features = 784 # Number of features in dataset
- model.classes_ = np.array([i for i in range(10)])
-
- model.coef_ = np.zeros((n_classes, n_features))
- if model.fit_intercept:
- model.intercept_ = np.zeros((n_classes,))
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from $import_name.task import (
+ get_model,
+ get_model_params,
+ load_data,
+ set_initial_params,
+ set_model_params,
+)
class FlowerClient(NumPyClient):
@@ -46,9 +23,6 @@ class FlowerClient(NumPyClient):
self.y_train = y_train
self.y_test = y_test
- def get_parameters(self, config):
- return get_model_parameters(self.model)
-
def fit(self, parameters, config):
set_model_params(self.model, parameters)
@@ -57,7 +31,7 @@ class FlowerClient(NumPyClient):
warnings.simplefilter("ignore")
self.model.fit(self.X_train, self.y_train)
- return get_model_parameters(self.model), len(self.X_train), {}
+ return get_model_params(self.model), len(self.X_train), {}
def evaluate(self, parameters, config):
set_model_params(self.model, parameters)
@@ -71,21 +45,13 @@ class FlowerClient(NumPyClient):
def client_fn(context: Context):
partition_id = context.node_config["partition-id"]
num_partitions = context.node_config["num-partitions"]
- fds = FederatedDataset(dataset="mnist", partitioners={"train": num_partitions})
- dataset = fds.load_partition(partition_id, "train").with_format("numpy")
-
- X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"]
- # Split the on edge data: 80% train, 20% test
- X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :]
- y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :]
+ X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions)
# Create LogisticRegression Model
- model = LogisticRegression(
- penalty="l2",
- max_iter=1, # local epoch
- warm_start=True, # prevent refreshing weights when fitting
- )
+ penalty = context.run_config["penalty"]
+ local_epochs = context.run_config["local-epochs"]
+ model = get_model(penalty, local_epochs)
# Setting initial parameters, akin to model.compile for keras models
set_initial_params(model)
diff --git a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl
index 514185fde970..60bbcaf3c175 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl
@@ -1,16 +1,22 @@
"""$project_name: A Flower / $framework_str app."""
-from flwr.common import Context
-from flwr.server.strategy import FedAvg
+from flwr.common import Context, ndarrays_to_parameters
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.task import get_params, load_model
def server_fn(context: Context):
# Read from config
num_rounds = context.run_config["num-server-rounds"]
+ input_dim = context.run_config["input-dim"]
+
+ # Initialize global model
+ params = get_params(load_model((input_dim,)))
+ initial_parameters = ndarrays_to_parameters(params)
# Define strategy
- strategy = FedAvg()
+ strategy = FedAvg(initial_parameters=initial_parameters)
config = ServerConfig(num_rounds=num_rounds)
return ServerAppComponents(strategy=strategy, config=config)
diff --git a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl
index c99c72574813..6d00e84fe383 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl
@@ -1,16 +1,27 @@
"""$project_name: A Flower / $framework_str app."""
-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
+from $import_name.task import MLP, get_params
def server_fn(context: Context):
# Read from config
num_rounds = context.run_config["num-server-rounds"]
+ num_classes = 10
+ num_layers = context.run_config["num-layers"]
+ input_dim = context.run_config["input-dim"]
+ hidden_dim = context.run_config["hidden-dim"]
+
+ # Initialize global model
+ model = MLP(num_layers, input_dim, hidden_dim, num_classes)
+ params = get_params(model)
+ initial_parameters = ndarrays_to_parameters(params)
+
# Define strategy
- strategy = FedAvg()
+ strategy = FedAvg(initial_parameters=initial_parameters)
config = ServerConfig(num_rounds=num_rounds)
return ServerAppComponents(strategy=strategy, config=config)
diff --git a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl
index c99c72574813..ec1ff52811af 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl
@@ -1,16 +1,21 @@
"""$project_name: A Flower / $framework_str app."""
-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
+from $import_name.task import get_dummy_model
def server_fn(context: Context):
# Read from config
num_rounds = context.run_config["num-server-rounds"]
+ # Initial model
+ model = get_dummy_model()
+ dummy_parameters = ndarrays_to_parameters([model])
+
# Define strategy
- strategy = FedAvg()
+ strategy = FedAvg(initial_parameters=dummy_parameters)
config = ServerConfig(num_rounds=num_rounds)
return ServerAppComponents(strategy=strategy, config=config)
diff --git a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl
index 39185965b3a5..9fe5f0fedc28 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl
@@ -3,7 +3,6 @@
from flwr.common import Context, ndarrays_to_parameters
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
-
from $import_name.task import Net, get_weights
@@ -27,5 +26,6 @@ def server_fn(context: Context):
return ServerAppComponents(strategy=strategy, config=config)
+
# Create ServerApp
app = ServerApp(server_fn=server_fn)
diff --git a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl
index 678ba9326229..b1487b01d2d3 100644
--- a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl
@@ -1,19 +1,31 @@
"""$project_name: A Flower / $framework_str app."""
-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
+from $import_name.task import get_model, get_model_params, set_initial_params
def server_fn(context: Context):
# Read from config
num_rounds = context.run_config["num-server-rounds"]
+ # Create LogisticRegression Model
+ penalty = context.run_config["penalty"]
+ local_epochs = context.run_config["local-epochs"]
+ model = get_model(penalty, local_epochs)
+
+    # Set initial parameters, akin to model.compile for Keras models
+ set_initial_params(model)
+
+ initial_parameters = ndarrays_to_parameters(get_model_params(model))
+
# Define strategy
strategy = FedAvg(
fraction_fit=1.0,
fraction_evaluate=1.0,
min_available_clients=2,
+ initial_parameters=initial_parameters,
)
config = ServerConfig(num_rounds=num_rounds)
diff --git a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl
index fc6ef9dee3dd..428f752845c1 100644
--- a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl
@@ -2,9 +2,9 @@
import jax
import jax.numpy as jnp
+import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
-import numpy as np
key = jax.random.PRNGKey(0)
@@ -33,7 +33,7 @@ def train(params, grad_fn, X, y):
num_examples = X.shape[0]
for epochs in range(50):
grads = grad_fn(params, X, y)
- params = jax.tree_map(lambda p, g: p - 0.05 * g, params, grads)
+ params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads)
loss = loss_fn(params, X, y)
return params, loss, num_examples
diff --git a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl
index f959cd1d64e3..63db6c28f034 100644
--- a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl
@@ -3,10 +3,10 @@
import mlx.core as mx
import mlx.nn as nn
import numpy as np
-from datasets.utils.logging import disable_progress_bar
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner
+from datasets.utils.logging import disable_progress_bar
disable_progress_bar()
diff --git a/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl
new file mode 100644
index 000000000000..9b76fc055caf
--- /dev/null
+++ b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl
@@ -0,0 +1,7 @@
+"""$project_name: A Flower / $framework_str app."""
+
+import numpy as np
+
+
+def get_dummy_model():
+ return np.ones((1, 1))
diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl
index 5562371ad460..a3c015bfee88 100644
--- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl
+++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl
@@ -5,10 +5,10 @@ from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
-from torch.utils.data import DataLoader
-from torchvision.transforms import Compose, Normalize, ToTensor
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner
+from torch.utils.data import DataLoader
+from torchvision.transforms import Compose, Normalize, ToTensor
class Net(nn.Module):
@@ -67,7 +67,7 @@ def train(net, trainloader, epochs, device):
"""Train the model on the training set."""
net.to(device) # move model to GPU if available
criterion = torch.nn.CrossEntropyLoss().to(device)
- optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
+ optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
net.train()
running_loss = 0.0
for _ in range(epochs):
diff --git a/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl
new file mode 100644
index 000000000000..52c13edc032c
--- /dev/null
+++ b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl
@@ -0,0 +1,67 @@
+"""$project_name: A Flower / $framework_str app."""
+
+import numpy as np
+from flwr_datasets import FederatedDataset
+from flwr_datasets.partitioner import IidPartitioner
+from sklearn.linear_model import LogisticRegression
+
+fds = None # Cache FederatedDataset
+
+
+def load_data(partition_id: int, num_partitions: int):
+ """Load partition MNIST data."""
+ # Only initialize `FederatedDataset` once
+ global fds
+ if fds is None:
+ partitioner = IidPartitioner(num_partitions=num_partitions)
+ fds = FederatedDataset(
+ dataset="mnist",
+ partitioners={"train": partitioner},
+ )
+
+ dataset = fds.load_partition(partition_id, "train").with_format("numpy")
+
+ X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"]
+
+ # Split the on edge data: 80% train, 20% test
+ X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :]
+ y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :]
+
+ return X_train, X_test, y_train, y_test
+
+
+def get_model(penalty: str, local_epochs: int):
+    """Return a LogisticRegression model configured for warm-start training."""
+    return LogisticRegression(
+        penalty=penalty,
+        max_iter=local_epochs,
+        warm_start=True,
+    )
+
+
+def get_model_params(model):
+ if model.fit_intercept:
+ params = [
+ model.coef_,
+ model.intercept_,
+ ]
+ else:
+ params = [model.coef_]
+ return params
+
+
+def set_model_params(model, params):
+ model.coef_ = params[0]
+ if model.fit_intercept:
+ model.intercept_ = params[1]
+ return model
+
+
+def set_initial_params(model):
+    """Zero-initialize the model weights so params exist before the first fit."""
+    n_classes = 10  # MNIST has 10 classes
+    n_features = 784  # Number of features in dataset (28 * 28)
+    model.classes_ = np.arange(n_classes)
+    model.coef_ = np.zeros((n_classes, n_features))
+    if model.fit_intercept:
+        model.intercept_ = np.zeros((n_classes,))
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl
index 71afc184ffa9..c70580009392 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl
index 8a4d49e7fd84..d34985d50433 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl
@@ -8,13 +8,13 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.1",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"trl==0.8.1",
"bitsandbytes==0.43.0",
"scipy==1.13.0",
"peft==0.6.2",
- "transformers==4.39.3",
+ "transformers==4.43.1",
"sentencepiece==0.2.0",
"omegaconf==2.3.0",
"hf_transfer==0.1.8",
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
index af1e4d005114..3515cbd69d17 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.11.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets>=0.3.0",
"torch==2.2.1",
"transformers>=4.30.0,<5.0",
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl
index 31fff1c2a4c8..7c55d3654a08 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl
@@ -8,9 +8,9 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
- "jax==0.4.13",
- "jaxlib==0.4.13",
+ "flwr[simulation]>=1.12.0",
+ "jax==0.4.30",
+ "jaxlib==0.4.30",
"scikit-learn==1.3.2",
]
@@ -26,6 +26,7 @@ clientapp = "$import_name.client_app:app"
[tool.flwr.app.config]
num-server-rounds = 3
+input-dim = 3
[tool.flwr.federations]
default = "local-simulation"
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl
index c1bfe804c709..9ea11ff3fc0c 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"mlx==0.16.1",
"numpy==1.24.4",
@@ -28,6 +28,7 @@ clientapp = "$import_name.client_app:app"
num-server-rounds = 3
local-epochs = 1
num-layers = 2
+input-dim = 784 # 28*28
hidden-dim = 32
batch-size = 256
lr = 0.1
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl
index 953e556ad012..9f8f3aaab554 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"numpy>=1.21.0",
]
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl
index ccaf88c19e42..fe5ac7735d66 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"torch==2.2.1",
"torchvision==0.17.1",
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl
index 2b5778fec9a7..d5fec5f2f93f 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"scikit-learn>=1.1.1",
]
@@ -25,6 +25,8 @@ clientapp = "$import_name.client_app:app"
[tool.flwr.app.config]
num-server-rounds = 3
+penalty = "l2"
+local-epochs = 1
[tool.flwr.federations]
default = "local-simulation"
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl
index 11f7d1083abc..81a839b30998 100644
--- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl
+++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl
@@ -8,7 +8,7 @@ version = "1.0.0"
description = ""
license = "Apache-2.0"
dependencies = [
- "flwr[simulation]>=1.10.0",
+ "flwr[simulation]>=1.12.0",
"flwr-datasets[vision]>=0.3.0",
"tensorflow>=2.11.1",
]
diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py
index 2832af3aebab..4722effee53d 100644
--- a/src/py/flwr/cli/run/run.py
+++ b/src/py/flwr/cli/run/run.py
@@ -14,7 +14,6 @@
# ==============================================================================
"""Flower command line interface `run` command."""
-import hashlib
import json
import subprocess
import sys
@@ -134,6 +133,7 @@ def run(
_run_without_superexec(app, federation_config, config_overrides, federation)
+# pylint: disable=too-many-locals
def _run_with_superexec(
app: Path,
federation_config: dict[str, Any],
@@ -179,9 +179,9 @@ def _run_with_superexec(
channel.subscribe(on_channel_state_change)
stub = ExecStub(channel)
- fab_path = Path(build(app))
- content = fab_path.read_bytes()
- fab = Fab(hashlib.sha256(content).hexdigest(), content)
+ fab_path, fab_hash = build(app)
+ content = Path(fab_path).read_bytes()
+ fab = Fab(fab_hash, content)
req = StartRunRequest(
fab=fab_to_proto(fab),
@@ -193,7 +193,7 @@ def _run_with_superexec(
res = stub.StartRun(req)
# Delete FAB file once it has been sent to the SuperExec
- fab_path.unlink()
+ Path(fab_path).unlink()
typer.secho(f"🎊 Successfully started run {res.run_id}", fg=typer.colors.GREEN)
if stream:
diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py
index 90c50aba7fad..fdb62578292a 100644
--- a/src/py/flwr/client/app.py
+++ b/src/py/flwr/client/app.py
@@ -132,6 +132,11 @@ class `flwr.client.Client` (default: None)
- 'grpc-bidi': gRPC, bidirectional streaming
- 'grpc-rere': gRPC, request-response (experimental)
- 'rest': HTTP (experimental)
+ authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None)
+ Tuple containing the elliptic curve private key and public key for
+ authentication from the cryptography library.
+ Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/
+ Used to establish an authenticated connection with the server.
max_retries: Optional[int] (default: None)
The maximum number of times the client will try to connect to the
server before giving up in case of a connection error. If set to None,
@@ -197,7 +202,7 @@ def start_client_internal(
*,
server_address: str,
node_config: UserConfig,
- load_client_app_fn: Optional[Callable[[str, str], ClientApp]] = None,
+ load_client_app_fn: Optional[Callable[[str, str, str], ClientApp]] = None,
client_fn: Optional[ClientFnExt] = None,
client: Optional[Client] = None,
grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
@@ -249,6 +254,11 @@ class `flwr.client.Client` (default: None)
- 'grpc-bidi': gRPC, bidirectional streaming
- 'grpc-rere': gRPC, request-response (experimental)
- 'rest': HTTP (experimental)
+ authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None)
+ Tuple containing the elliptic curve private key and public key for
+ authentication from the cryptography library.
+ Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/
+ Used to establish an authenticated connection with the server.
max_retries: Optional[int] (default: None)
The maximum number of times the client will try to connect to the
server before giving up in case of a connection error. If set to None,
@@ -288,7 +298,7 @@ def single_client_factory(
client_fn = single_client_factory
- def _load_client_app(_1: str, _2: str) -> ClientApp:
+ def _load_client_app(_1: str, _2: str, _3: str) -> ClientApp:
return ClientApp(client_fn=client_fn)
load_client_app_fn = _load_client_app
@@ -519,7 +529,7 @@ def _on_backoff(retry_state: RetryState) -> None:
else:
# Load ClientApp instance
client_app: ClientApp = load_client_app_fn(
- fab_id, fab_version
+ fab_id, fab_version, run.fab_hash
)
# Execute ClientApp
diff --git a/src/py/flwr/client/clientapp/app.py b/src/py/flwr/client/clientapp/app.py
index f493128bebac..52be2a4b6dc1 100644
--- a/src/py/flwr/client/clientapp/app.py
+++ b/src/py/flwr/client/clientapp/app.py
@@ -133,7 +133,9 @@ def run_clientapp( # pylint: disable=R0914
try:
# Load ClientApp
- client_app: ClientApp = load_client_app_fn(run.fab_id, run.fab_version)
+ client_app: ClientApp = load_client_app_fn(
+ run.fab_id, run.fab_version, fab.hash_str if fab else ""
+ )
# Execute ClientApp
reply_message = client_app(message=message, context=context)
diff --git a/src/py/flwr/client/clientapp/utils.py b/src/py/flwr/client/clientapp/utils.py
index d2386dd707c3..f7261c015b14 100644
--- a/src/py/flwr/client/clientapp/utils.py
+++ b/src/py/flwr/client/clientapp/utils.py
@@ -34,7 +34,7 @@ def get_load_client_app_fn(
app_path: Optional[str],
multi_app: bool,
flwr_dir: Optional[str] = None,
-) -> Callable[[str, str], ClientApp]:
+) -> Callable[[str, str, str], ClientApp]:
"""Get the load_client_app_fn function.
If `multi_app` is True, this function loads the specified ClientApp
@@ -55,13 +55,14 @@ def get_load_client_app_fn(
if not valid and error_msg:
raise LoadClientAppError(error_msg) from None
- def _load(fab_id: str, fab_version: str) -> ClientApp:
+ def _load(fab_id: str, fab_version: str, fab_hash: str) -> ClientApp:
runtime_app_dir = Path(app_path if app_path else "").absolute()
# If multi-app feature is disabled
if not multi_app:
# Set app reference
client_app_ref = default_app_ref
- # If multi-app feature is enabled but app directory is provided
+ # If multi-app feature is enabled but app directory is provided.
+ # `fab_hash` is not required since the app is loaded from `runtime_app_dir`.
elif app_path is not None:
config = get_project_config(runtime_app_dir)
this_fab_version, this_fab_id = get_metadata_from_config(config)
@@ -81,11 +82,16 @@ def _load(fab_id: str, fab_version: str) -> ClientApp:
else:
try:
runtime_app_dir = get_project_dir(
- fab_id, fab_version, get_flwr_dir(flwr_dir)
+ fab_id, fab_version, fab_hash, get_flwr_dir(flwr_dir)
)
config = get_project_config(runtime_app_dir)
except Exception as e:
- raise LoadClientAppError("Failed to load ClientApp") from e
+ raise LoadClientAppError(
+ "Failed to load ClientApp."
+ "Possible reasons for error include mismatched "
+ "`fab_id`, `fab_version`, or `fab_hash` in "
+ f"{str(get_flwr_dir(flwr_dir).resolve())}."
+ ) from e
# Set app reference
client_app_ref = config["tool"]["flwr"]["app"]["components"]["clientapp"]
diff --git a/src/py/flwr/client/grpc_adapter_client/connection.py b/src/py/flwr/client/grpc_adapter_client/connection.py
index 9b84545eacdb..ab823112bbe1 100644
--- a/src/py/flwr/client/grpc_adapter_client/connection.py
+++ b/src/py/flwr/client/grpc_adapter_client/connection.py
@@ -32,7 +32,7 @@
@contextmanager
-def grpc_adapter( # pylint: disable=R0913
+def grpc_adapter( # pylint: disable=R0913,too-many-positional-arguments
server_address: str,
insecure: bool,
retry_invoker: RetryInvoker,
diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py
index 29479cf5479d..75d2ebe15025 100644
--- a/src/py/flwr/client/grpc_client/connection.py
+++ b/src/py/flwr/client/grpc_client/connection.py
@@ -60,7 +60,7 @@ def on_channel_state_change(channel_connectivity: str) -> None:
@contextmanager
-def grpc_connection( # pylint: disable=R0913, R0915
+def grpc_connection( # pylint: disable=R0913,R0915,too-many-positional-arguments
server_address: str,
insecure: bool,
retry_invoker: RetryInvoker, # pylint: disable=unused-argument
diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py
index b4fa28373600..bfc20eee896a 100644
--- a/src/py/flwr/client/grpc_rere_client/connection.py
+++ b/src/py/flwr/client/grpc_rere_client/connection.py
@@ -71,7 +71,7 @@ def on_channel_state_change(channel_connectivity: str) -> None:
@contextmanager
-def grpc_request_response( # pylint: disable=R0913, R0914, R0915
+def grpc_request_response( # pylint: disable=R0913,R0914,R0915,R0917
server_address: str,
insecure: bool,
retry_invoker: RetryInvoker,
@@ -120,6 +120,9 @@ def grpc_request_response( # pylint: disable=R0913, R0914, R0915
authentication from the cryptography library.
Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/
Used to establish an authenticated connection with the server.
+ adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] (default: None)
+        A GrpcStub class that can be used to send messages. By default the FleetStub
+ will be used.
Returns
-------
diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py
index e7967dfc8bee..843c9890c5d2 100644
--- a/src/py/flwr/client/node_state.py
+++ b/src/py/flwr/client/node_state.py
@@ -48,7 +48,7 @@ def __init__(
self.node_config = node_config
self.run_infos: dict[int, RunInfo] = {}
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
def register_context(
self,
run_id: int,
diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py
index 485bbd7a1810..f933ae44ad06 100644
--- a/src/py/flwr/client/rest_client/connection.py
+++ b/src/py/flwr/client/rest_client/connection.py
@@ -82,7 +82,7 @@
@contextmanager
-def http_request_response( # pylint: disable=,R0913, R0914, R0915
+def http_request_response( # pylint: disable=R0913,R0914,R0915,R0917
server_address: str,
insecure: bool, # pylint: disable=unused-argument
retry_invoker: RetryInvoker,
diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py
index ea7613667baa..4ddfe5d40aa3 100644
--- a/src/py/flwr/client/supernode/app.py
+++ b/src/py/flwr/client/supernode/app.py
@@ -48,13 +48,20 @@
def run_supernode() -> None:
"""Run Flower SuperNode."""
+ args = _parse_args_run_supernode().parse_args()
+ _warn_deprecated_server_arg(args)
+
log(INFO, "Starting Flower SuperNode")
event(EventType.RUN_SUPERNODE_ENTER)
- args = _parse_args_run_supernode().parse_args()
-
- _warn_deprecated_server_arg(args)
+ # Check if both `--flwr-dir` and `--isolation` were set
+ if args.flwr_dir is not None and args.isolation is not None:
+ log(
+ WARN,
+ "Both `--flwr-dir` and `--isolation` were specified. "
+ "Ignoring `--flwr-dir`.",
+ )
root_certificates = _get_certificates(args)
load_fn = get_load_client_app_fn(
@@ -178,12 +185,12 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser:
"--flwr-dir",
default=None,
help="""The path containing installed Flower Apps.
- By default, this value is equal to:
+ The default directory is:
- `$FLWR_HOME/` if `$FLWR_HOME` is defined
- `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined
- `$HOME/.flwr/` in all other cases
- """,
+ """,
)
parser.add_argument(
"--isolation",
diff --git a/src/py/flwr/common/config.py b/src/py/flwr/common/config.py
index 071d41a3ab5e..24ccada7509a 100644
--- a/src/py/flwr/common/config.py
+++ b/src/py/flwr/common/config.py
@@ -22,7 +22,12 @@
import tomli
from flwr.cli.config_utils import get_fab_config, validate_fields
-from flwr.common.constant import APP_DIR, FAB_CONFIG_FILE, FLWR_HOME
+from flwr.common.constant import (
+ APP_DIR,
+ FAB_CONFIG_FILE,
+ FAB_HASH_TRUNCATION,
+ FLWR_HOME,
+)
from flwr.common.typing import Run, UserConfig, UserConfigValue
@@ -39,7 +44,10 @@ def get_flwr_dir(provided_path: Optional[str] = None) -> Path:
def get_project_dir(
- fab_id: str, fab_version: str, flwr_dir: Optional[Union[str, Path]] = None
+ fab_id: str,
+ fab_version: str,
+ fab_hash: str,
+ flwr_dir: Optional[Union[str, Path]] = None,
) -> Path:
"""Return the project directory based on the given fab_id and fab_version."""
# Check the fab_id
@@ -50,7 +58,11 @@ def get_project_dir(
publisher, project_name = fab_id.split("/")
if flwr_dir is None:
flwr_dir = get_flwr_dir()
- return Path(flwr_dir) / APP_DIR / publisher / project_name / fab_version
+ return (
+ Path(flwr_dir)
+ / APP_DIR
+ / f"{publisher}.{project_name}.{fab_version}.{fab_hash[:FAB_HASH_TRUNCATION]}"
+ )
def get_project_config(project_dir: Union[str, Path]) -> dict[str, Any]:
@@ -127,7 +139,7 @@ def get_fused_config(run: Run, flwr_dir: Optional[Path]) -> UserConfig:
if not run.fab_id or not run.fab_version:
return {}
- project_dir = get_project_dir(run.fab_id, run.fab_version, flwr_dir)
+ project_dir = get_project_dir(run.fab_id, run.fab_version, run.fab_hash, flwr_dir)
# Return empty dict if project directory does not exist
if not project_dir.is_dir():
@@ -194,6 +206,7 @@ def parse_config_args(
# Regular expression to capture key-value pairs with possible quoted values
pattern = re.compile(r"(\S+?)=(\'[^\']*\'|\"[^\"]*\"|\S+)")
+ flat_overrides = {}
for config_line in config:
if config_line:
# .toml files aren't allowed alongside other configs
@@ -205,8 +218,9 @@ def parse_config_args(
matches = pattern.findall(config_line)
toml_str = "\n".join(f"{k} = {v}" for k, v in matches)
overrides.update(tomli.loads(toml_str))
+ flat_overrides = flatten_dict(overrides)
- return overrides
+ return flat_overrides
def get_metadata_from_config(config: dict[str, Any]) -> tuple[str, str]:
diff --git a/src/py/flwr/common/config_test.py b/src/py/flwr/common/config_test.py
index 34bc691cc957..b2edd319e382 100644
--- a/src/py/flwr/common/config_test.py
+++ b/src/py/flwr/common/config_test.py
@@ -65,13 +65,22 @@ def test_get_flwr_dir_with_xdg_data_home() -> None:
def test_get_project_dir_invalid_fab_id() -> None:
"""Test get_project_dir with an invalid fab_id."""
with pytest.raises(ValueError):
- get_project_dir("invalid_fab_id", "1.0.0")
+ get_project_dir(
+ "invalid_fab_id",
+ "1.0.0",
+ "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd",
+ )
def test_get_project_dir_valid() -> None:
"""Test get_project_dir with an valid fab_id and version."""
- app_path = get_project_dir("app_name/user", "1.0.0", flwr_dir=".")
- assert app_path == Path("apps") / "app_name" / "user" / "1.0.0"
+ app_path = get_project_dir(
+ "app_name/user",
+ "1.0.0",
+ "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd",
+ flwr_dir=".",
+ )
+ assert app_path == Path("apps") / "app_name.user.1.0.0.03840e93"
def test_get_project_config_file_not_found() -> None:
diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py
index ffd58478aa48..e99e0edaacd4 100644
--- a/src/py/flwr/common/constant.py
+++ b/src/py/flwr/common/constant.py
@@ -63,7 +63,10 @@
# Constants for FAB
APP_DIR = "apps"
+FAB_ALLOWED_EXTENSIONS = {".py", ".toml", ".md"}
FAB_CONFIG_FILE = "pyproject.toml"
+FAB_DATE = (2024, 10, 1, 0, 0, 0)
+FAB_HASH_TRUNCATION = 8
FLWR_HOME = "FLWR_HOME"
# Constants entries in Node config for Simulation
@@ -78,6 +81,9 @@
GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY = "grpc-message-module"
GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY = "grpc-message-qualname"
+# Message TTL
+MESSAGE_TTL_TOLERANCE = 1e-1
+
class MessageType:
"""Message type."""
diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py
index 303780fc0b5d..3a058abac9c6 100644
--- a/src/py/flwr/common/logger.py
+++ b/src/py/flwr/common/logger.py
@@ -111,7 +111,7 @@ def update_console_handler(
class CustomHTTPHandler(HTTPHandler):
"""Custom HTTPHandler which overrides the mapLogRecords method."""
- # pylint: disable=too-many-arguments,bad-option-value,R1725
+ # pylint: disable=too-many-arguments,bad-option-value,R1725,R0917
def __init__(
self,
identifier: str,
diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py
index 4138fc95a591..3bb07ff3961a 100644
--- a/src/py/flwr/common/message.py
+++ b/src/py/flwr/common/message.py
@@ -17,9 +17,11 @@
from __future__ import annotations
import time
-import warnings
+from logging import WARNING
from typing import Optional, cast
+from .constant import MESSAGE_TTL_TOLERANCE
+from .logger import log
from .record import RecordSet
DEFAULT_TTL = 3600
@@ -50,7 +52,7 @@ class Metadata: # pylint: disable=too-many-instance-attributes
the receiving end.
"""
- def __init__( # pylint: disable=too-many-arguments
+ def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments
self,
run_id: int,
message_id: str,
@@ -288,14 +290,12 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message:
follows the equation:
ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)
+
+ Returns
+ -------
+ message : Message
+ A Message containing only the relevant error and metadata.
"""
- if ttl:
- warnings.warn(
- "A custom TTL was set, but note that the SuperLink does not enforce "
- "the TTL yet. The SuperLink will start enforcing the TTL in a future "
- "version of Flower.",
- stacklevel=2,
- )
# If no TTL passed, use default for message creation (will update after
# message creation)
ttl_ = DEFAULT_TTL if ttl is None else ttl
@@ -309,6 +309,8 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message:
)
message.metadata.ttl = ttl
+ self._limit_task_res_ttl(message)
+
return message
def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message:
@@ -334,13 +336,6 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message:
Message
A new `Message` instance representing the reply.
"""
- if ttl:
- warnings.warn(
- "A custom TTL was set, but note that the SuperLink does not enforce "
- "the TTL yet. The SuperLink will start enforcing the TTL in a future "
- "version of Flower.",
- stacklevel=2,
- )
# If no TTL passed, use default for message creation (will update after
# message creation)
ttl_ = DEFAULT_TTL if ttl is None else ttl
@@ -357,6 +352,8 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message:
)
message.metadata.ttl = ttl
+ self._limit_task_res_ttl(message)
+
return message
def __repr__(self) -> str:
@@ -370,6 +367,31 @@ def __repr__(self) -> str:
)
return f"{self.__class__.__qualname__}({view})"
+ def _limit_task_res_ttl(self, message: Message) -> None:
+ """Limit the TaskRes TTL to not exceed the expiration time of the TaskIns it
+ replies to.
+
+ Parameters
+ ----------
+ message : Message
+            The reply message whose TTL will be capped if it exceeds the allowed maximum.
+ """
+ # Calculate the maximum allowed TTL
+ max_allowed_ttl = (
+ self.metadata.created_at + self.metadata.ttl - message.metadata.created_at
+ )
+
+ if message.metadata.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE:
+ log(
+ WARNING,
+ "The reply TTL of %.2f seconds exceeded the "
+ "allowed maximum of %.2f seconds. "
+ "The TTL has been updated to the allowed maximum.",
+ message.metadata.ttl,
+ max_allowed_ttl,
+ )
+ message.metadata.ttl = max_allowed_ttl
+
def _create_reply_metadata(msg: Message, ttl: float) -> Metadata:
"""Construct metadata for a reply message."""
diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py
index 57c57eb41bd9..d418f9fa8036 100644
--- a/src/py/flwr/common/message_test.py
+++ b/src/py/flwr/common/message_test.py
@@ -23,6 +23,7 @@
# pylint: enable=E0611
from . import RecordSet
+from .constant import MESSAGE_TTL_TOLERANCE
from .message import Error, Message, Metadata
from .serde_test import RecordMaker
@@ -202,3 +203,35 @@ def test_repr(cls: type, kwargs: dict[str, Any]) -> None:
# Assert
assert str(actual) == str(expected)
+
+
+@pytest.mark.parametrize(
+ "message_creation_fn,initial_ttl,reply_ttl,expected_reply_ttl",
+ [
+ # Case where the reply_ttl is larger than the allowed TTL
+ (create_message_with_content, 20, 30, 20),
+ (create_message_with_error, 20, 30, 20),
+ # Case where the reply_ttl is within the allowed range
+ (create_message_with_content, 20, 10, 10),
+ (create_message_with_error, 20, 10, 10),
+ ],
+)
+def test_reply_ttl_limitation(
+ message_creation_fn: Callable[[float], Message],
+ initial_ttl: float,
+ reply_ttl: float,
+ expected_reply_ttl: float,
+) -> None:
+ """Test that the reply TTL does not exceed the allowed TTL."""
+ message = message_creation_fn(initial_ttl)
+
+ if message.has_error():
+ dummy_error = Error(code=0, reason="test error")
+ reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl)
+ else:
+ reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl)
+
+ assert reply_message.metadata.ttl - expected_reply_ttl <= MESSAGE_TTL_TOLERANCE, (
+ f"Expected TTL to be <= {expected_reply_ttl}, "
+ f"but got {reply_message.metadata.ttl}"
+ )
diff --git a/src/py/flwr/common/record/configsrecord.py b/src/py/flwr/common/record/configsrecord.py
index f570e000cc9b..e83bca816fc6 100644
--- a/src/py/flwr/common/record/configsrecord.py
+++ b/src/py/flwr/common/record/configsrecord.py
@@ -128,6 +128,7 @@ def count_bytes(self) -> int:
def get_var_bytes(value: ConfigsScalar) -> int:
"""Return Bytes of value passed."""
+ var_bytes = 0
if isinstance(value, bool):
var_bytes = 1
elif isinstance(value, (int, float)):
@@ -136,6 +137,11 @@ def get_var_bytes(value: ConfigsScalar) -> int:
)
if isinstance(value, (str, bytes)):
var_bytes = len(value)
+ if var_bytes == 0:
+ raise ValueError(
+ "Config values must be either `bool`, `int`, `float`, "
+ "`str`, or `bytes`"
+ )
return var_bytes
num_bytes = 0
diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py
index 35024fcd67d1..4641b8f29c96 100644
--- a/src/py/flwr/common/recordset_compat.py
+++ b/src/py/flwr/common/recordset_compat.py
@@ -59,6 +59,11 @@ def parametersrecord_to_parameters(
keep_input : bool
A boolean indicating whether entries in the record should be deleted from the
input dictionary immediately after adding them to the record.
+
+ Returns
+ -------
+ parameters : Parameters
+ The parameters in the legacy format Parameters.
"""
parameters = Parameters(tensors=[], tensor_type="")
@@ -94,6 +99,11 @@ def parameters_to_parametersrecord(
A boolean indicating whether parameters should be deleted from the input
Parameters object (i.e. a list of serialized NumPy arrays) immediately after
adding them to the record.
+
+ Returns
+ -------
+ ParametersRecord
+ The ParametersRecord containing the provided parameters.
"""
tensor_type = parameters.tensor_type
diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py
index 303d5596f237..9785b0fbd9b4 100644
--- a/src/py/flwr/common/retry_invoker.py
+++ b/src/py/flwr/common/retry_invoker.py
@@ -38,6 +38,11 @@ def exponential(
Factor by which the delay is multiplied after each retry.
max_delay: Optional[float] (default: None)
The maximum delay duration between two consecutive retries.
+
+ Returns
+ -------
+ Generator[float, None, None]
+ A generator for the delay between 2 retries.
"""
delay = base_delay if max_delay is None else min(base_delay, max_delay)
while True:
@@ -56,6 +61,11 @@ def constant(
----------
interval: Union[float, Iterable[float]] (default: 1)
A constant value to yield or an iterable of such values.
+
+ Returns
+ -------
+ Generator[float, None, None]
+ A generator for the delay between 2 retries.
"""
if not isinstance(interval, Iterable):
interval = itertools.repeat(interval)
@@ -73,6 +83,11 @@ def full_jitter(max_value: float) -> float:
----------
max_value : float
The upper limit for the randomized value.
+
+ Returns
+ -------
+ float
+ A random float that is less than max_value.
"""
return random.uniform(0, max_value)
diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py
index d156edaa3c99..58918dbb79ab 100644
--- a/src/py/flwr/server/app.py
+++ b/src/py/flwr/server/app.py
@@ -199,12 +199,12 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
def run_superlink() -> None:
"""Run Flower SuperLink (Driver API and Fleet API)."""
+ args = _parse_args_run_superlink().parse_args()
+
log(INFO, "Starting Flower SuperLink")
event(EventType.RUN_SUPERLINK_ENTER)
- args = _parse_args_run_superlink().parse_args()
-
# Parse IP address
driver_address, _, _ = _format_address(args.driver_api_address)
@@ -542,6 +542,7 @@ def _run_fleet_api_grpc_adapter(
# pylint: disable=import-outside-toplevel,too-many-arguments
+# pylint: disable=too-many-positional-arguments
def _run_fleet_api_rest(
host: str,
port: int,
diff --git a/src/py/flwr/server/client_manager.py b/src/py/flwr/server/client_manager.py
index 175bd4a786ea..9949e29f8f7d 100644
--- a/src/py/flwr/server/client_manager.py
+++ b/src/py/flwr/server/client_manager.py
@@ -47,6 +47,7 @@ def register(self, client: ClientProxy) -> bool:
Parameters
----------
client : flwr.server.client_proxy.ClientProxy
+ The ClientProxy of the Client to register.
Returns
-------
@@ -64,6 +65,7 @@ def unregister(self, client: ClientProxy) -> None:
Parameters
----------
client : flwr.server.client_proxy.ClientProxy
+ The ClientProxy of the Client to unregister.
"""
@abstractmethod
diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py
index 7190786784ec..c5a3f561d474 100644
--- a/src/py/flwr/server/compat/driver_client_proxy.py
+++ b/src/py/flwr/server/compat/driver_client_proxy.py
@@ -15,7 +15,6 @@
"""Flower ClientProxy implementation for Driver API."""
-import time
from typing import Optional
from flwr import common
@@ -25,8 +24,6 @@
from ..driver.driver import Driver
-SLEEP_TIME = 1
-
class DriverClientProxy(ClientProxy):
"""Flower client proxy which delegates work using the Driver API."""
@@ -122,29 +119,18 @@ def _send_receive_recordset(
ttl=timeout,
)
- # Push message
- message_ids = list(self.driver.push_messages(messages=[message]))
- if len(message_ids) != 1:
- raise ValueError("Unexpected number of message_ids")
-
- message_id = message_ids[0]
- if message_id == "":
- raise ValueError(f"Failed to send message to node {self.node_id}")
-
- if timeout:
- start_time = time.time()
-
- while True:
- messages = list(self.driver.pull_messages(message_ids))
- if len(messages) == 1:
- msg: Message = messages[0]
- if msg.has_error():
- raise ValueError(
- f"Message contains an Error (reason: {msg.error.reason}). "
- "It originated during client-side execution of a message."
- )
- return msg.content
-
- if timeout is not None and time.time() > start_time + timeout:
- raise RuntimeError("Timeout reached")
- time.sleep(SLEEP_TIME)
+ # Send message and wait for reply
+ messages = list(self.driver.send_and_receive(messages=[message]))
+
+ # A single reply is expected
+ if len(messages) != 1:
+ raise ValueError(f"Expected one Message but got: {len(messages)}")
+
+ # Only messages without errors can be handled beyond this point
+ msg: Message = messages[0]
+ if msg.has_error():
+ raise ValueError(
+ f"Message contains an Error (reason: {msg.error.reason}). "
+ "It originated during client-side execution of a message."
+ )
+ return msg.content
diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py
index a5b454c79f90..5bad0b56c4c6 100644
--- a/src/py/flwr/server/compat/driver_client_proxy_test.py
+++ b/src/py/flwr/server/compat/driver_client_proxy_test.py
@@ -52,8 +52,6 @@
RUN_ID = 61016
NODE_ID = 1
-INSTRUCTION_MESSAGE_ID = "mock instruction message id"
-REPLY_MESSAGE_ID = "mock reply message id"
class DriverClientProxyTestCase(unittest.TestCase):
@@ -77,7 +75,7 @@ def test_get_properties(self) -> None:
"""Test positive case."""
# Prepare
res = GetPropertiesRes(status=CLIENT_STATUS, properties=CLIENT_PROPERTIES)
- self.driver.push_messages.side_effect = self._get_push_messages(res)
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res)
request_properties: Config = {"tensor_type": "str"}
ins = GetPropertiesIns(config=request_properties)
@@ -95,7 +93,7 @@ def test_get_parameters(self) -> None:
status=CLIENT_STATUS,
parameters=MESSAGE_PARAMETERS,
)
- self.driver.push_messages.side_effect = self._get_push_messages(res)
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res)
ins = GetParametersIns(config={})
# Execute
@@ -114,7 +112,7 @@ def test_fit(self) -> None:
num_examples=10,
metrics={},
)
- self.driver.push_messages.side_effect = self._get_push_messages(res)
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res)
parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))])
ins = FitIns(parameters, {})
@@ -134,7 +132,7 @@ def test_evaluate(self) -> None:
num_examples=0,
metrics={},
)
- self.driver.push_messages.side_effect = self._get_push_messages(res)
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res)
parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np")
ins = EvaluateIns(parameters, {})
@@ -148,7 +146,7 @@ def test_evaluate(self) -> None:
def test_get_properties_and_fail(self) -> None:
"""Test negative case."""
# Prepare
- self.driver.push_messages.side_effect = self._get_push_messages(
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(
None, error_reply=True
)
request_properties: Config = {"tensor_type": "str"}
@@ -163,7 +161,7 @@ def test_get_properties_and_fail(self) -> None:
def test_get_parameters_and_fail(self) -> None:
"""Test negative case."""
# Prepare
- self.driver.push_messages.side_effect = self._get_push_messages(
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(
None, error_reply=True
)
ins = GetParametersIns(config={})
@@ -177,7 +175,7 @@ def test_get_parameters_and_fail(self) -> None:
def test_fit_and_fail(self) -> None:
"""Test negative case."""
# Prepare
- self.driver.push_messages.side_effect = self._get_push_messages(
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(
None, error_reply=True
)
parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))])
@@ -190,7 +188,7 @@ def test_fit_and_fail(self) -> None:
def test_evaluate_and_fail(self) -> None:
"""Test negative case."""
# Prepare
- self.driver.push_messages.side_effect = self._get_push_messages(
+ self.driver.send_and_receive.side_effect = self._exec_send_and_receive(
None, error_reply=True
)
parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np")
@@ -202,7 +200,7 @@ def test_evaluate_and_fail(self) -> None:
)
self._common_assertions(ins)
- def _create_message_dummy( # pylint: disable=R0913
+ def _create_message_dummy( # pylint: disable=R0913,too-many-positional-arguments
self,
content: RecordSet,
message_type: str,
@@ -229,19 +227,19 @@ def _create_message_dummy( # pylint: disable=R0913
self.created_msg = Message(metadata=metadata, content=content)
return self.created_msg
- def _get_push_messages(
+ def _exec_send_and_receive(
self,
res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes, None],
error_reply: bool = False,
- ) -> Callable[[Iterable[Message]], Iterable[str]]:
- """Get the push_messages function that sets the return value of pull_messages
- when called."""
+ ) -> Callable[[Iterable[Message]], Iterable[Message]]:
+ """Get the generate_replies function that sets the return value of driver's
+ send_and_receive when called."""
- def push_messages(messages: Iterable[Message]) -> Iterable[str]:
+ def generate_replies(messages: Iterable[Message]) -> Iterable[Message]:
msg = list(messages)[0]
+ recordset = None
if error_reply:
- recordset = None
- ret = msg.create_error_reply(ERROR_REPLY)
+ pass
elif isinstance(res, GetParametersRes):
recordset = compat.getparametersres_to_recordset(res, True)
elif isinstance(res, GetPropertiesRes):
@@ -250,17 +248,16 @@ def push_messages(messages: Iterable[Message]) -> Iterable[str]:
recordset = compat.fitres_to_recordset(res, True)
elif isinstance(res, EvaluateRes):
recordset = compat.evaluateres_to_recordset(res)
- else:
- raise ValueError(f"Unsupported type: {type(res)}")
+
if recordset is not None:
ret = msg.create_reply(recordset)
- ret.metadata.__dict__["_message_id"] = REPLY_MESSAGE_ID
+ else:
+ ret = msg.create_error_reply(ERROR_REPLY)
- # Set the return value of `pull_messages`
- self.driver.pull_messages.return_value = [ret]
- return [INSTRUCTION_MESSAGE_ID]
+ # Reply messages given the push message
+ return [ret]
- return push_messages
+ return generate_replies
def _common_assertions(self, original_ins: Any) -> None:
"""Check common assertions."""
@@ -275,18 +272,9 @@ def _common_assertions(self, original_ins: Any) -> None:
self.assertEqual(self.called_times, 1)
self.assertEqual(actual_ins, original_ins)
- # Check if push_messages is called once with expected args/kwargs.
- self.driver.push_messages.assert_called_once()
- try:
- self.driver.push_messages.assert_any_call([self.created_msg])
- except AssertionError:
- self.driver.push_messages.assert_any_call(messages=[self.created_msg])
-
- # Check if pull_messages is called once with expected args/kwargs.
- self.driver.pull_messages.assert_called_once()
+ # Check if send_and_receive is called once with expected args/kwargs.
+ self.driver.send_and_receive.assert_called_once()
try:
- self.driver.pull_messages.assert_called_with([INSTRUCTION_MESSAGE_ID])
+ self.driver.send_and_receive.assert_any_call([self.created_msg])
except AssertionError:
- self.driver.pull_messages.assert_called_with(
- message_ids=[INSTRUCTION_MESSAGE_ID]
- )
+ self.driver.send_and_receive.assert_any_call(messages=[self.created_msg])
diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py
index e8429e865db6..5a6ee691f3a9 100644
--- a/src/py/flwr/server/driver/driver.py
+++ b/src/py/flwr/server/driver/driver.py
@@ -32,7 +32,7 @@ def run(self) -> Run:
"""Run information."""
@abstractmethod
- def create_message( # pylint: disable=too-many-arguments
+ def create_message( # pylint: disable=too-many-arguments,R0917
self,
content: RecordSet,
message_type: str,
diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py
index 421dfd30ecb2..13c1c4152dad 100644
--- a/src/py/flwr/server/driver/grpc_driver.py
+++ b/src/py/flwr/server/driver/grpc_driver.py
@@ -158,7 +158,7 @@ def _check_message(self, message: Message) -> None:
):
raise ValueError(f"Invalid message: {message}")
- def create_message( # pylint: disable=too-many-arguments
+ def create_message( # pylint: disable=too-many-arguments,R0917
self,
content: RecordSet,
message_type: str,
diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py
index 3a8a4b1bc73d..130562c6defa 100644
--- a/src/py/flwr/server/driver/inmemory_driver.py
+++ b/src/py/flwr/server/driver/inmemory_driver.py
@@ -39,16 +39,20 @@ class InMemoryDriver(Driver):
The identifier of the run.
state_factory : StateFactory
A StateFactory embedding a state that this driver can interface with.
+ pull_interval : float (default=0.1)
+ Sleep duration between calls to `pull_messages`.
"""
def __init__(
self,
run_id: int,
state_factory: StateFactory,
+ pull_interval: float = 0.1,
) -> None:
self._run_id = run_id
self._run: Optional[Run] = None
self.state = state_factory.state()
+ self.pull_interval = pull_interval
self.node = Node(node_id=0, anonymous=True)
def _check_message(self, message: Message) -> None:
@@ -78,7 +82,7 @@ def run(self) -> Run:
self._init_run()
return Run(**vars(cast(Run, self._run)))
- def create_message( # pylint: disable=too-many-arguments
+ def create_message( # pylint: disable=too-many-arguments,R0917
self,
content: RecordSet,
message_type: str,
@@ -146,7 +150,7 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]:
"""
msg_ids = {UUID(msg_id) for msg_id in message_ids}
# Pull TaskRes
- task_res_list = self.state.get_task_res(task_ids=msg_ids, limit=len(msg_ids))
+ task_res_list = self.state.get_task_res(task_ids=msg_ids)
# Delete tasks in state
self.state.delete_tasks(msg_ids)
# Convert TaskRes to Message
@@ -180,5 +184,5 @@ def send_and_receive(
if len(msg_ids) == 0:
break
# Sleep
- time.sleep(3)
+ time.sleep(self.pull_interval)
return ret
diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py
index a9ec05fe90e0..28a66e136639 100644
--- a/src/py/flwr/server/run_serverapp.py
+++ b/src/py/flwr/server/run_serverapp.py
@@ -181,19 +181,17 @@ def run_server_app() -> None:
)
flwr_dir = get_flwr_dir(args.flwr_dir)
run_ = driver.run
- if run_.fab_hash:
- fab_req = GetFabRequest(hash_str=run_.fab_hash)
- # pylint: disable-next=W0212
- fab_res: GetFabResponse = driver._stub.GetFab(fab_req)
- if fab_res.fab.hash_str != run_.fab_hash:
- raise ValueError("FAB hashes don't match.")
-
- install_from_fab(fab_res.fab.content, flwr_dir, True)
- fab_id, fab_version = get_fab_metadata(fab_res.fab.content)
- else:
- fab_id, fab_version = run_.fab_id, run_.fab_version
-
- app_path = str(get_project_dir(fab_id, fab_version, flwr_dir))
+ if not run_.fab_hash:
+ raise ValueError("FAB hash not provided.")
+ fab_req = GetFabRequest(hash_str=run_.fab_hash)
+ # pylint: disable-next=W0212
+ fab_res: GetFabResponse = driver._stub.GetFab(fab_req)
+ if fab_res.fab.hash_str != run_.fab_hash:
+ raise ValueError("FAB hashes don't match.")
+ install_from_fab(fab_res.fab.content, flwr_dir, True)
+ fab_id, fab_version = get_fab_metadata(fab_res.fab.content)
+
+ app_path = str(get_project_dir(fab_id, fab_version, run_.fab_hash, flwr_dir))
config = get_project_config(app_path)
else:
# User provided `app_dir`, but not `--run-id`
diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py
index e9cb4ddcaf0d..9d91be88e94e 100644
--- a/src/py/flwr/server/server_app.py
+++ b/src/py/flwr/server/server_app.py
@@ -71,7 +71,7 @@ class ServerApp:
>>> print("ServerApp running")
"""
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
def __init__(
self,
server: Optional[Server] = None,
diff --git a/src/py/flwr/server/strategy/bulyan_test.py b/src/py/flwr/server/strategy/bulyan_test.py
index c0b87c82a036..f5b7282fed2c 100644
--- a/src/py/flwr/server/strategy/bulyan_test.py
+++ b/src/py/flwr/server/strategy/bulyan_test.py
@@ -124,7 +124,7 @@ def test_aggregate_fit() -> None:
actual_aggregated, _ = strategy.aggregate_fit(
server_round=1, results=results, failures=[]
)
- if actual_aggregated:
- actual_list = parameters_to_ndarrays(actual_aggregated)
- actual = actual_list[0]
+ assert actual_aggregated
+ actual_list = parameters_to_ndarrays(actual_aggregated)
+ actual = actual_list[0]
assert (actual == expected[0]).all()
diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py
index 77e70bb9af04..c64091091c51 100644
--- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py
+++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py
@@ -88,7 +88,7 @@ class DifferentialPrivacyServerSideAdaptiveClipping(Strategy):
>>> )
"""
- # pylint: disable=too-many-arguments,too-many-instance-attributes
+ # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments
def __init__(
self,
strategy: Strategy,
@@ -307,7 +307,7 @@ class DifferentialPrivacyClientSideAdaptiveClipping(Strategy):
>>> )
"""
- # pylint: disable=too-many-arguments,too-many-instance-attributes
+ # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments
def __init__(
self,
strategy: Strategy,
diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py
index ab513aba2269..170c9d619a7d 100644
--- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py
+++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py
@@ -39,7 +39,7 @@ class DPFedAvgAdaptive(DPFedAvgFixed):
This class is deprecated and will be removed in a future release.
"""
- # pylint: disable=too-many-arguments,too-many-instance-attributes
+ # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments
def __init__(
self,
strategy: Strategy,
diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py
index 4ea84db30cd4..60f8c16f8e6d 100644
--- a/src/py/flwr/server/strategy/dpfedavg_fixed.py
+++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py
@@ -36,7 +36,7 @@ class DPFedAvgFixed(Strategy):
This class is deprecated and will be removed in a future release.
"""
- # pylint: disable=too-many-arguments,too-many-instance-attributes
+ # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments
def __init__(
self,
strategy: Strategy,
diff --git a/src/py/flwr/server/strategy/fedadagrad_test.py b/src/py/flwr/server/strategy/fedadagrad_test.py
index 96d98fe750f3..6ac217b021b4 100644
--- a/src/py/flwr/server/strategy/fedadagrad_test.py
+++ b/src/py/flwr/server/strategy/fedadagrad_test.py
@@ -79,7 +79,7 @@ def test_aggregate_fit() -> None:
actual_aggregated, _ = strategy.aggregate_fit(
server_round=1, results=results, failures=[]
)
- if actual_aggregated:
- actual_list = parameters_to_ndarrays(actual_aggregated)
- actual = actual_list[0]
+ assert actual_aggregated
+ actual_list = parameters_to_ndarrays(actual_aggregated)
+ actual = actual_list[0]
assert (actual == expected[0]).all()
diff --git a/src/py/flwr/server/strategy/fedmedian_test.py b/src/py/flwr/server/strategy/fedmedian_test.py
index 2c9881635319..bbce69c19ac5 100644
--- a/src/py/flwr/server/strategy/fedmedian_test.py
+++ b/src/py/flwr/server/strategy/fedmedian_test.py
@@ -193,7 +193,7 @@ def test_aggregate_fit() -> None:
actual_aggregated, _ = strategy.aggregate_fit(
server_round=1, results=results, failures=[]
)
- if actual_aggregated:
- actual_list = parameters_to_ndarrays(actual_aggregated)
- actual = actual_list[0]
+ assert actual_aggregated
+ actual_list = parameters_to_ndarrays(actual_aggregated)
+ actual = actual_list[0]
assert (actual == expected[0]).all()
diff --git a/src/py/flwr/server/strategy/krum_test.py b/src/py/flwr/server/strategy/krum_test.py
index dc996b480630..ac068a8e6ba6 100644
--- a/src/py/flwr/server/strategy/krum_test.py
+++ b/src/py/flwr/server/strategy/krum_test.py
@@ -194,7 +194,7 @@ def test_aggregate_fit() -> None:
actual_aggregated, _ = strategy.aggregate_fit(
server_round=1, results=results, failures=[]
)
- if actual_aggregated:
- actual_list = parameters_to_ndarrays(actual_aggregated)
- actual = actual_list[0]
+ assert actual_aggregated
+ actual_list = parameters_to_ndarrays(actual_aggregated)
+ actual = actual_list[0]
assert (actual == expected[0]).all()
diff --git a/src/py/flwr/server/strategy/multikrum_test.py b/src/py/flwr/server/strategy/multikrum_test.py
index 90607e2c0edc..d9c73fb4eb8f 100644
--- a/src/py/flwr/server/strategy/multikrum_test.py
+++ b/src/py/flwr/server/strategy/multikrum_test.py
@@ -93,7 +93,7 @@ def test_aggregate_fit() -> None:
actual_aggregated, _ = strategy.aggregate_fit(
server_round=1, results=results, failures=[]
)
- if actual_aggregated:
- actual_list = parameters_to_ndarrays(actual_aggregated)
- actual = actual_list[0]
+ assert actual_aggregated
+ actual_list = parameters_to_ndarrays(actual_aggregated)
+ actual = actual_list[0]
assert (actual == expected[0]).all()
diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py
index 3cafb3e71f7c..72c0d110ac14 100644
--- a/src/py/flwr/server/superlink/driver/driver_servicer.py
+++ b/src/py/flwr/server/superlink/driver/driver_servicer.py
@@ -155,7 +155,7 @@ def on_rpc_done() -> None:
context.add_callback(on_rpc_done)
# Read from state
- task_res_list: list[TaskRes] = state.get_task_res(task_ids=task_ids, limit=None)
+ task_res_list: list[TaskRes] = state.get_task_res(task_ids=task_ids)
context.set_code(grpc.StatusCode.OK)
return PullTaskResResponse(task_res_list=task_res_list)
diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py
index b161492000f2..9d2e13d5b107 100644
--- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py
+++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py
@@ -60,7 +60,7 @@ def valid_certificates(certificates: tuple[bytes, bytes, bytes]) -> bool:
return is_valid
-def start_grpc_server( # pylint: disable=too-many-arguments
+def start_grpc_server( # pylint: disable=too-many-arguments,R0917
client_manager: ClientManager,
server_address: str,
max_concurrent_workers: int = 1000,
@@ -156,7 +156,7 @@ def start_grpc_server( # pylint: disable=too-many-arguments
return server
-def generic_create_grpc_server( # pylint: disable=too-many-arguments
+def generic_create_grpc_server( # pylint: disable=too-many-arguments,R0917
servicer_and_add_fn: Union[
tuple[FleetServicer, AddServicerToServerFn],
tuple[GrpcAdapterServicer, AddServicerToServerFn],
@@ -174,7 +174,7 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments
Parameters
----------
- servicer_and_add_fn : Tuple
+ servicer_and_add_fn : tuple
A tuple holding a servicer implementation and a matching
add_Servicer_to_server function.
server_address : str
@@ -214,6 +214,8 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments
* CA certificate.
* server certificate.
* server private key.
+ interceptors : Optional[Sequence[grpc.ServerInterceptor]] (default: None)
+ A list of gRPC interceptors.
Returns
-------
diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py
index 8f4e18e14e28..785390534001 100644
--- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py
+++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py
@@ -172,6 +172,7 @@ def put_taskres_into_state(
pass
+# pylint: disable=too-many-positional-arguments
def run_api(
app_fn: Callable[[], ClientApp],
backend_fn: Callable[[], Backend],
@@ -251,7 +252,7 @@ def run_api(
# pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches
-# pylint: disable=too-many-statements
+# pylint: disable=too-many-statements,too-many-positional-arguments
def start_vce(
backend_name: str,
backend_config_json_stream: str,
@@ -267,6 +268,8 @@ def start_vce(
existing_nodes_mapping: Optional[NodeToPartitionMapping] = None,
) -> None:
"""Start Fleet API with the Simulation Engine."""
+ nodes_mapping = {}
+
if client_app_attr is not None and client_app is not None:
raise ValueError(
"Both `client_app_attr` and `client_app` are provided, "
@@ -340,17 +343,17 @@ def backend_fn() -> Backend:
# Load ClientApp if needed
def _load() -> ClientApp:
+ if client_app:
+ return client_app
if client_app_attr:
- app = get_load_client_app_fn(
+ return get_load_client_app_fn(
default_app_ref=client_app_attr,
app_path=app_dir,
flwr_dir=flwr_dir,
multi_app=False,
- )(run.fab_id, run.fab_version)
+ )(run.fab_id, run.fab_version, run.fab_hash)
- if client_app:
- app = client_app
- return app
+ raise ValueError("Either `client_app_attr` or `client_app` must be provided")
app_fn = _load
diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py
index 1cc3a8f128b6..bc34b825c333 100644
--- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py
+++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py
@@ -170,7 +170,7 @@ def _autoresolve_app_dir(rel_client_app_dir: str = "backend") -> str:
return str(rel_app_dir.parent / rel_client_app_dir)
-# pylint: disable=too-many-arguments
+# pylint: disable=too-many-arguments,too-many-positional-arguments
def start_and_shutdown(
backend: str = "ray",
client_app_attr: Optional[str] = None,
@@ -304,7 +304,7 @@ def test_start_and_shutdown_with_tasks_in_state(self) -> None:
# Get all TaskRes
state = state_factory.state()
task_ids = set(expected_results.keys())
- task_res_list = state.get_task_res(task_ids=task_ids, limit=len(task_ids))
+ task_res_list = state.get_task_res(task_ids=task_ids)
# Check results by first converting to Message
for task_res in task_res_list:
diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py
index e09df8dc76f6..a9c4176ee5f2 100644
--- a/src/py/flwr/server/superlink/state/in_memory_state.py
+++ b/src/py/flwr/server/superlink/state/in_memory_state.py
@@ -17,12 +17,16 @@
import threading
import time
-from logging import ERROR
+from logging import ERROR, WARNING
from typing import Optional
from uuid import UUID, uuid4
from flwr.common import log, now
-from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES
+from flwr.common.constant import (
+ MESSAGE_TTL_TOLERANCE,
+ NODE_ID_NUM_BYTES,
+ RUN_ID_NUM_BYTES,
+)
from flwr.common.typing import Run, UserConfig
from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611
from flwr.server.superlink.state.state import State
@@ -83,6 +87,7 @@ def get_task_ins(
# Find TaskIns for node_id that were not delivered yet
task_ins_list: list[TaskIns] = []
+ current_time = time.time()
with self.lock:
for _, task_ins in self.task_ins_store.items():
# pylint: disable=too-many-boolean-expressions
@@ -91,11 +96,13 @@ def get_task_ins(
and task_ins.task.consumer.anonymous is False
and task_ins.task.consumer.node_id == node_id
and task_ins.task.delivered_at == ""
+ and task_ins.task.created_at + task_ins.task.ttl > current_time
) or (
node_id is None # Anonymous
and task_ins.task.consumer.anonymous is True
and task_ins.task.consumer.node_id == 0
and task_ins.task.delivered_at == ""
+ and task_ins.task.created_at + task_ins.task.ttl > current_time
):
task_ins_list.append(task_ins)
if limit and len(task_ins_list) == limit:
@@ -109,6 +116,7 @@ def get_task_ins(
# Return TaskIns
return task_ins_list
+ # pylint: disable=R0911
def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
"""Store one TaskRes."""
# Validate task
@@ -122,6 +130,17 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
task_ins_id = task_res.task.ancestry[0]
task_ins = self.task_ins_store.get(UUID(task_ins_id))
+ # Ensure that the consumer_id of taskIns matches the producer_id of taskRes.
+ if (
+ task_ins
+ and task_res
+ and not (
+ task_ins.task.consumer.anonymous or task_res.task.producer.anonymous
+ )
+ and task_ins.task.consumer.node_id != task_res.task.producer.node_id
+ ):
+ return None
+
if task_ins is None:
log(ERROR, "TaskIns with task_id %s does not exist.", task_ins_id)
return None
@@ -134,6 +153,27 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
)
return None
+ # Fail if the TaskRes TTL exceeds the
+ # expiration time of the TaskIns it replies to.
+ # Condition: TaskIns.created_at + TaskIns.ttl ≥
+ # TaskRes.created_at + TaskRes.ttl
+ # A small tolerance is introduced to account
+ # for floating-point precision issues.
+ max_allowed_ttl = (
+ task_ins.task.created_at + task_ins.task.ttl - task_res.task.created_at
+ )
+ if task_res.task.ttl and (
+ task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE
+ ):
+ log(
+ WARNING,
+ "Received TaskRes with TTL %.2f "
+ "exceeding the allowed maximum TTL %.2f.",
+ task_res.task.ttl,
+ max_allowed_ttl,
+ )
+ return None
+
# Validate run_id
if task_res.run_id not in self.run_ids:
log(ERROR, "`run_id` is invalid")
@@ -150,27 +190,33 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
# Return the new task_id
return task_id
- def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]:
+ def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]:
"""Get all TaskRes that have not been delivered yet."""
- if limit is not None and limit < 1:
- raise AssertionError("`limit` must be >= 1")
-
with self.lock:
# Find TaskRes that were not delivered yet
task_res_list: list[TaskRes] = []
replied_task_ids: set[UUID] = set()
for _, task_res in self.task_res_store.items():
reply_to = UUID(task_res.task.ancestry[0])
+
+ # Check if corresponding TaskIns exists and is not expired
+ task_ins = self.task_ins_store.get(reply_to)
+ if task_ins is None:
+ log(WARNING, "TaskIns with task_id %s does not exist.", reply_to)
+ task_ids.remove(reply_to)
+ continue
+
+ if task_ins.task.created_at + task_ins.task.ttl <= time.time():
+ log(WARNING, "TaskIns with task_id %s is expired.", reply_to)
+ task_ids.remove(reply_to)
+ continue
+
if reply_to in task_ids and task_res.task.delivered_at == "":
task_res_list.append(task_res)
replied_task_ids.add(reply_to)
- if limit and len(task_res_list) == limit:
- break
# Check if the node is offline
for task_id in task_ids - replied_task_ids:
- if limit and len(task_res_list) == limit:
- break
task_ins = self.task_ins_store.get(task_id)
if task_ins is None:
continue
diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py
index d18683286196..6d644c3b2232 100644
--- a/src/py/flwr/server/superlink/state/sqlite_state.py
+++ b/src/py/flwr/server/superlink/state/sqlite_state.py
@@ -14,18 +14,23 @@
# ==============================================================================
"""SQLite based implemenation of server state."""
+# pylint: disable=too-many-lines
import json
import re
import sqlite3
import time
from collections.abc import Sequence
-from logging import DEBUG, ERROR
+from logging import DEBUG, ERROR, WARNING
from typing import Any, Optional, Union, cast
from uuid import UUID, uuid4
from flwr.common import log, now
-from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES
+from flwr.common.constant import (
+ MESSAGE_TTL_TOLERANCE,
+ NODE_ID_NUM_BYTES,
+ RUN_ID_NUM_BYTES,
+)
from flwr.common.typing import Run, UserConfig
from flwr.proto.node_pb2 import Node # pylint: disable=E0611
from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611
@@ -146,6 +151,11 @@ def initialize(self, log_queries: bool = False) -> list[tuple[str]]:
----------
log_queries : bool
Log each query which is executed.
+
+ Returns
+ -------
+ list[tuple[str]]
+ The list of all tables in the DB.
"""
self.conn = sqlite3.connect(self.database_path)
self.conn.execute("PRAGMA foreign_keys = ON;")
@@ -295,6 +305,7 @@ def get_task_ins(
WHERE consumer_anonymous == 1
AND consumer_node_id == 0
AND delivered_at = ""
+ AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL)
"""
else:
# Convert the uint64 value to sint64 for SQLite
@@ -307,6 +318,7 @@ def get_task_ins(
WHERE consumer_anonymous == 0
AND consumer_node_id == :node_id
AND delivered_at = ""
+ AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL)
"""
if limit is not None:
@@ -383,6 +395,37 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
)
return None
+ # Ensure the TaskIns consumer_node_id matches the TaskRes producer node_id.
+ if (
+ task_ins
+ and task_res
+ and not (task_ins["consumer_anonymous"] or task_res.task.producer.anonymous)
+ and convert_sint64_to_uint64(task_ins["consumer_node_id"])
+ != task_res.task.producer.node_id
+ ):
+ return None
+
+ # Fail if the TaskRes TTL exceeds the
+ # expiration time of the TaskIns it replies to.
+ # Condition: TaskIns.created_at + TaskIns.ttl >=
+ # TaskRes.created_at + TaskRes.ttl
+ # A small tolerance is introduced to account
+ # for floating-point precision issues.
+ max_allowed_ttl = (
+ task_ins["created_at"] + task_ins["ttl"] - task_res.task.created_at
+ )
+ if task_res.task.ttl and (
+ task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE
+ ):
+ log(
+ WARNING,
+ "Received TaskRes with TTL %.2f "
+ "exceeding the allowed maximum TTL %.2f.",
+ task_res.task.ttl,
+ max_allowed_ttl,
+ )
+ return None
+
# Store TaskRes
task_res.task_id = str(task_id)
data = (task_res_to_dict(task_res),)
@@ -405,8 +448,8 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
return task_id
- # pylint: disable-next=R0914
- def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]:
+ # pylint: disable-next=R0912,R0915,R0914
+ def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]:
"""Get TaskRes for task_ids.
Usually, the Driver API calls this method to get results for instructions it has
@@ -421,8 +464,34 @@ def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRe
will only take effect if enough task_ids are in the set AND are currently
available. If `limit` is set, it has to be greater than zero.
"""
- if limit is not None and limit < 1:
- raise AssertionError("`limit` must be >= 1")
+ # Check if corresponding TaskIns exists and is not expired
+ task_ids_placeholders = ",".join([f":id_{i}" for i in range(len(task_ids))])
+ query = f"""
+ SELECT *
+ FROM task_ins
+ WHERE task_id IN ({task_ids_placeholders})
+ AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL)
+ """
+ query += ";"
+
+ task_ins_data = {}
+ for index, task_id in enumerate(task_ids):
+ task_ins_data[f"id_{index}"] = str(task_id)
+
+ task_ins_rows = self.query(query, task_ins_data)
+
+ if not task_ins_rows:
+ return []
+
+ for row in task_ins_rows:
+ # Convert values from sint64 to uint64
+ convert_sint64_values_in_dict_to_uint64(
+ row, ["run_id", "producer_node_id", "consumer_node_id"]
+ )
+ task_ins = dict_to_task_ins(row)
+ if task_ins.task.created_at + task_ins.task.ttl <= time.time():
+ log(WARNING, "TaskIns with task_id %s is expired.", task_ins.task_id)
+ task_ids.remove(UUID(task_ins.task_id))
# Retrieve all anonymous Tasks
if len(task_ids) == 0:
@@ -438,10 +507,6 @@ def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRe
data: dict[str, Union[str, float, int]] = {}
- if limit is not None:
- query += " LIMIT :limit"
- data["limit"] = limit
-
query += ";"
for index, task_id in enumerate(task_ids):
@@ -516,9 +581,6 @@ def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRe
# Make TaskRes containing node unavailabe error
for row in task_ins_rows:
- if limit and len(result) == limit:
- break
-
for row in rows:
# Convert values from sint64 to uint64
convert_sint64_values_in_dict_to_uint64(
diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py
index 39da052fb0aa..b220aad3ebcc 100644
--- a/src/py/flwr/server/superlink/state/state.py
+++ b/src/py/flwr/server/superlink/state/state.py
@@ -98,7 +98,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]:
"""
@abc.abstractmethod
- def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]:
+ def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]:
"""Get TaskRes for task_ids.
Usually, the Driver API calls this method to get results for instructions it has
@@ -106,12 +106,6 @@ def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRe
Retrieves all TaskRes for the given `task_ids` and returns and empty list of
none could be found.
-
- Constraints
- -----------
- If `limit` is not `None`, return, at most, `limit` number of TaskRes. The limit
- will only take effect if enough task_ids are in the set AND are currently
- available. If `limit` is set, it has to be greater zero.
"""
@abc.abstractmethod
diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py
index 85cda1a5af9c..a4663f80f630 100644
--- a/src/py/flwr/server/superlink/state/state_test.py
+++ b/src/py/flwr/server/superlink/state/state_test.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Tests all state implemenations have to conform to."""
-# pylint: disable=invalid-name, disable=R0904
+# pylint: disable=invalid-name, too-many-lines, R0904, R0913
import tempfile
import time
@@ -21,6 +21,7 @@
from abc import abstractmethod
from datetime import datetime, timezone
from unittest.mock import patch
+from uuid import UUID
from flwr.common import DEFAULT_TTL
from flwr.common.constant import ErrorCode
@@ -148,18 +149,18 @@ def test_store_and_delete_tasks(self) -> None:
# Insert one TaskRes and retrive it to mark it as delivered
task_res_0 = create_task_res(
- producer_node_id=100,
+ producer_node_id=consumer_node_id,
anonymous=False,
ancestry=[str(task_id_0)],
run_id=run_id,
)
_ = state.store_task_res(task_res=task_res_0)
- _ = state.get_task_res(task_ids={task_id_0}, limit=None)
+ _ = state.get_task_res(task_ids={task_id_0})
# Insert one TaskRes, but don't retrive it
task_res_1: TaskRes = create_task_res(
- producer_node_id=100,
+ producer_node_id=consumer_node_id,
anonymous=False,
ancestry=[str(task_id_1)],
run_id=run_id,
@@ -315,8 +316,8 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None:
# Execute
task_res_uuid = state.store_task_res(task_res)
- if task_ins_id is not None:
- task_res_list = state.get_task_res(task_ids={task_ins_id}, limit=None)
+ assert task_ins_id
+ task_res_list = state.get_task_res(task_ids={task_ins_id})
# Assert
retrieved_task_res = task_res_list[0]
@@ -661,7 +662,7 @@ def test_node_unavailable_error(self) -> None:
# Create and store TaskRes
task_res_0 = create_task_res(
- producer_node_id=100,
+ producer_node_id=node_id_0,
anonymous=False,
ancestry=[str(task_id_0)],
run_id=run_id,
@@ -672,7 +673,7 @@ def test_node_unavailable_error(self) -> None:
current_time = time.time()
task_res_list: list[TaskRes] = []
with patch("time.time", side_effect=lambda: current_time + 50):
- task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None)
+ task_res_list = state.get_task_res({task_id_0, task_id_1})
# Assert
assert len(task_res_list) == 2
@@ -707,6 +708,195 @@ def test_store_task_res_task_ins_expired(self) -> None:
# Assert
assert result is None
+ def test_store_task_res_limit_ttl(self) -> None:
+ """Test the behavior of store_task_res regarding the TTL limit of TaskRes."""
+ current_time = time.time()
+
+ test_cases = [
+ (
+ current_time - 5,
+ 10,
+ current_time - 2,
+ 6,
+ True,
+ ), # TaskRes within allowed TTL
+ (
+ current_time - 5,
+ 10,
+ current_time - 2,
+ 15,
+ False,
+ ), # TaskRes TTL exceeds max allowed TTL
+ ]
+
+ for (
+ task_ins_created_at,
+ task_ins_ttl,
+ task_res_created_at,
+ task_res_ttl,
+ expected_store_result,
+ ) in test_cases:
+
+ # Prepare
+ state: State = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+
+ task_ins = create_task_ins(
+ consumer_node_id=0, anonymous=True, run_id=run_id
+ )
+ task_ins.task.created_at = task_ins_created_at
+ task_ins.task.ttl = task_ins_ttl
+ task_ins_id = state.store_task_ins(task_ins)
+
+ task_res = create_task_res(
+ producer_node_id=0,
+ anonymous=True,
+ ancestry=[str(task_ins_id)],
+ run_id=run_id,
+ )
+ task_res.task.created_at = task_res_created_at
+ task_res.task.ttl = task_res_ttl
+
+ # Execute
+ res = state.store_task_res(task_res)
+
+ # Assert
+ if expected_store_result:
+ assert res is not None
+ else:
+ assert res is None
+
+ def test_get_task_ins_not_return_expired(self) -> None:
+ """Test that get_task_ins does not return expired tasks."""
+ # Prepare
+ consumer_node_id = 1
+ state = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+ task_ins = create_task_ins(
+ consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id
+ )
+ task_ins.task.created_at = time.time() - 5
+ task_ins.task.ttl = 5.0
+
+ # Execute
+ state.store_task_ins(task_ins=task_ins)
+
+ # Assert
+ with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1):
+ task_ins_list = state.get_task_ins(node_id=1, limit=None)
+ assert len(task_ins_list) == 0
+
+ def test_get_task_res_not_return_expired(self) -> None:
+ """Test that get_task_res does not return TaskRes if its TaskIns is expired."""
+ # Prepare
+ consumer_node_id = 1
+ state = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+ task_ins = create_task_ins(
+ consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id
+ )
+ task_ins.task.created_at = time.time() - 5
+ task_ins.task.ttl = 5.1
+
+ task_id = state.store_task_ins(task_ins=task_ins)
+
+ task_res = create_task_res(
+ producer_node_id=1,
+ anonymous=False,
+ ancestry=[str(task_id)],
+ run_id=run_id,
+ )
+ task_res.task.ttl = 0.1
+ _ = state.store_task_res(task_res=task_res)
+
+ with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1):
+ # Execute
+ assert task_id is not None
+ task_res_list = state.get_task_res(task_ids={task_id})
+
+ # Assert
+ assert len(task_res_list) == 0
+
+ def test_get_task_res_returns_empty_for_missing_taskins(self) -> None:
+ """Test that get_task_res returns an empty result when the corresponding TaskIns
+ does not exist."""
+ # Prepare
+ state = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+ task_ins_id = "5b0a3fc2-edba-4525-a89a-04b83420b7c8"
+
+ task_res = create_task_res(
+ producer_node_id=1,
+ anonymous=False,
+ ancestry=[str(task_ins_id)],
+ run_id=run_id,
+ )
+ _ = state.store_task_res(task_res=task_res)
+
+ # Execute
+ task_res_list = state.get_task_res(task_ids={UUID(task_ins_id)})
+
+ # Assert
+ assert len(task_res_list) == 0
+
+ def test_get_task_res_return_if_not_expired(self) -> None:
+ """Test that get_task_res returns TaskRes if its TaskIns exists and is
+ not expired."""
+ # Prepare
+ consumer_node_id = 1
+ state = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+ task_ins = create_task_ins(
+ consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id
+ )
+ task_ins.task.created_at = time.time() - 5
+ task_ins.task.ttl = 7.1
+
+ task_id = state.store_task_ins(task_ins=task_ins)
+
+ task_res = create_task_res(
+ producer_node_id=1,
+ anonymous=False,
+ ancestry=[str(task_id)],
+ run_id=run_id,
+ )
+ task_res.task.ttl = 0.1
+ _ = state.store_task_res(task_res=task_res)
+
+ with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1):
+ # Execute
+ assert task_id is not None
+ task_res_list = state.get_task_res(task_ids={task_id})
+
+ # Assert
+ assert len(task_res_list) != 0
+
+ def test_store_task_res_fail_if_consumer_producer_id_mismatch(self) -> None:
+ """Test that store_task_res fails if there is a mismatch between the
+ consumer_node_id of TaskIns and the producer_node_id of TaskRes."""
+ # Prepare
+ consumer_node_id = 1
+ state = self.state_factory()
+ run_id = state.create_run(None, None, "9f86d08", {})
+ task_ins = create_task_ins(
+ consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id
+ )
+
+ task_id = state.store_task_ins(task_ins=task_ins)
+
+ task_res = create_task_res(
+ producer_node_id=100, # different than consumer_node_id
+ anonymous=False,
+ ancestry=[str(task_id)],
+ run_id=run_id,
+ )
+
+ # Execute
+ task_res_uuid = state.store_task_res(task_res=task_res)
+
+ # Assert
+ assert task_res_uuid is None
+
def create_task_ins(
consumer_node_id: int,
diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py
index 00ba02d2e43b..db44719c6a8a 100644
--- a/src/py/flwr/server/superlink/state/utils.py
+++ b/src/py/flwr/server/superlink/state/utils.py
@@ -100,11 +100,6 @@ def convert_uint64_values_in_dict_to_sint64(
A dictionary where the values are integers to be converted.
keys : list[str]
A list of keys in the dictionary whose values need to be converted.
-
- Returns
- -------
- None
- This function does not return a value. It modifies `data_dict` in place.
"""
for key in keys:
if key in data_dict:
@@ -122,11 +117,6 @@ def convert_sint64_values_in_dict_to_uint64(
A dictionary where the values are integers to be converted.
keys : list[str]
A list of keys in the dictionary whose values need to be converted.
-
- Returns
- -------
- None
- This function does not return a value. It modifies `data_dict` in place.
"""
for key in keys:
if key in data_dict:
diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py
index 90e932aa8015..ad9be6bd1fc0 100644
--- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py
+++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py
@@ -48,7 +48,7 @@
class RayActorClientProxy(ClientProxy):
"""Flower client proxy which delegates work using Ray."""
- def __init__( # pylint: disable=too-many-arguments
+ def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments
self,
client_fn: ClientFnExt,
node_id: int,
diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py
index 2d29629c4f01..8c4e42c34744 100644
--- a/src/py/flwr/simulation/run_simulation.py
+++ b/src/py/flwr/simulation/run_simulation.py
@@ -225,7 +225,7 @@ def run_simulation_from_cli() -> None:
# Entry point from Python session (script or notebook)
-# pylint: disable=too-many-arguments
+# pylint: disable=too-many-arguments,too-many-positional-arguments
def run_simulation(
server_app: ServerApp,
client_app: ClientApp,
@@ -300,7 +300,7 @@ def run_simulation(
)
-# pylint: disable=too-many-arguments
+# pylint: disable=too-many-arguments,too-many-positional-arguments
def run_serverapp_th(
server_app_attr: Optional[str],
server_app: Optional[ServerApp],
@@ -369,7 +369,7 @@ def server_th_with_start_checks(
return serverapp_th
-# pylint: disable=too-many-locals
+# pylint: disable=too-many-locals,too-many-positional-arguments
def _main_loop(
num_supernodes: int,
backend_name: str,
@@ -455,7 +455,7 @@ def _main_loop(
log(DEBUG, "Stopping Simulation Engine now.")
-# pylint: disable=too-many-arguments,too-many-locals
+# pylint: disable=too-many-arguments,too-many-locals,too-many-positional-arguments
def _run_simulation(
num_supernodes: int,
exit_event: EventType,
diff --git a/taplo.toml b/taplo.toml
new file mode 100644
index 000000000000..23531011a9f7
--- /dev/null
+++ b/taplo.toml
@@ -0,0 +1,24 @@
+include = ["**/*.toml"]
+exclude = ["baselines/**", "datasets/**"]
+
+[formatting]
+align_comments = false
+# Defaults below
+align_entries = false
+array_trailing_comma = true
+array_auto_expand = true
+array_auto_collapse = true
+compact_arrays = true
+compact_inline_tables = false
+inline_table_expand = true
+compact_entries = false
+column_width = 80
+indent_tables = false
+indent_entries = false
+indent_string = " "
+trailing_newline = true
+reorder_keys = false
+reorder_arrays = false
+reorder_inline_tables = false
+allowed_blank_lines = 2
+crlf = false