diff --git a/README.md b/README.md index 1b4ec1d73c..838d92caab 100644 --- a/README.md +++ b/README.md @@ -135,9 +135,6 @@ For additional Windows samples, see [Windows on GitHub](http://microsoft.github. Real-time communication SMS send and receive - - Voice over IP (VoIP) - ### Contacts and calendar diff --git a/Samples/3DPrinting/cppwinrt/Package.appxmanifest b/Samples/3DPrinting/cppwinrt/Package.appxmanifest index 429ce22abd..aff4106b9d 100644 --- a/Samples/3DPrinting/cppwinrt/Package.appxmanifest +++ b/Samples/3DPrinting/cppwinrt/Package.appxmanifest @@ -8,7 +8,7 @@ - _3DPrinting C++/WinRT Sample + 3DPrinting C++/WinRT Sample Microsoft Corporation Assets\storelogo-sdk.png @@ -21,10 +21,10 @@ diff --git a/Samples/3DPrinting/cppwinrt/Scenario1_Print.cpp b/Samples/3DPrinting/cppwinrt/Scenario1_Print.cpp index e1956584ef..d425bb6420 100644 --- a/Samples/3DPrinting/cppwinrt/Scenario1_Print.cpp +++ b/Samples/3DPrinting/cppwinrt/Scenario1_Print.cpp @@ -30,10 +30,7 @@ namespace #pragma region Buffer, stream, and string helpers void SetBufferBytes(IBuffer const& buffer, void const* data, uint32_t size) { - byte* bytes; - auto byteAccess = buffer.as<::Windows::Storage::Streams::IBufferByteAccess>(); - winrt::check_hresult(byteAccess->Buffer(&bytes)); - memcpy_s(bytes, buffer.Length(), data, size); + memcpy_s(buffer.data(), buffer.Length(), data, size); } IAsyncOperation StreamToStringAsync(IRandomAccessStream stream) diff --git a/Samples/3DPrinting/cppwinrt/Scenario2_Launch.cpp b/Samples/3DPrinting/cppwinrt/Scenario2_Launch.cpp index dca9461b27..f4c2ad7499 100644 --- a/Samples/3DPrinting/cppwinrt/Scenario2_Launch.cpp +++ b/Samples/3DPrinting/cppwinrt/Scenario2_Launch.cpp @@ -31,6 +31,8 @@ namespace winrt::SDKTemplate::implementation fire_and_forget Scenario2_Launch::CheckIf3DBuilderIsInstalled_Click(IInspectable const&, RoutedEventArgs const&) { + auto lifetime = get_strong(); + IVectorView handlers = co_await Launcher::FindFileHandlersAsync(L".3mf"); auto found = std::find_if(begin(handlers), end(handlers), [](auto&& info) { return info.PackageFamilyName() == PackageFamilyName3DBuilder; }); diff --git a/Samples/3DPrinting/cppwinrt/pch.h b/Samples/3DPrinting/cppwinrt/pch.h index c0ac0228be..2328ff869b 100644 --- a/Samples/3DPrinting/cppwinrt/pch.h +++ b/Samples/3DPrinting/cppwinrt/pch.h @@ -23,4 +23,3 @@ #include "winrt/Windows.UI.Xaml.Markup.h" #include "winrt/Windows.UI.Xaml.Media.h" #include "winrt/Windows.UI.Xaml.Navigation.h" -#include diff --git a/Samples/BasicFaceDetection/README.md b/Samples/BasicFaceDetection/README.md index b676c1a989..476ec789fc 100644 --- a/Samples/BasicFaceDetection/README.md +++ b/Samples/BasicFaceDetection/README.md @@ -3,8 +3,7 @@ page_type: sample languages: - csharp - cpp -- cppcx -- vb +- cppwinrt products: - windows - windows-uwp @@ -56,6 +55,8 @@ The FaceDetector is intended to operate on a static image or a single frame of v ### Samples +[BasicFaceDetection](/archived/BasicFaceTracking) for C++/CX and VB (archived) + [BasicFaceTracking](/Samples/BasicFaceTracking) [HolographicFaceTracking](/Samples/HolographicFaceTracking) diff --git a/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.sln b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.sln new file mode 100644 index 0000000000..071327890d --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.sln @@ -0,0 +1,43 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29920.165 +MinimumVisualStudioVersion = 
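A minimal sketch of the two C++/WinRT idioms the 3DPrinting hunks above rely on. First, the Scenario1_Print.cpp change works because `IBuffer::data()` (C++/WinRT 2.0) returns a raw `uint8_t*` into the buffer, so the manual query for `IBufferByteAccess` and the `check_hresult` call are no longer needed:

```cpp
// Sketch: SetBufferBytes as the patch leaves it. IBuffer::data() exposes the
// buffer's bytes directly, so no QI for IBufferByteAccess is required.
#include <cstring>
#include <winrt/Windows.Storage.Streams.h>

using winrt::Windows::Storage::Streams::IBuffer;

void SetBufferBytes(IBuffer const& buffer, void const* data, uint32_t size)
{
    memcpy_s(buffer.data(), buffer.Length(), data, size);
}
```

Second, the `auto lifetime = get_strong();` line added to Scenario2_Launch.cpp takes a strong self-reference so the object survives the `co_await` suspension inside a `fire_and_forget` coroutine. Sketched here on a plain `winrt::implements` type for brevity (`MyViewModel` and `m_count` are illustrative names, not from the sample; in the patch the line sits at the top of the page's click handler):

```cpp
#include <winrt/Windows.Foundation.h>
#include <winrt/Windows.Foundation.Collections.h>
#include <winrt/Windows.ApplicationModel.h>
#include <winrt/Windows.System.h>

struct MyViewModel : winrt::implements<MyViewModel, winrt::Windows::Foundation::IInspectable>
{
    winrt::fire_and_forget RefreshAsync()
    {
        // Nothing keeps a fire_and_forget coroutine's object alive across co_await,
        // so take a strong self-reference before the first suspension point.
        auto lifetime = get_strong();

        auto handlers = co_await winrt::Windows::System::Launcher::FindFileHandlersAsync(L".3mf");
        m_count = handlers.Size();   // safe: 'lifetime' kept *this alive
    }

    uint32_t m_count{};
};
```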
10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "BasicFaceDetection", "BasicFaceDetection.vcxproj", "{03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|ARM = Debug|ARM + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|ARM = Release|ARM + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|ARM.ActiveCfg = Debug|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|ARM.Build.0 = Debug|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|ARM.Deploy.0 = Debug|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x64.ActiveCfg = Debug|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x64.Build.0 = Debug|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x64.Deploy.0 = Debug|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x86.ActiveCfg = Debug|Win32 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x86.Build.0 = Debug|Win32 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Debug|x86.Deploy.0 = Debug|Win32 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|ARM.ActiveCfg = Release|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|ARM.Build.0 = Release|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|ARM.Deploy.0 = Release|ARM + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x64.ActiveCfg = Release|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x64.Build.0 = Release|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x64.Deploy.0 = Release|x64 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x86.ActiveCfg = Release|Win32 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x86.Build.0 = Release|Win32 + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D}.Release|x86.Deploy.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {B13F3350-3448-4B3D-B219-BDE8EF55A27E} + EndGlobalSection +EndGlobal diff --git a/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj new file mode 100644 index 0000000000..b97e70d6be --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj @@ -0,0 +1,188 @@ + + + + + $([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), LICENSE))\SharedContent + + + true + {03FE0C00-0A0D-58F2-AAB1-AA9014D5DF4D} + BasicFaceDetection + SDKTemplate + en-US + 15.0 + true + Windows Store + 10.0 + 10.0.18362.0 + $(WindowsTargetPlatformVersion) + + + + + Debug + ARM + + + Debug + Win32 + + + Debug + x64 + + + Release + ARM + + + Release + Win32 + + + Release + x64 + + + + Application + Unicode + + + true + true + + + false + true + false + + + + + + + + $(VC_IncludePath);$(UniversalCRT_IncludePath);$(WindowsSDK_IncludePath);$(SharedContentDir)\cppwinrt + true + + + + Use + pch.h + $(IntDir)pch.pch + Level4 + %(AdditionalOptions) /bigobj + 4453;28204 + + + + + _DEBUG;%(PreprocessorDefinitions) + + + + + NDEBUG;%(PreprocessorDefinitions) + + + + + $(SharedContentDir)\xaml\App.xaml + + + $(SharedContentDir)\xaml\MainPage.xaml + + + + ..\shared\Scenario1_DetectInPhoto.xaml + + + ..\shared\Scenario2_DetectInWebcam.xaml + + + + + + Designer + + + Designer + + + + + Styles\Styles.xaml + + + + + $(SharedContentDir)\xaml\App.xaml + + + $(SharedContentDir)\xaml\MainPage.xaml + + + SampleConfiguration.h + + + 
..\shared\Scenario1_DetectInPhoto.xaml + + + ..\shared\Scenario2_DetectInWebcam.xaml + + + Create + pch.h + + + Project.idl + + + + + $(SharedContentDir)\xaml\MainPage.xaml + + + + + + Designer + + + + + + Assets\microsoft-sdk.png + + + Assets\smallTile-sdk.png + + + Assets\splash-sdk.png + + + Assets\squareTile-sdk.png + + + Assets\storeLogo-sdk.png + + + Assets\tile-sdk.png + + + Assets\windows-sdk.png + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + + \ No newline at end of file diff --git a/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj.filters b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj.filters new file mode 100644 index 0000000000..1ddb709a19 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/BasicFaceDetection.vcxproj.filters @@ -0,0 +1,63 @@ + + + + + 4416d50a-7676-4d0a-9b2c-91ff70c6047f + bmp;fbx;gif;jpg;jpeg;tga;tiff;tif;png + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Assets + + + Assets + + + Assets + + + Assets + + + Assets + + + Assets + + + Assets + + + + + + + + + \ No newline at end of file diff --git a/Samples/BasicFaceDetection/cppwinrt/Package.appxmanifest b/Samples/BasicFaceDetection/cppwinrt/Package.appxmanifest new file mode 100644 index 0000000000..96950dec2a --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Package.appxmanifest @@ -0,0 +1,42 @@ + + + + + + BasicFaceDetection C++/WinRT Sample + Microsoft Corporation + Assets\storelogo-sdk.png + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Samples/BasicFaceDetection/cppwinrt/Project.idl b/Samples/BasicFaceDetection/cppwinrt/Project.idl new file mode 100644 index 0000000000..d3a1c66ed6 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Project.idl @@ -0,0 +1,25 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +namespace SDKTemplate +{ + [default_interface] + runtimeclass Scenario1_DetectInPhoto : Windows.UI.Xaml.Controls.Page + { + Scenario1_DetectInPhoto(); + } + + [default_interface] + runtimeclass Scenario2_DetectInWebcam : Windows.UI.Xaml.Controls.Page + { + Scenario2_DetectInWebcam(); + } +} diff --git a/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.cpp b/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.cpp new file mode 100644 index 0000000000..2378c63efb --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.cpp @@ -0,0 +1,99 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. 
+// +//********************************************************* + +#include "pch.h" +#include +#include "MainPage.h" +#include "SampleConfiguration.h" + +using namespace winrt; +using namespace winrt::Windows::Foundation::Collections; +using namespace winrt::Windows::Graphics::Imaging; +using namespace winrt::Windows::Media::FaceAnalysis; +using namespace winrt::Windows::UI::Xaml; +using namespace winrt::Windows::UI::Xaml::Controls; +using namespace winrt::Windows::UI::Xaml::Media::Imaging; +using namespace winrt::SDKTemplate; + +hstring implementation::MainPage::FEATURE_NAME() +{ + return L"Face detection C++/WinRT sample"; +} + +IVector implementation::MainPage::scenariosInner = winrt::single_threaded_observable_vector( +{ + Scenario{ L"Detect Faces in Photos", xaml_typename() }, + Scenario{ L"Detect Faces in Webcam", xaml_typename() }, +}); + +// Rescale the size and position of the face highlight +// to account for the difference between the size of the image and +// the canvas. +void ApplyScale(FrameworkElement const& box, double widthScale, double heightScale) +{ + // We saved the original size of the face box in the element's Tag field. + BitmapBounds faceBox = unbox_value(box.Tag()); + box.Width(faceBox.Width * widthScale); + box.Height(faceBox.Height * heightScale); + box.Margin({ faceBox.X * widthScale, faceBox.Y * heightScale, 0, 0 }); +} + +void SampleHelpers::HighlightFaces( + WriteableBitmap const& displaySource, + IVector const& foundFaces, + Canvas const& canvas, + DataTemplate const& dataTemplate) +{ + + double widthScale = canvas.ActualWidth() / displaySource.PixelWidth(); + double heightScale = canvas.ActualHeight() / displaySource.PixelHeight(); + + for (DetectedFace face : foundFaces) + { + // Create an element for displaying the face box but since we're using a Canvas + // we must scale the elements according to the image's actual size. + // The original FaceBox values are saved in the element's Tag field so we can update the + // boxes when the Canvas is resized. + FrameworkElement box = dataTemplate.LoadContent().as(); + box.Tag(box_value(face.FaceBox())); + ApplyScale(box, widthScale, heightScale); + canvas.Children().Append(box); + } + + hstring message; + int32_t faceCount = foundFaces.Size(); + if (faceCount == 0) + { + message = L"Didn't find any human faces in the image"; + } + else if (faceCount == 1) + { + message = L"Found a human face in the image"; + } + else + { + message = L"Found " + to_hstring(faceCount) + L" human faces in the image"; + } + + MainPage::Current().NotifyUser(message, NotifyType::StatusMessage); + +} + +void SampleHelpers::RepositionFaces(WriteableBitmap const& displaySource, Canvas const& canvas) +{ + double widthScale = canvas.ActualWidth() / displaySource.PixelWidth(); + double heightScale = canvas.ActualHeight() / displaySource.PixelHeight(); + + for (UIElement element : canvas.Children()) + { + ApplyScale(element.as(), widthScale, heightScale); + } +} diff --git a/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.h b/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.h new file mode 100644 index 0000000000..3bdb020c07 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/SampleConfiguration.h @@ -0,0 +1,25 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). 
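A sketch of the SampleConfiguration helpers above with their full template arguments spelled out. The angle-bracketed parameters do not appear in the patch text and are inferred from the element types the helpers handle (DetectedFace items, a BitmapBounds stored in Tag, a FrameworkElement loaded from the DataTemplate); treat them as assumptions:

```cpp
#include <winrt/Windows.Foundation.Collections.h>
#include <winrt/Windows.Graphics.Imaging.h>
#include <winrt/Windows.Media.FaceAnalysis.h>
#include <winrt/Windows.UI.Xaml.h>
#include <winrt/Windows.UI.Xaml.Controls.h>
#include <winrt/Windows.UI.Xaml.Media.Imaging.h>

namespace winrt::SDKTemplate::SampleHelpers
{
    void HighlightFaces(
        Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource,
        Windows::Foundation::Collections::IVector<Windows::Media::FaceAnalysis::DetectedFace> const& foundFaces,
        Windows::UI::Xaml::Controls::Canvas const& canvas,
        Windows::UI::Xaml::DataTemplate const& dataTemplate);

    void RepositionFaces(
        Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource,
        Windows::UI::Xaml::Controls::Canvas const& canvas);
}

// Corresponding calls inside SampleConfiguration.cpp, with the presumed arguments:
//   BitmapBounds faceBox = unbox_value<BitmapBounds>(box.Tag());
//   FrameworkElement box = dataTemplate.LoadContent().as<FrameworkElement>();
//   IVector<Scenario> MainPage::scenariosInner =
//       single_threaded_observable_vector<Scenario>({ Scenario{ L"Detect Faces in Photos",
//           xaml_typename<SDKTemplate::Scenario1_DetectInPhoto>() }, /* ... */ });
```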
+// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +#pragma once +#include "pch.h" + +namespace winrt::SDKTemplate::SampleHelpers +{ + void HighlightFaces( + Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource, + Windows::Foundation::Collections::IVector const& foundFaces, + Windows::UI::Xaml::Controls::Canvas const& canvas, + Windows::UI::Xaml::DataTemplate const& dataTemplate); + void RepositionFaces( + Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource, + Windows::UI::Xaml::Controls::Canvas const& canvas); +} diff --git a/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.cpp b/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.cpp new file mode 100644 index 0000000000..9bbf7e3b19 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.cpp @@ -0,0 +1,156 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +#include "pch.h" +#include "Scenario1_DetectInPhoto.h" +#include "Scenario1_DetectInPhoto.g.cpp" +#include "SampleConfiguration.h" + +using namespace winrt; +using namespace winrt::Windows::Foundation; +using namespace winrt::Windows::Foundation::Collections; +using namespace winrt::Windows::Graphics::Imaging; +using namespace winrt::Windows::Media::FaceAnalysis; +using namespace winrt::Windows::Storage; +using namespace winrt::Windows::Storage::Pickers; +using namespace winrt::Windows::Storage::Streams; +using namespace winrt::Windows::UI::Xaml; +using namespace winrt::Windows::UI::Xaml::Media; +using namespace winrt::Windows::UI::Xaml::Media::Imaging; + +namespace winrt::SDKTemplate::implementation +{ + Scenario1_DetectInPhoto::Scenario1_DetectInPhoto() + { + InitializeComponent(); + } + + // Takes the photo image and FaceDetector results and assembles the visualization onto the Canvas. + // + // displaySource is a bitmap object holding the image we're going to display. + // foundFaces is a list of detected faces; output from FaceDetector. + void Scenario1_DetectInPhoto::SetupVisualization(WriteableBitmap const& displaySource, IVector const& foundFaces) + { + ImageBrush brush; + brush.ImageSource(displaySource); + brush.Stretch(Stretch::Fill); + PhotoCanvas().Background(brush); + + SampleHelpers::HighlightFaces(displaySource, foundFaces, PhotoCanvas(), HighlightedFaceBox()); + } + + // Clears the display of image and face boxes. + void Scenario1_DetectInPhoto::ClearVisualization() + { + PhotoCanvas().Background(nullptr); + PhotoCanvas().Children().Clear(); + rootPage.NotifyUser(L"", NotifyType::StatusMessage); + } + + // Computes a BitmapTransform to downscale the source image if it's too large. + // + // Performance of the FaceDetector degrades significantly with large images, and in most cases it's best to downscale + // the source bitmaps if they're too large before passing them into FaceDetector. Remember through, your application's performance needs will vary. 
+ BitmapTransform Scenario1_DetectInPhoto::ComputeScalingTransformForSourceImage(BitmapDecoder const& sourceDecoder) + { + BitmapTransform transform; + + uint32_t sourceHeight = sourceDecoder.PixelHeight(); + if (sourceHeight > sourceImageHeightLimit) + { + double scalingFactor = (double)sourceImageHeightLimit / sourceHeight; + + transform.ScaledWidth((uint32_t)(sourceDecoder.PixelWidth() * scalingFactor)); + transform.ScaledHeight((uint32_t)(sourceHeight * scalingFactor)); + } + + return transform; + } + + // Loads an image file (selected by the user) and runs the FaceDetector on the loaded bitmap. If successful calls SetupVisualization to display the results. + fire_and_forget Scenario1_DetectInPhoto::OpenFile_Click(IInspectable const&, RoutedEventArgs const&) + { + auto lifetime = get_strong(); + + FileOpenPicker photoPicker; + photoPicker.ViewMode(PickerViewMode::Thumbnail); + photoPicker.SuggestedStartLocation(PickerLocationId::PicturesLibrary); + photoPicker.FileTypeFilter().ReplaceAll({ L".jpg", L".jpeg", L".png", L".bmp" }); + + StorageFile photoFile = co_await photoPicker.PickSingleFileAsync(); + if (photoFile == nullptr) + { + co_return; + } + + ClearVisualization(); + rootPage.NotifyUser(L"Opening...", NotifyType::StatusMessage); + + try + { + // Open the image file and decode the bitmap into memory. + // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display. + IRandomAccessStream fileStream = co_await photoFile.OpenAsync(FileAccessMode::Read); + { + BitmapDecoder decoder = co_await BitmapDecoder::CreateAsync(fileStream); + BitmapTransform transform = ComputeScalingTransformForSourceImage(decoder); + + SoftwareBitmap originalBitmap = co_await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat(), BitmapAlphaMode::Ignore, transform, ExifOrientationMode::IgnoreExifOrientation, ColorManagementMode::DoNotColorManage); + + // We need to convert the image into a format that's compatible with FaceDetector. + // Gray8 should be a good type but verify it against FaceDetector’s supported formats. + static constexpr BitmapPixelFormat InputPixelFormat = BitmapPixelFormat::Gray8; + if (FaceDetector::IsBitmapPixelFormatSupported(InputPixelFormat)) + { + SoftwareBitmap detectorInput = SoftwareBitmap::Convert(originalBitmap, InputPixelFormat); + + // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer. + WriteableBitmap displaySource(originalBitmap.PixelWidth(), originalBitmap.PixelHeight()); + originalBitmap.CopyToBuffer(displaySource.PixelBuffer()); + + rootPage.NotifyUser(L"Detecting...", NotifyType::StatusMessage); + + // Initialize our FaceDetector and execute it against our input image. + // NOTE: FaceDetector initialization can take a long time, and in most cases + // you should create a member variable and reuse the object. + // However, for simplicity in this scenario we instantiate a new instance each time. + FaceDetector detector = co_await FaceDetector::CreateAsync(); + IVector faces = co_await detector.DetectFacesAsync(detectorInput); + + // Create our display using the available image and face results. 
+ SetupVisualization(displaySource, faces); + } + else + { + rootPage.NotifyUser(L"PixelFormat 'Gray8' is not supported by FaceDetector", NotifyType::ErrorMessage); + } + } + } + catch (hresult_error const& ex) + { + ClearVisualization(); + rootPage.NotifyUser(ex.message(), NotifyType::ErrorMessage); + } + } + + // Updates any existing face bounding boxes in response to changes in the size of the Canvas. + void Scenario1_DetectInPhoto::PhotoCanvas_SizeChanged(IInspectable const&, SizeChangedEventArgs const&) + { + // If the Canvas is resized we must recompute a new scaling factor and + // apply it to each face box. + ImageBrush brush = PhotoCanvas().Background().try_as(); + if (brush != nullptr) + { + WriteableBitmap displaySource = brush.ImageSource().as(); + SampleHelpers::RepositionFaces(displaySource, PhotoCanvas()); + } + } +} diff --git a/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.h b/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.h new file mode 100644 index 0000000000..d46e303e13 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Scenario1_DetectInPhoto.h @@ -0,0 +1,48 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +#pragma once + +#include "Scenario1_DetectInPhoto.g.h" +#include "MainPage.h" + +namespace winrt::SDKTemplate::implementation +{ + struct Scenario1_DetectInPhoto : Scenario1_DetectInPhotoT + { + Scenario1_DetectInPhoto(); + + fire_and_forget OpenFile_Click(Windows::Foundation::IInspectable const& sender, Windows::UI::Xaml::RoutedEventArgs const& e); + void PhotoCanvas_SizeChanged(Windows::Foundation::IInspectable const& sender, Windows::UI::Xaml::SizeChangedEventArgs const& e); + + private: + SDKTemplate::MainPage rootPage{ MainPage::Current() }; + + // Limit on the height of the source image (in pixels) passed into FaceDetector for performance considerations. + // Images larger that this size will be downscaled proportionally. + // + // This is an arbitrary value that was chosen for this scenario, in which FaceDetector performance isn't too important but face + // detection accuracy is; a generous size is used. + // Your application may have different performance and accuracy needs and you'll need to decide how best to control input. 
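The comment above explains why the photo is downscaled before detection. As a quick numeric illustration of the transform built by ComputeScalingTransformForSourceImage, here is a standalone check against the 1280-pixel height limit declared just below, using a hypothetical 4032×3024 photo:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    constexpr uint32_t sourceImageHeightLimit = 1280;   // same limit as the scenario
    uint32_t pixelWidth = 4032, pixelHeight = 3024;     // hypothetical 12 MP photo

    // Mirrors the math in ComputeScalingTransformForSourceImage.
    double scalingFactor = (double)sourceImageHeightLimit / pixelHeight;  // ~0.423
    uint32_t scaledWidth = (uint32_t)(pixelWidth * scalingFactor);        // roughly 1706
    uint32_t scaledHeight = (uint32_t)(pixelHeight * scalingFactor);      // roughly 1280

    std::printf("%u x %u -> %u x %u\n", pixelWidth, pixelHeight, scaledWidth, scaledHeight);
}
```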
+ static constexpr uint32_t sourceImageHeightLimit = 1280; + + void SetupVisualization(Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource, Windows::Foundation::Collections::IVector const& foundFaces); + void ClearVisualization(); + Windows::Graphics::Imaging::BitmapTransform ComputeScalingTransformForSourceImage(Windows::Graphics::Imaging::BitmapDecoder const& sourceDecoder); + }; +} + +namespace winrt::SDKTemplate::factory_implementation +{ + struct Scenario1_DetectInPhoto : Scenario1_DetectInPhotoT + { + }; +} diff --git a/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.cpp b/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.cpp new file mode 100644 index 0000000000..797fcbdc86 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.cpp @@ -0,0 +1,307 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +#include "pch.h" +#include "Scenario2_DetectInWebcam.h" +#include "Scenario2_DetectInWebcam.g.cpp" +#include "SampleConfiguration.h" + +using namespace winrt; +using namespace winrt::Windows::ApplicationModel; +using namespace winrt::Windows::Foundation; +using namespace winrt::Windows::Foundation::Collections; +using namespace winrt::Windows::Graphics::Imaging; +using namespace winrt::Windows::Media; +using namespace winrt::Windows::Media::Capture; +using namespace winrt::Windows::Media::FaceAnalysis; +using namespace winrt::Windows::Media::MediaProperties; +using namespace winrt::Windows::UI::Xaml; +using namespace winrt::Windows::UI::Xaml::Media; +using namespace winrt::Windows::UI::Xaml::Media::Imaging; +using namespace winrt::Windows::UI::Xaml::Navigation; + +namespace winrt::SDKTemplate::implementation +{ + Scenario2_DetectInWebcam::Scenario2_DetectInWebcam() + { + InitializeComponent(); + } + + void Scenario2_DetectInWebcam::OnNavigatedTo(NavigationEventArgs const&) + { + suspendingEventToken = Application::Current().Suspending({ get_weak(), &Scenario2_DetectInWebcam::OnSuspending }); + } + + void Scenario2_DetectInWebcam::OnNavigatedFrom(NavigationEventArgs const&) + { + Application::Current().Suspending(suspendingEventToken); + } + + // Responds to App Suspend event to stop/release MediaCapture object if it's running and return to Idle state. + fire_and_forget Scenario2_DetectInWebcam::OnSuspending(IInspectable const&, SuspendingEventArgs const& e) + { + auto lifetime = get_strong(); + + if (currentState == ScenarioState::Streaming) + { + auto deferral = e.SuspendingOperation().GetDeferral(); + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + deferral.Complete(); + } + } + + // Creates the FaceDetector object which we will use for face detection. + // Initializes a new MediaCapture instance and starts the Preview streaming to the CamPreview UI element. + // Completes with true if initialization and streaming were successful and false if an exception occurred. 
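A compressed sketch of the registration pattern used by OnNavigatedTo/OnNavigatedFrom above (the MyPage name stands in for the generated page types): the handler is registered with a weak reference so the app-lifetime Suspending event cannot keep a navigated-away page alive, and the stored event_token is what revokes it later. Using winrt::auto_revoke with an event-revoker member would be an alternative to storing the token manually.

```cpp
// Sketch only; MyPage/MyPageT are illustrative, not the sample's generated types.
struct MyPage : MyPageT<MyPage>
{
    winrt::event_token suspendingEventToken;

    void OnNavigatedTo(winrt::Windows::UI::Xaml::Navigation::NavigationEventArgs const&)
    {
        // { get_weak(), &MyPage::OnSuspending } builds a delegate holding only a weak
        // reference to the page; once the page is destroyed the handler is a no-op.
        suspendingEventToken = winrt::Windows::UI::Xaml::Application::Current().Suspending(
            { get_weak(), &MyPage::OnSuspending });
    }

    void OnNavigatedFrom(winrt::Windows::UI::Xaml::Navigation::NavigationEventArgs const&)
    {
        // Passing the saved token back to the event unhooks the handler.
        winrt::Windows::UI::Xaml::Application::Current().Suspending(suspendingEventToken);
    }

    winrt::fire_and_forget OnSuspending(
        winrt::Windows::Foundation::IInspectable const&,
        winrt::Windows::ApplicationModel::SuspendingEventArgs const&);
};
```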
+ IAsyncOperation Scenario2_DetectInWebcam::StartWebcamStreamingAsync() + { + auto lifetime = get_strong(); + + bool successful; + + faceDetector = co_await FaceDetector::CreateAsync(); + try + { + MediaCapture mediaCapture; + agileMediaCapture = mediaCapture; + MediaCaptureInitializationSettings settings; + + // For this scenario, we only need Video (not microphone) so specify this in the initializer. + // NOTE: the appxmanifest only declares "webcam" under capabilities and if this is changed to include + // microphone (default constructor) you must add "microphone" to the manifest or initialization will fail. + settings.StreamingCaptureMode(StreamingCaptureMode::Video); + + co_await mediaCapture.InitializeAsync(settings); + mediaCapture.CameraStreamStateChanged({ get_weak(), &Scenario2_DetectInWebcam::MediaCapture_CameraStreamStateChanged }); + + // Cache the media properties as we'll need them later. + auto deviceController = mediaCapture.VideoDeviceController(); + videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType::VideoPreview).as(); + + // Immediately start streaming to our CaptureElement UI. + // NOTE: CaptureElement's Source must be set before streaming is started. + CamPreview().Source(mediaCapture); + co_await mediaCapture.StartPreviewAsync(); + + successful = true; + } + catch (hresult_access_denied const&) + { + // If the user has disabled their webcam this exception is thrown; provide a descriptive message to inform the user of this fact. + rootPage.NotifyUser(L"Webcam is disabled or access to the webcam is disabled for this app.\nEnsure Privacy Settings allow webcam usage.", NotifyType::ErrorMessage); + successful = false; + } + catch (hresult_error const& ex) + { + rootPage.NotifyUser(ex.message(), NotifyType::ErrorMessage); + successful = false; + } + + co_return successful; + } + + // Safely stops webcam streaming (if running) and releases MediaCapture object. + IAsyncAction Scenario2_DetectInWebcam::ShutdownWebcamAsync() + { + auto lifetime = get_strong(); + + if (agileMediaCapture) + { + auto mediaCapture = agileMediaCapture.get(); + if (mediaCapture.CameraStreamState() == Windows::Media::Devices::CameraStreamState::Streaming) + { + try + { + co_await mediaCapture.StopPreviewAsync(); + } + catch (hresult_error const&) + { + // Since we're going to destroy the MediaCapture object there's nothing to do here + } + } + mediaCapture.Close(); + } + + CamPreview().Source(nullptr); + agileMediaCapture = nullptr; + } + + // Captures a single frame from the running webcam stream and executes the FaceDetector on the image. If successful calls SetupVisualization to display the results. + IAsyncOperation Scenario2_DetectInWebcam::TakeSnapshotAndFindFacesAsync() + { + auto lifetime = get_strong(); + + if (currentState != ScenarioState::Streaming) + { + co_return false; + } + + bool successful = false; + + try + { + // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case). + // GetPreviewFrame will convert the native webcam frame into this format. 
+ static constexpr BitmapPixelFormat InputPixelFormat = BitmapPixelFormat::Nv12; + VideoFrame previewFrame(InputPixelFormat, (int)videoProperties.Width(), (int)videoProperties.Height()); + + co_await agileMediaCapture.get().GetPreviewFrameAsync(previewFrame); + + // The returned VideoFrame should be in the supported NV12 format but we need to verify + if (FaceDetector::IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap().BitmapPixelFormat())) + { + IVector faces = co_await faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap()); + + // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer. + // Note that WriteableBitmap doesn't support NV12 and we have to convert it to 32-bit BGRA. + SoftwareBitmap convertedSource = SoftwareBitmap::Convert(previewFrame.SoftwareBitmap(), BitmapPixelFormat::Bgra8); + WriteableBitmap displaySource(convertedSource.PixelWidth(), convertedSource.PixelHeight()); + convertedSource.CopyToBuffer(displaySource.PixelBuffer()); + + // Create our display using the available image and face results. + SetupVisualization(displaySource, faces); + + successful = true; + } + else + { + rootPage.NotifyUser(L"PixelFormat 'NV12' is not supported by FaceDetector", NotifyType::ErrorMessage); + } + } + catch (hresult_error const& ex) + { + rootPage.NotifyUser(ex.message(), NotifyType::ErrorMessage); + } + + co_return successful; + } + + // Takes the webcam image and FaceTracker results and assembles the visualization onto the Canvas. + void Scenario2_DetectInWebcam::SetupVisualization(WriteableBitmap const& displaySource, IVector const& foundFaces) + { + ImageBrush brush; + brush.ImageSource(displaySource); + brush.Stretch(Stretch::Fill); + SnapshotCanvas().Background(brush); + + SampleHelpers::HighlightFaces(displaySource, foundFaces, SnapshotCanvas(), HighlightedFaceBox()); + } + + // Manages the scenario's internal state. Invokes the internal methods and updates the UI according to the + // passed in state value. Handles failures and resets the state if necessary. + IAsyncAction Scenario2_DetectInWebcam::ChangeScenarioStateAsync(ScenarioState newState) + { + auto lifetime = get_strong(); + + switch (newState) + { + case ScenarioState::Idle: + + CameraSnapshotButton().IsEnabled(false); + currentState = newState; + co_await ShutdownWebcamAsync(); + + SnapshotCanvas().Background(nullptr); + SnapshotCanvas().Children().Clear(); + CameraStreamingButton().Content(box_value(L"Start Streaming")); + CameraSnapshotButton().Content(box_value(L"Take Snapshot")); + break; + + case ScenarioState::Streaming: + + if (!co_await StartWebcamStreamingAsync()) + { + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + break; + } + + SnapshotCanvas().Background(nullptr); + SnapshotCanvas().Children().Clear(); + CameraSnapshotButton().IsEnabled(true); + CameraStreamingButton().Content(box_value(L"Stop Streaming")); + CameraSnapshotButton().Content(box_value(L"Take Snapshot")); + currentState = newState; + break; + + case ScenarioState::Snapshot: + + if (!co_await TakeSnapshotAndFindFacesAsync()) + { + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + break; + } + + co_await ShutdownWebcamAsync(); + + CameraSnapshotButton().IsEnabled(true); + CameraStreamingButton().Content(box_value(L"Start Streaming")); + CameraSnapshotButton().Content(box_value(L"Clear Display")); + currentState = newState; + break; + } + } + + // Handles MediaCapture changes by shutting down streaming and returning to Idle state. 
+ fire_and_forget Scenario2_DetectInWebcam::MediaCapture_CameraStreamStateChanged(MediaCapture const&, IInspectable const& args) + { + auto lifetime = get_strong(); + + // MediaCapture is not Agile and so we cannot invoke it's methods on this caller's thread + // and instead need to schedule the state change on the UI thread. + co_await resume_foreground(Dispatcher()); + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + } + + // Handles "streaming" button clicks to start/stop webcam streaming. + fire_and_forget Scenario2_DetectInWebcam::CameraStreamingButton_Click(IInspectable const&, RoutedEventArgs const&) + { + auto lifetime = get_strong(); + + rootPage.NotifyUser(L"", NotifyType::StatusMessage); + if (currentState == ScenarioState::Streaming) + { + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + } + else + { + co_await ChangeScenarioStateAsync(ScenarioState::Streaming); + } + } + + // Handles "snapshot" button clicks to take a snapshot or clear the current display. + fire_and_forget Scenario2_DetectInWebcam::CameraSnapshotButton_Click(IInspectable const&, RoutedEventArgs const&) + { + auto lifetime = get_strong(); + + rootPage.NotifyUser(L"", NotifyType::StatusMessage); + if (currentState == ScenarioState::Streaming) + { + co_await ChangeScenarioStateAsync(ScenarioState::Snapshot); + } + else + { + co_await ChangeScenarioStateAsync(ScenarioState::Idle); + } + } + + // Updates any existing face bounding boxes in response to changes in the size of the Canvas. + void Scenario2_DetectInWebcam::SnapshotCanvas_SizeChanged(IInspectable const&, SizeChangedEventArgs const&) + { + // If the Canvas is resized we must recompute a new scaling factor and + // apply it to each face box. + ImageBrush brush = SnapshotCanvas().Background().try_as(); + if (brush != nullptr) + { + WriteableBitmap displaySource = brush.ImageSource().as(); + SampleHelpers::RepositionFaces(displaySource, SnapshotCanvas()); + } + } +} diff --git a/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.h b/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.h new file mode 100644 index 0000000000..89a353cd8e --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/Scenario2_DetectInWebcam.h @@ -0,0 +1,81 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. 
+// +//********************************************************* + +#pragma once + +#include "Scenario2_DetectInWebcam.g.h" +#include "MainPage.h" + +namespace winrt::SDKTemplate::implementation +{ + struct Scenario2_DetectInWebcam : Scenario2_DetectInWebcamT + { + Scenario2_DetectInWebcam(); + + void OnNavigatedTo(Windows::UI::Xaml::Navigation::NavigationEventArgs const& e); + void OnNavigatedFrom(Windows::UI::Xaml::Navigation::NavigationEventArgs const& e); + + fire_and_forget CameraStreamingButton_Click(Windows::Foundation::IInspectable const& sender, Windows::UI::Xaml::RoutedEventArgs const& e); + fire_and_forget CameraSnapshotButton_Click(Windows::Foundation::IInspectable const& sender, Windows::UI::Xaml::RoutedEventArgs const& e); + void SnapshotCanvas_SizeChanged(Windows::Foundation::IInspectable const& sender, Windows::UI::Xaml::SizeChangedEventArgs const& e); + + private: + // Values for identifying and controlling scenario states. + enum ScenarioState + { + // Display is blank - default state. + Idle, + + // Webcam is actively engaged and a live video stream is displayed. + Streaming, + + // Snapshot image has been captured and is being displayed along with detected faces; webcam is not active. + Snapshot, + }; + + private: + // Reference back to the "root" page of the app. + SDKTemplate::MainPage rootPage{ MainPage::Current() }; + + // Holds the current scenario state value. + ScenarioState currentState = ScenarioState::Idle; + + // References a MediaCapture instance; is null when not in Streaming state. + agile_ref agileMediaCapture{ nullptr }; + + // Cache of properties from the current MediaCapture device which is used for capturing the preview frame. + Windows::Media::MediaProperties::VideoEncodingProperties videoProperties{ nullptr }; + + // References a FaceDetector instance. + Windows::Media::FaceAnalysis::FaceDetector faceDetector{ nullptr }; + + // Event registration tokens. 
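The agileMediaCapture member declared a few lines above is presumably an agile_ref over MediaCapture; since MediaCapture is not agile (as the camera-stream-state handler later notes), wrapping it lets the cached instance be resolved safely from whichever thread needs it. A sketch with the inferred template arguments (the holder struct is illustrative, not the sample's page class):

```cpp
#include <winrt/Windows.Media.Capture.h>

struct WebcamState   // illustrative holder for the member shown above
{
    // Presumed full declaration: agile_ref<MediaCapture>, null while not streaming.
    winrt::agile_ref<winrt::Windows::Media::Capture::MediaCapture> agileMediaCapture{ nullptr };
};

// Mirroring the scenario code earlier in this patch (arguments inferred):
//   agileMediaCapture = mediaCapture;              // store the instance created in StartWebcamStreamingAsync
//   auto mediaCapture = agileMediaCapture.get();   // resolve it where it is used
//   co_await mediaCapture.StopPreviewAsync();
//
// Other presumably elided template arguments nearby:
//   IAsyncOperation<bool> StartWebcamStreamingAsync();
//   videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType::VideoPreview)
//                         .as<Windows::Media::MediaProperties::VideoEncodingProperties>();
```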
+ event_token suspendingEventToken; + + private: + fire_and_forget OnSuspending(Windows::Foundation::IInspectable const& sender, Windows::ApplicationModel::SuspendingEventArgs const& e); + Windows::Foundation::IAsyncOperation StartWebcamStreamingAsync(); + Windows::Foundation::IAsyncAction ShutdownWebcamAsync(); + Windows::Foundation::IAsyncOperation TakeSnapshotAndFindFacesAsync(); + void SetupVisualization(Windows::UI::Xaml::Media::Imaging::WriteableBitmap const& displaySource, Windows::Foundation::Collections::IVector const& foundFaces); + Windows::Foundation::IAsyncAction ChangeScenarioStateAsync(ScenarioState newState); + fire_and_forget AbandonStreaming(); + fire_and_forget MediaCapture_CameraStreamStateChanged(Windows::Media::Capture::MediaCapture const& sender, Windows::Foundation::IInspectable const& args); + + }; +} + +namespace winrt::SDKTemplate::factory_implementation +{ + struct Scenario2_DetectInWebcam : Scenario2_DetectInWebcamT + { + }; +} diff --git a/SharedContent/Templates/UWPSDKSampleCPP/packages.config b/Samples/BasicFaceDetection/cppwinrt/packages.config similarity index 59% rename from SharedContent/Templates/UWPSDKSampleCPP/packages.config rename to Samples/BasicFaceDetection/cppwinrt/packages.config index 605fbab91d..3b87ab00fd 100644 --- a/SharedContent/Templates/UWPSDKSampleCPP/packages.config +++ b/Samples/BasicFaceDetection/cppwinrt/packages.config @@ -1,4 +1,4 @@  - + \ No newline at end of file diff --git a/Samples/BasicFaceDetection/cppwinrt/pch.cpp b/Samples/BasicFaceDetection/cppwinrt/pch.cpp new file mode 100644 index 0000000000..01484ff5aa --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/pch.cpp @@ -0,0 +1,6 @@ +// +// pch.cpp +// Include the standard header and generate the precompiled header. +// + +#include "pch.h" diff --git a/Samples/BasicFaceDetection/cppwinrt/pch.h b/Samples/BasicFaceDetection/cppwinrt/pch.h new file mode 100644 index 0000000000..986d2c64b4 --- /dev/null +++ b/Samples/BasicFaceDetection/cppwinrt/pch.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include "winrt/Windows.Foundation.h" +#include "winrt/Windows.Foundation.Collections.h" +#include "winrt/Windows.ApplicationModel.h" +#include "winrt/Windows.ApplicationModel.Activation.h" +#include "winrt/Windows.Graphics.Imaging.h" +#include "winrt/Windows.Media.h" +#include "winrt/Windows.Media.Capture.h" +#include "winrt/Windows.Media.Devices.h" +#include "winrt/Windows.Media.FaceAnalysis.h" +#include "winrt/Windows.Media.MediaProperties.h" +#include "winrt/Windows.Storage.h" +#include "winrt/Windows.Storage.Pickers.h" +#include "winrt/Windows.Storage.Streams.h" +#include "winrt/Windows.System.h" +#include "winrt/Windows.UI.Core.h" +#include "winrt/Windows.UI.Xaml.h" +#include "winrt/Windows.UI.Xaml.Automation.Peers.h" +#include "winrt/Windows.UI.Xaml.Controls.h" +#include "winrt/Windows.UI.Xaml.Controls.Primitives.h" +#include "winrt/Windows.UI.Xaml.Documents.h" +#include "winrt/Windows.UI.Xaml.Interop.h" +#include "winrt/Windows.UI.Xaml.Markup.h" +#include "winrt/Windows.UI.Xaml.Media.h" +#include "winrt/Windows.UI.Xaml.Media.Imaging.h" +#include "winrt/Windows.UI.Xaml.Navigation.h" diff --git a/Samples/BasicFaceDetection/cs/Package.appxmanifest b/Samples/BasicFaceDetection/cs/Package.appxmanifest index de6c292a7b..4974f0195e 100644 --- a/Samples/BasicFaceDetection/cs/Package.appxmanifest +++ b/Samples/BasicFaceDetection/cs/Package.appxmanifest @@ -48,7 +48,6 @@ - \ No newline at end of file diff --git a/Samples/BasicFaceDetection/cs/SampleConfiguration.cs 
b/Samples/BasicFaceDetection/cs/SampleConfiguration.cs index 7892a44c6d..37785539d6 100644 --- a/Samples/BasicFaceDetection/cs/SampleConfiguration.cs +++ b/Samples/BasicFaceDetection/cs/SampleConfiguration.cs @@ -11,7 +11,11 @@ using System; using System.Collections.Generic; +using Windows.Graphics.Imaging; +using Windows.Media.FaceAnalysis; +using Windows.UI.Xaml; using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Media.Imaging; namespace SDKTemplate { @@ -21,9 +25,72 @@ public partial class MainPage : Page List scenarios = new List { - new Scenario() { Title="Detect Faces in Photos", ClassType=typeof(DetectFacesInPhoto)}, - new Scenario() { Title="Detect Faces in Webcam", ClassType=typeof(DetectFacesInWebcam)}, + new Scenario() { Title="Detect Faces in Photos", ClassType=typeof(Scenario1_DetectInPhoto)}, + new Scenario() { Title="Detect Faces in Webcam", ClassType=typeof(Scenario2_DetectInWebcam)}, }; + + /// + /// Rescale the size and position of the face highlight box + /// to account for the difference between the size of the image and + /// the canvas. + /// + /// The element to rescale + /// Horizontal adjustment factor + /// Vertical adjustment factor + static void ApplyScale(FrameworkElement box, double widthScale, double heightScale) + { + // We saved the original size of the face box in the element's Tag field. + BitmapBounds faceBox = (BitmapBounds)box.Tag; + box.Width = faceBox.Width * widthScale; + box.Height = faceBox.Height * heightScale; + box.Margin = new Thickness(faceBox.X * widthScale, faceBox.Y * heightScale, 0, 0); + } + + public static void HighlightFaces(WriteableBitmap displaySource, IList foundFaces, Canvas canvas, DataTemplate template) + { + double widthScale = canvas.ActualWidth / displaySource.PixelWidth; + double heightScale = canvas.ActualHeight / displaySource.PixelHeight; + + foreach (DetectedFace face in foundFaces) + { + // Create an element for displaying the face box but since we're using a Canvas + // we must scale it according to the image's actual size. + // The original FaceBox values are saved in the element's Tag field so we can update the + // boxes when the Canvas is resized. 
+ var box = (FrameworkElement)template.LoadContent(); + box.Tag = face.FaceBox; + ApplyScale(box, widthScale, heightScale); + + canvas.Children.Add(box); + } + + string message; + if (foundFaces.Count == 0) + { + message = "Didn't find any human faces in the image"; + } + else if (foundFaces.Count == 1) + { + message = "Found a human face in the image"; + } + else + { + message = "Found " + foundFaces.Count + " human faces in the image"; + } + + MainPage.Current.NotifyUser(message, NotifyType.StatusMessage); + } + + public static void RepositionFaces(WriteableBitmap displaySource, Canvas canvas) + { + double widthScale = canvas.ActualWidth / displaySource.PixelWidth; + double heightScale = canvas.ActualHeight / displaySource.PixelHeight; + + foreach (var item in canvas.Children) + { + ApplyScale((FrameworkElement)item, widthScale, heightScale); + } + } } public class Scenario diff --git a/Samples/BasicFaceDetection/cs/Scenario1_DetectInPhoto.xaml.cs b/Samples/BasicFaceDetection/cs/Scenario1_DetectInPhoto.xaml.cs index 0e1178d284..32176a936b 100644 --- a/Samples/BasicFaceDetection/cs/Scenario1_DetectInPhoto.xaml.cs +++ b/Samples/BasicFaceDetection/cs/Scenario1_DetectInPhoto.xaml.cs @@ -22,14 +22,13 @@ using Windows.UI.Xaml.Media; using Windows.UI.Xaml.Media.Imaging; using Windows.UI.Xaml.Navigation; -using Windows.UI.Xaml.Shapes; namespace SDKTemplate { /// /// Page for demonstrating FaceDetection on an image file. /// - public sealed partial class DetectFacesInPhoto : Page + public sealed partial class Scenario1_DetectInPhoto : Page { /// /// Brush for drawing the bounding box around each detected face. @@ -63,9 +62,9 @@ public sealed partial class DetectFacesInPhoto : Page private MainPage rootPage; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - public DetectFacesInPhoto() + public Scenario1_DetectInPhoto() { this.InitializeComponent(); } @@ -86,50 +85,8 @@ protected override void OnNavigatedTo(NavigationEventArgs e) /// List of detected faces; output from FaceDetector private void SetupVisualization(WriteableBitmap displaySource, IList foundFaces) { - ImageBrush brush = new ImageBrush(); - brush.ImageSource = displaySource; - brush.Stretch = Stretch.Fill; - this.PhotoCanvas.Background = brush; - - if (foundFaces != null) - { - double widthScale = displaySource.PixelWidth / this.PhotoCanvas.ActualWidth; - double heightScale = displaySource.PixelHeight / this.PhotoCanvas.ActualHeight; - - foreach (DetectedFace face in foundFaces) - { - // Create a rectangle element for displaying the face box but since we're using a Canvas - // we must scale the rectangles according to the image’s actual size. - // The original FaceBox values are saved in the Rectangle's Tag field so we can update the - // boxes when the Canvas is resized. 
- Rectangle box = new Rectangle(); - box.Tag = face.FaceBox; - box.Width = (uint)(face.FaceBox.Width / widthScale); - box.Height = (uint)(face.FaceBox.Height / heightScale); - box.Fill = this.fillBrush; - box.Stroke = this.lineBrush; - box.StrokeThickness = this.lineThickness; - box.Margin = new Thickness((uint)(face.FaceBox.X / widthScale), (uint)(face.FaceBox.Y / heightScale), 0, 0); - - this.PhotoCanvas.Children.Add(box); - } - } - - string message; - if (foundFaces == null || foundFaces.Count == 0) - { - message = "Didn't find any human faces in the image"; - } - else if (foundFaces.Count == 1) - { - message = "Found a human face in the image"; - } - else - { - message = "Found " + foundFaces.Count + " human faces in the image"; - } - - this.rootPage.NotifyUser(message, NotifyType.StatusMessage); + this.PhotoCanvas.Background = new ImageBrush() { ImageSource = displaySource, Stretch = Stretch.Fill }; + MainPage.HighlightFaces(displaySource, foundFaces, this.PhotoCanvas, this.HighlightedFaceBox); } /// @@ -173,36 +130,31 @@ private BitmapTransform ComputeScalingTransformForSourceImage(BitmapDecoder sour /// Event data private async void OpenFile_Click(object sender, RoutedEventArgs e) { - IList faces = null; - SoftwareBitmap detectorInput = null; - WriteableBitmap displaySource = null; - - try + FileOpenPicker photoPicker = new FileOpenPicker(); + photoPicker.ViewMode = PickerViewMode.Thumbnail; + photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary; + photoPicker.FileTypeFilter.Add(".jpg"); + photoPicker.FileTypeFilter.Add(".jpeg"); + photoPicker.FileTypeFilter.Add(".png"); + photoPicker.FileTypeFilter.Add(".bmp"); + + StorageFile photoFile = await photoPicker.PickSingleFileAsync(); + if (photoFile == null) { - FileOpenPicker photoPicker = new FileOpenPicker(); - photoPicker.ViewMode = PickerViewMode.Thumbnail; - photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary; - photoPicker.FileTypeFilter.Add(".jpg"); - photoPicker.FileTypeFilter.Add(".jpeg"); - photoPicker.FileTypeFilter.Add(".png"); - photoPicker.FileTypeFilter.Add(".bmp"); - - StorageFile photoFile = await photoPicker.PickSingleFileAsync(); - if (photoFile == null) - { - return; - } + return; + } - this.ClearVisualization(); - this.rootPage.NotifyUser("Opening...", NotifyType.StatusMessage); + this.ClearVisualization(); + this.rootPage.NotifyUser("Opening...", NotifyType.StatusMessage); + try + { // Open the image file and decode the bitmap into memory. // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display. using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read)) { BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream); BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder); - using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage)) { // We need to convert the image into a format that's compatible with FaceDetector. 
@@ -210,10 +162,10 @@ private async void OpenFile_Click(object sender, RoutedEventArgs e) const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8; if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat)) { - using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat)) + using (SoftwareBitmap detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat)) { - // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer. - displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight); + // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to WriteableBitmap's buffer. + WriteableBitmap displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight); originalBitmap.CopyToBuffer(displaySource.PixelBuffer); this.rootPage.NotifyUser("Detecting...", NotifyType.StatusMessage); @@ -223,7 +175,7 @@ private async void OpenFile_Click(object sender, RoutedEventArgs e) // you should create a member variable and reuse the object. // However, for simplicity in this scenario we instantiate a new instance each time. FaceDetector detector = await FaceDetector.CreateAsync(); - faces = await detector.DetectFacesAsync(detectorInput); + IList faces = await detector.DetectFacesAsync(detectorInput); // Create our display using the available image and face results. this.SetupVisualization(displaySource, faces); @@ -250,37 +202,12 @@ private async void OpenFile_Click(object sender, RoutedEventArgs e) /// Event data private void PhotoCanvas_SizeChanged(object sender, SizeChangedEventArgs e) { - try - { - // If the Canvas is resized we must recompute a new scaling factor and - // apply it to each face box. - if (this.PhotoCanvas.Background != null) - { - WriteableBitmap displaySource = (this.PhotoCanvas.Background as ImageBrush).ImageSource as WriteableBitmap; - - double widthScale = displaySource.PixelWidth / this.PhotoCanvas.ActualWidth; - double heightScale = displaySource.PixelHeight / this.PhotoCanvas.ActualHeight; - - foreach (var item in PhotoCanvas.Children) - { - Rectangle box = item as Rectangle; - if (box == null) - { - continue; - } - - // We saved the original size of the face box in the rectangles Tag field. - BitmapBounds faceBounds = (BitmapBounds)box.Tag; - box.Width = (uint)(faceBounds.Width / widthScale); - box.Height = (uint)(faceBounds.Height / heightScale); - - box.Margin = new Thickness((uint)(faceBounds.X / widthScale), (uint)(faceBounds.Y / heightScale), 0, 0); - } - } - } - catch (Exception ex) + // If the Canvas is resized we must recompute a new scaling factor and + // apply it to each face box. 
+ ImageBrush brush = (ImageBrush)this.PhotoCanvas.Background; + if (brush != null) { - this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage); + MainPage.RepositionFaces((WriteableBitmap)brush.ImageSource, this.PhotoCanvas); } } } diff --git a/Samples/BasicFaceDetection/cs/Scenario2_DetectInWebcam.xaml.cs b/Samples/BasicFaceDetection/cs/Scenario2_DetectInWebcam.xaml.cs index a3328597d5..40d52f2dbf 100644 --- a/Samples/BasicFaceDetection/cs/Scenario2_DetectInWebcam.xaml.cs +++ b/Samples/BasicFaceDetection/cs/Scenario2_DetectInWebcam.xaml.cs @@ -12,7 +12,7 @@ using System; using System.Collections.Generic; using System.Threading.Tasks; - +using Windows.Foundation; using Windows.Graphics.Imaging; using Windows.Media; using Windows.Media.Capture; @@ -23,34 +23,18 @@ using Windows.UI.Xaml.Media; using Windows.UI.Xaml.Media.Imaging; using Windows.UI.Xaml.Navigation; -using Windows.UI.Xaml.Shapes; namespace SDKTemplate { /// /// Page for demonstrating FaceDetection on a webcam snapshot. /// - public sealed partial class DetectFacesInWebcam : Page + public sealed partial class Scenario2_DetectInWebcam : Page { - /// - /// Brush for drawing the bounding box around each detected face. - /// - private readonly SolidColorBrush lineBrush = new SolidColorBrush(Windows.UI.Colors.Yellow); - - /// - /// Thickness of the face bounding box lines. - /// - private readonly double lineThickness = 2.0; - - /// - /// Transparent fill for the bounding box. - /// - private readonly SolidColorBrush fillBrush = new SolidColorBrush(Windows.UI.Colors.Transparent); - /// /// Reference back to the "root" page of the app. /// - private MainPage rootPage; + private MainPage rootPage = MainPage.Current; /// /// Holds the current scenario state value. @@ -73,14 +57,13 @@ public sealed partial class DetectFacesInWebcam : Page private FaceDetector faceDetector; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - public DetectFacesInWebcam() + public Scenario2_DetectInWebcam() { this.InitializeComponent(); this.currentState = ScenarioState.Idle; - App.Current.Suspending += this.OnSuspending; } /// @@ -105,19 +88,21 @@ private enum ScenarioState } /// - /// Responds when we navigate to this page. + /// Called when we navigate to this page. /// /// Event data - protected override async void OnNavigatedTo(NavigationEventArgs e) + protected override void OnNavigatedTo(NavigationEventArgs e) { - this.rootPage = MainPage.Current; + App.Current.Suspending += this.OnSuspending; + } - // The 'await' operation can only be used from within an async method but class constructors - // cannot be labeled as async, and so we'll initialize FaceDetector here. - if (this.faceDetector == null) - { - this.faceDetector = await FaceDetector.CreateAsync(); - } + /// + /// Called when we navigate away from this page. 
+ /// + /// Event data + protected override void OnNavigatedFrom(NavigationEventArgs e) + { + App.Current.Suspending -= this.OnSuspending; } /// @@ -125,14 +110,14 @@ protected override async void OnNavigatedTo(NavigationEventArgs e) /// /// The source of the Suspending event /// Event data - private void OnSuspending(object sender, Windows.ApplicationModel.SuspendingEventArgs e) + private async void OnSuspending(object sender, Windows.ApplicationModel.SuspendingEventArgs e) { if (this.currentState == ScenarioState.Streaming) { var deferral = e.SuspendingOperation.GetDeferral(); try { - this.ChangeScenarioState(ScenarioState.Idle); + await this.ChangeScenarioStateAsync(ScenarioState.Idle); } finally { @@ -145,9 +130,11 @@ private void OnSuspending(object sender, Windows.ApplicationModel.SuspendingEven /// Initializes a new MediaCapture instance and starts the Preview streaming to the CamPreview UI element. /// /// Async Task object returning true if initialization and streaming were successful and false if an exception occurred. - private async Task StartWebcamStreaming() + private async Task StartWebcamStreamingAsync() { - bool successful = true; + bool successful = false; + + this.faceDetector = await FaceDetector.CreateAsync(); try { @@ -169,17 +156,17 @@ private async Task StartWebcamStreaming() // NOTE: CaptureElement's Source must be set before streaming is started. this.CamPreview.Source = this.mediaCapture; await this.mediaCapture.StartPreviewAsync(); + + successful = true; } catch (System.UnauthorizedAccessException) { // If the user has disabled their webcam this exception is thrown; provide a descriptive message to inform the user of this fact. this.rootPage.NotifyUser("Webcam is disabled or access to the webcam is disabled for this app.\nEnsure Privacy Settings allow webcam usage.", NotifyType.ErrorMessage); - successful = false; } catch (Exception ex) { this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage); - successful = false; } return successful; @@ -188,7 +175,7 @@ private async Task StartWebcamStreaming() /// /// Safely stops webcam streaming (if running) and releases MediaCapture object. /// - private async void ShutdownWebCam() + private async Task ShutdownWebcamAsync() { if (this.mediaCapture != null) { @@ -208,20 +195,17 @@ private async void ShutdownWebCam() /// Captures a single frame from the running webcam stream and executes the FaceDetector on the image. If successful calls SetupVisualization to display the results. /// /// Async Task object returning true if the capture was successful and false if an exception occurred. - private async Task TakeSnapshotAndFindFaces() + private async Task TakeSnapshotAndFindFacesAsync() { - bool successful = true; - - try + if (this.currentState != ScenarioState.Streaming) { - if (this.currentState != ScenarioState.Streaming) - { - return false; - } + return false; + } - WriteableBitmap displaySource = null; - IList faces = null; + bool successful = false; + try + { // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case). // GetPreviewFrame will convert the native webcam frame into this format. const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12; @@ -232,23 +216,26 @@ private async Task TakeSnapshotAndFindFaces() // The returned VideoFrame should be in the supported NV12 format but we need to verify this. 
if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat)) { - faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap); + IList<DetectedFace> faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap); + + // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer. + // Note that WriteableBitmap doesn't support NV12 and we have to convert it to 32-bit BGRA. + WriteableBitmap displaySource; + using (SoftwareBitmap convertedSource = SoftwareBitmap.Convert(previewFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8)) + { + displaySource = new WriteableBitmap(convertedSource.PixelWidth, convertedSource.PixelHeight); + convertedSource.CopyToBuffer(displaySource.PixelBuffer); + } + + // Create our display using the available image and face results. + this.SetupVisualization(displaySource, faces); + + successful = true; } else { this.rootPage.NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage); } - - // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer. - // Note that WriteableBitmap doesn't support NV12 and we have to convert it to 32-bit BGRA. - using (SoftwareBitmap convertedSource = SoftwareBitmap.Convert(previewFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8)) - { - displaySource = new WriteableBitmap(convertedSource.PixelWidth, convertedSource.PixelHeight); - convertedSource.CopyToBuffer(displaySource.PixelBuffer); - } - - // Create our display using the available image and face results. - this.SetupVisualization(displaySource, faces); } } catch (Exception ex) @@ -267,50 +254,8 @@ private async Task<bool> TakeSnapshotAndFindFaces() /// List of detected faces; output from FaceDetector private void SetupVisualization(WriteableBitmap displaySource, IList<DetectedFace> foundFaces) { - ImageBrush brush = new ImageBrush(); - brush.ImageSource = displaySource; - brush.Stretch = Stretch.Fill; - this.SnapshotCanvas.Background = brush; - - if (foundFaces != null) - { - double widthScale = displaySource.PixelWidth / this.SnapshotCanvas.ActualWidth; - double heightScale = displaySource.PixelHeight / this.SnapshotCanvas.ActualHeight; - - foreach (DetectedFace face in foundFaces) - { - // Create a rectangle element for displaying the face box but since we're using a Canvas - // we must scale the rectangles according to the image's actual size. - // The original FaceBox values are saved in the Rectangle's Tag field so we can update the - // boxes when the Canvas is resized.
- Rectangle box = new Rectangle(); - box.Tag = face.FaceBox; - box.Width = (uint)(face.FaceBox.Width / widthScale); - box.Height = (uint)(face.FaceBox.Height / heightScale); - box.Fill = this.fillBrush; - box.Stroke = this.lineBrush; - box.StrokeThickness = this.lineThickness; - box.Margin = new Thickness((uint)(face.FaceBox.X / widthScale), (uint)(face.FaceBox.Y / heightScale), 0, 0); - - this.SnapshotCanvas.Children.Add(box); - } - } - - string message; - if (foundFaces == null || foundFaces.Count == 0) - { - message = "Didn't find any human faces in the image"; - } - else if (foundFaces.Count == 1) - { - message = "Found a human face in the image"; - } - else - { - message = "Found " + foundFaces.Count + " human faces in the image"; - } - - this.rootPage.NotifyUser(message, NotifyType.StatusMessage); + this.SnapshotCanvas.Background = new ImageBrush() { ImageSource = displaySource, Stretch = Stretch.Fill }; + MainPage.HighlightFaces(displaySource, foundFaces, this.SnapshotCanvas, this.HighlightedFaceBox); } /// @@ -318,17 +263,17 @@ private void SetupVisualization(WriteableBitmap displaySource, IList /// State to switch to - private async void ChangeScenarioState(ScenarioState newState) + private async Task ChangeScenarioStateAsync(ScenarioState newState) { switch (newState) { case ScenarioState.Idle: - this.ShutdownWebCam(); + this.CameraSnapshotButton.IsEnabled = false; + await this.ShutdownWebcamAsync(); this.SnapshotCanvas.Background = null; this.SnapshotCanvas.Children.Clear(); - this.CameraSnapshotButton.IsEnabled = false; this.CameraStreamingButton.Content = "Start Streaming"; this.CameraSnapshotButton.Content = "Take Snapshot"; this.currentState = newState; @@ -336,9 +281,9 @@ private async void ChangeScenarioState(ScenarioState newState) case ScenarioState.Streaming: - if (!await this.StartWebcamStreaming()) + if (!await this.StartWebcamStreamingAsync()) { - this.ChangeScenarioState(ScenarioState.Idle); + await this.ChangeScenarioStateAsync(ScenarioState.Idle); break; } @@ -352,13 +297,13 @@ private async void ChangeScenarioState(ScenarioState newState) case ScenarioState.Snapshot: - if (!await this.TakeSnapshotAndFindFaces()) + if (!await this.TakeSnapshotAndFindFacesAsync()) { - this.ChangeScenarioState(ScenarioState.Idle); + await this.ChangeScenarioStateAsync(ScenarioState.Idle); break; } - this.ShutdownWebCam(); + await this.ShutdownWebcamAsync(); this.CameraSnapshotButton.IsEnabled = true; this.CameraStreamingButton.Content = "Start Streaming"; @@ -373,13 +318,13 @@ private async void ChangeScenarioState(ScenarioState newState) /// /// The source of the event, i.e. our MediaCapture object /// Event data - private void MediaCapture_CameraStreamStateChanged(MediaCapture sender, object args) + private async void MediaCapture_CameraStreamStateChanged(MediaCapture sender, object args) { - // MediaCapture is not Agile and so we cannot invoke it's methods on this caller's thread + // MediaCapture is not Agile and so we cannot invoke its methods on this caller's thread // and instead need to schedule the state change on the UI thread. 
- var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => + await this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async () => { - ChangeScenarioState(ScenarioState.Idle); + await ChangeScenarioStateAsync(ScenarioState.Idle); }); } @@ -388,17 +333,16 @@ private void MediaCapture_CameraStreamStateChanged(MediaCapture sender, object a /// /// Button user clicked /// Event data - private void CameraStreamingButton_Click(object sender, RoutedEventArgs e) + private async void CameraStreamingButton_Click(object sender, RoutedEventArgs e) { + this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); if (this.currentState == ScenarioState.Streaming) { - this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); - this.ChangeScenarioState(ScenarioState.Idle); + await this.ChangeScenarioStateAsync(ScenarioState.Idle); } else { - this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); - this.ChangeScenarioState(ScenarioState.Streaming); + await this.ChangeScenarioStateAsync(ScenarioState.Streaming); } } @@ -407,17 +351,16 @@ private void CameraStreamingButton_Click(object sender, RoutedEventArgs e) /// /// Button user clicked /// Event data - private void CameraSnapshotButton_Click(object sender, RoutedEventArgs e) + private async void CameraSnapshotButton_Click(object sender, RoutedEventArgs e) { + this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); if (this.currentState == ScenarioState.Streaming) { - this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); - this.ChangeScenarioState(ScenarioState.Snapshot); + await this.ChangeScenarioStateAsync(ScenarioState.Snapshot); } else { - this.rootPage.NotifyUser(string.Empty, NotifyType.StatusMessage); - this.ChangeScenarioState(ScenarioState.Idle); + await this.ChangeScenarioStateAsync(ScenarioState.Idle); } } @@ -428,37 +371,12 @@ private void CameraSnapshotButton_Click(object sender, RoutedEventArgs e) /// Event data private void SnapshotCanvas_SizeChanged(object sender, SizeChangedEventArgs e) { - try + // If the Canvas is resized we must recompute a new scaling factor and + // apply it to each face box. + ImageBrush brush = (ImageBrush)this.SnapshotCanvas.Background; + if (brush != null) { - // If the Canvas is resized we must recompute a new scaling factor and - // apply it to each face box. - if (this.currentState == ScenarioState.Snapshot && this.SnapshotCanvas.Background != null) - { - WriteableBitmap displaySource = (this.SnapshotCanvas.Background as ImageBrush).ImageSource as WriteableBitmap; - - double widthScale = displaySource.PixelWidth / this.SnapshotCanvas.ActualWidth; - double heightScale = displaySource.PixelHeight / this.SnapshotCanvas.ActualHeight; - - foreach (var item in this.SnapshotCanvas.Children) - { - Rectangle box = item as Rectangle; - if (box == null) - { - continue; - } - - // We saved the original size of the face box in the rectangles Tag field. 
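The repositioning code being removed here, like the drawing code removed from SetupVisualization above, now lives in shared MainPage helpers (MainPage.HighlightFaces and MainPage.RepositionFaces) that this diff only calls; their implementations are not among the changed files shown. A rough sketch of what the repositioning half could look like, reconstructed from the removed inline logic — the name and signature follow the call sites above, but the body is illustrative rather than the sample's actual shared code, and it assumes the usual Windows.UI.Xaml.Shapes and Windows.Graphics.Imaging namespaces:

```csharp
// Illustrative reconstruction, not the shared implementation from MainPage:
// rescales each face Rectangle on the Canvas from bitmap pixel coordinates
// to the Canvas's current size, using the FaceBox stashed in Rectangle.Tag.
public static void RepositionFaces(WriteableBitmap displaySource, Canvas canvas)
{
    double widthScale = displaySource.PixelWidth / canvas.ActualWidth;
    double heightScale = displaySource.PixelHeight / canvas.ActualHeight;

    foreach (UIElement child in canvas.Children)
    {
        if (child is Rectangle box && box.Tag is BitmapBounds faceBounds)
        {
            box.Width = faceBounds.Width / widthScale;
            box.Height = faceBounds.Height / heightScale;
            box.Margin = new Thickness(faceBounds.X / widthScale, faceBounds.Y / heightScale, 0, 0);
        }
    }
}
```

HighlightFaces would presumably apply the same scaling when first creating the rectangles, which is why both call sites pass the source bitmap alongside the Canvas.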
- BitmapBounds faceBounds = (BitmapBounds)box.Tag; - box.Width = (uint)(faceBounds.Width / widthScale); - box.Height = (uint)(faceBounds.Height / heightScale); - - box.Margin = new Thickness((uint)(faceBounds.X / widthScale), (uint)(faceBounds.Y / heightScale), 0, 0); - } - } - } - catch (Exception ex) - { - this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage); + MainPage.RepositionFaces((WriteableBitmap)brush.ImageSource, this.SnapshotCanvas); } } } diff --git a/Samples/BasicFaceDetection/shared/Scenario1_DetectInPhoto.xaml b/Samples/BasicFaceDetection/shared/Scenario1_DetectInPhoto.xaml index 7f90a7eb24..93efa8bb8c 100644 --- a/Samples/BasicFaceDetection/shared/Scenario1_DetectInPhoto.xaml +++ b/Samples/BasicFaceDetection/shared/Scenario1_DetectInPhoto.xaml @@ -11,7 +11,7 @@ //********************************************************* --> + + + + + + @@ -44,11 +50,6 @@ + + + + + + diff --git a/Samples/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.cpp b/archived/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.cpp similarity index 100% rename from Samples/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.cpp rename to archived/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.cpp diff --git a/Samples/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.h b/archived/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.h similarity index 100% rename from Samples/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.h rename to archived/UserCertificateStore/cpp/Scenario1_MoveCertificate.xaml.h diff --git a/Samples/UserCertificateStore/cpp/UserCertificateStore.sln b/archived/UserCertificateStore/cpp/UserCertificateStore.sln similarity index 100% rename from Samples/UserCertificateStore/cpp/UserCertificateStore.sln rename to archived/UserCertificateStore/cpp/UserCertificateStore.sln diff --git a/Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj b/archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj similarity index 97% rename from Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj rename to archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj index a1e7deab95..fac87498bb 100644 --- a/Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj +++ b/archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj @@ -139,7 +139,7 @@ - ..\shared\Scenario1_MoveCertificate.xaml + Scenario1_MoveCertificate.xaml @@ -149,7 +149,7 @@ Designer - + Styles\Styles.xaml @@ -176,7 +176,7 @@ - ..\shared\Scenario1_MoveCertificate.xaml + Scenario1_MoveCertificate.xaml diff --git a/Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters b/archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters similarity index 97% rename from Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters rename to archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters index 3bcf98c2e6..efe85cd1af 100644 --- a/Samples/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters +++ b/archived/UserCertificateStore/cpp/UserCertificateStore.vcxproj.filters @@ -34,7 +34,7 @@ Styles - + diff --git a/archived/UserCertificateStore/cpp/pch.cpp b/archived/UserCertificateStore/cpp/pch.cpp new file mode 100644 index 0000000000..ade821753a --- /dev/null +++ b/archived/UserCertificateStore/cpp/pch.cpp @@ -0,0 +1,5 @@ +// +// Include the standard header and generate the precompiled header. 
+// + +#include "pch.h" diff --git a/archived/UserCertificateStore/cpp/pch.h b/archived/UserCertificateStore/cpp/pch.h new file mode 100644 index 0000000000..1dcc72eba4 --- /dev/null +++ b/archived/UserCertificateStore/cpp/pch.h @@ -0,0 +1,10 @@ +// +// Header for standard system include files. +// + +#pragma once + +#include +#include + +#include "App.xaml.h" diff --git a/Samples/VoIP/README.md b/archived/VoIP/README.md similarity index 99% rename from Samples/VoIP/README.md rename to archived/VoIP/README.md index 9b884d1ebf..843e258a89 100644 --- a/Samples/VoIP/README.md +++ b/archived/VoIP/README.md @@ -1,5 +1,5 @@ --- -page_type: sample +topic: sample languages: - csharp products: diff --git a/Samples/VoIP/cs/Voip.sln b/archived/VoIP/cs/Voip.sln similarity index 100% rename from Samples/VoIP/cs/Voip.sln rename to archived/VoIP/cs/Voip.sln diff --git a/Samples/VoIP/cs/Voip/App.xaml b/archived/VoIP/cs/Voip/App.xaml similarity index 100% rename from Samples/VoIP/cs/Voip/App.xaml rename to archived/VoIP/cs/Voip/App.xaml diff --git a/Samples/VoIP/cs/Voip/App.xaml.cs b/archived/VoIP/cs/Voip/App.xaml.cs similarity index 100% rename from Samples/VoIP/cs/Voip/App.xaml.cs rename to archived/VoIP/cs/Voip/App.xaml.cs diff --git a/Samples/VoIP/cs/Voip/Helpers/AppServiceHelper.cs b/archived/VoIP/cs/Voip/Helpers/AppServiceHelper.cs similarity index 100% rename from Samples/VoIP/cs/Voip/Helpers/AppServiceHelper.cs rename to archived/VoIP/cs/Voip/Helpers/AppServiceHelper.cs diff --git a/Samples/VoIP/cs/Voip/Helpers/VoipCallHelper.cs b/archived/VoIP/cs/Voip/Helpers/VoipCallHelper.cs similarity index 100% rename from Samples/VoIP/cs/Voip/Helpers/VoipCallHelper.cs rename to archived/VoIP/cs/Voip/Helpers/VoipCallHelper.cs diff --git a/Samples/VoIP/cs/Voip/MainPage.xaml b/archived/VoIP/cs/Voip/MainPage.xaml similarity index 100% rename from Samples/VoIP/cs/Voip/MainPage.xaml rename to archived/VoIP/cs/Voip/MainPage.xaml diff --git a/Samples/VoIP/cs/Voip/MainPage.xaml.cs b/archived/VoIP/cs/Voip/MainPage.xaml.cs similarity index 100% rename from Samples/VoIP/cs/Voip/MainPage.xaml.cs rename to archived/VoIP/cs/Voip/MainPage.xaml.cs diff --git a/Samples/VoIP/cs/Voip/Package.appxmanifest b/archived/VoIP/cs/Voip/Package.appxmanifest similarity index 100% rename from Samples/VoIP/cs/Voip/Package.appxmanifest rename to archived/VoIP/cs/Voip/Package.appxmanifest diff --git a/Samples/VoIP/cs/Voip/Voip.csproj b/archived/VoIP/cs/Voip/Voip.csproj similarity index 100% rename from Samples/VoIP/cs/Voip/Voip.csproj rename to archived/VoIP/cs/Voip/Voip.csproj diff --git a/Samples/VoIP/cs/VoipBackEnd/ApiLock.cpp b/archived/VoIP/cs/VoipBackEnd/ApiLock.cpp similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/ApiLock.cpp rename to archived/VoIP/cs/VoipBackEnd/ApiLock.cpp diff --git a/Samples/VoIP/cs/VoipBackEnd/ApiLock.h b/archived/VoIP/cs/VoipBackEnd/ApiLock.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/ApiLock.h rename to archived/VoIP/cs/VoipBackEnd/ApiLock.h diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndAudio.cpp b/archived/VoIP/cs/VoipBackEnd/BackEndAudio.cpp similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndAudio.cpp rename to archived/VoIP/cs/VoipBackEnd/BackEndAudio.cpp diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndAudio.h b/archived/VoIP/cs/VoipBackEnd/BackEndAudio.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndAudio.h rename to archived/VoIP/cs/VoipBackEnd/BackEndAudio.h diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.cpp 
b/archived/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.cpp similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.cpp rename to archived/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.cpp diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.h b/archived/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.h rename to archived/VoIP/cs/VoipBackEnd/BackEndAudioHelpers.h diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndNativeBuffer.h b/archived/VoIP/cs/VoipBackEnd/BackEndNativeBuffer.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndNativeBuffer.h rename to archived/VoIP/cs/VoipBackEnd/BackEndNativeBuffer.h diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndTransport.cpp b/archived/VoIP/cs/VoipBackEnd/BackEndTransport.cpp similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndTransport.cpp rename to archived/VoIP/cs/VoipBackEnd/BackEndTransport.cpp diff --git a/Samples/VoIP/cs/VoipBackEnd/BackEndTransport.h b/archived/VoIP/cs/VoipBackEnd/BackEndTransport.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/BackEndTransport.h rename to archived/VoIP/cs/VoipBackEnd/BackEndTransport.h diff --git a/Samples/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj b/archived/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj rename to archived/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj diff --git a/Samples/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj.filters b/archived/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj.filters similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj.filters rename to archived/VoIP/cs/VoipBackEnd/VoipBackEnd.vcxproj.filters diff --git a/archived/VoIP/cs/VoipBackEnd/pch.cpp b/archived/VoIP/cs/VoipBackEnd/pch.cpp new file mode 100644 index 0000000000..dc586a230a --- /dev/null +++ b/archived/VoIP/cs/VoipBackEnd/pch.cpp @@ -0,0 +1,11 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. 
+// +//********************************************************* +#include "pch.h" diff --git a/Samples/VoIP/cs/VoipBackEnd/pch.h b/archived/VoIP/cs/VoipBackEnd/pch.h similarity index 100% rename from Samples/VoIP/cs/VoipBackEnd/pch.h rename to archived/VoIP/cs/VoipBackEnd/pch.h diff --git a/Samples/VoIP/cs/VoipHost/VoipHost.cpp b/archived/VoIP/cs/VoipHost/VoipHost.cpp similarity index 100% rename from Samples/VoIP/cs/VoipHost/VoipHost.cpp rename to archived/VoIP/cs/VoipHost/VoipHost.cpp diff --git a/Samples/VoIP/cs/VoipHost/VoipHost.vcxproj b/archived/VoIP/cs/VoipHost/VoipHost.vcxproj similarity index 100% rename from Samples/VoIP/cs/VoipHost/VoipHost.vcxproj rename to archived/VoIP/cs/VoipHost/VoipHost.vcxproj diff --git a/Samples/VoIP/cs/VoipHost/VoipHost.vcxproj.filters b/archived/VoIP/cs/VoipHost/VoipHost.vcxproj.filters similarity index 100% rename from Samples/VoIP/cs/VoipHost/VoipHost.vcxproj.filters rename to archived/VoIP/cs/VoipHost/VoipHost.vcxproj.filters diff --git a/Samples/VoIP/cs/VoipTasks/AppService.cs b/archived/VoIP/cs/VoipTasks/AppService.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/AppService.cs rename to archived/VoIP/cs/VoipTasks/AppService.cs diff --git a/Samples/VoIP/cs/VoipTasks/BackgroundOperations/BackgroundOperations.cs b/archived/VoIP/cs/VoipTasks/BackgroundOperations/BackgroundOperations.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/BackgroundOperations/BackgroundOperations.cs rename to archived/VoIP/cs/VoipTasks/BackgroundOperations/BackgroundOperations.cs diff --git a/Samples/VoIP/cs/VoipTasks/CallRtcTask.cs b/archived/VoIP/cs/VoipTasks/CallRtcTask.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/CallRtcTask.cs rename to archived/VoIP/cs/VoipTasks/CallRtcTask.cs diff --git a/Samples/VoIP/cs/VoipTasks/Helpers/Current.cs b/archived/VoIP/cs/VoipTasks/Helpers/Current.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/Helpers/Current.cs rename to archived/VoIP/cs/VoipTasks/Helpers/Current.cs diff --git a/Samples/VoIP/cs/VoipTasks/Helpers/VccCallHelper.cs b/archived/VoIP/cs/VoipTasks/Helpers/VccCallHelper.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/Helpers/VccCallHelper.cs rename to archived/VoIP/cs/VoipTasks/Helpers/VccCallHelper.cs diff --git a/Samples/VoIP/cs/VoipTasks/Properties/AssemblyInfo.cs b/archived/VoIP/cs/VoipTasks/Properties/AssemblyInfo.cs similarity index 100% rename from Samples/VoIP/cs/VoipTasks/Properties/AssemblyInfo.cs rename to archived/VoIP/cs/VoipTasks/Properties/AssemblyInfo.cs diff --git a/Samples/VoIP/cs/VoipTasks/VoipTasks.csproj b/archived/VoIP/cs/VoipTasks/VoipTasks.csproj similarity index 100% rename from Samples/VoIP/cs/VoipTasks/VoipTasks.csproj rename to archived/VoIP/cs/VoipTasks/VoipTasks.csproj
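Stepping back to the BasicFaceDetection changes earlier in this diff: one subtlety in the reworked Scenario2 OnSuspending handler is that, once it awaits asynchronous cleanup, it has to hold the SuspendingDeferral for the whole operation. Without the deferral, the system treats the handler as finished the moment it returns — for an async handler that is at the first await — and could suspend the process before the webcam is actually released. A minimal sketch of that pattern as the diff applies it; the cleanup call stands in for ChangeScenarioStateAsync(ScenarioState.Idle):

```csharp
// The deferral keeps suspension pending until Complete() is called, so the awaited
// cleanup below can finish before the app is frozen (within the platform's suspension time limit).
private async void OnSuspending(object sender, Windows.ApplicationModel.SuspendingEventArgs e)
{
    if (this.currentState == ScenarioState.Streaming)
    {
        var deferral = e.SuspendingOperation.GetDeferral();
        try
        {
            // Stop the preview and release MediaCapture before the process is suspended.
            await this.ChangeScenarioStateAsync(ScenarioState.Idle);
        }
        finally
        {
            deferral.Complete();
        }
    }
}
```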