mirror of https://github.com/tailscale/tailscale/
util/winutil/gp, net/dns: add package for Group Policy API
This adds a package with GP-related functions and types to be used in the future PRs. It also updates nrptRuleDatabase to use the new package instead of its own gpNotificationWatcher implementation. Updates #12687 Signed-off-by: Nick Khyl <nickk@tailscale.com>pull/12750/head
parent
7b1c764088
commit
8bd442ba8c
@ -0,0 +1,79 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// Package gp contains [Group Policy]-related functions and types.
|
||||
//
|
||||
// [Group Policy]: https://web.archive.org/web/20240630210707/https://learn.microsoft.com/en-us/previous-versions/windows/desktop/policy/group-policy-start-page
|
||||
package gp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Scope is a user or machine policy scope.
// It selects which branch of Group Policy (per-machine or per-user)
// a lock, refresh, or change watcher in this package applies to.
type Scope int

const (
	// MachinePolicy indicates a machine policy.
	// Registry-based machine policies reside in HKEY_LOCAL_MACHINE.
	MachinePolicy Scope = iota
	// UserPolicy indicates a user policy.
	// Registry-based user policies reside in HKEY_CURRENT_USER of the corresponding user.
	UserPolicy
)
|
||||
|
||||
// _RP_FORCE causes RefreshPolicyEx to reapply policy even if no policy change was detected.
// The leading underscore mirrors the Windows constant name RP_FORCE while keeping
// the identifier unexported. See [RP_FORCE] for details.
//
// [RP_FORCE]: https://web.archive.org/save/https://learn.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-refreshpolicyex
const _RP_FORCE = 0x1
|
||||
|
||||
// RefreshMachinePolicy triggers a machine policy refresh, but does not wait for it to complete.
// When the force parameter is true, it causes the Group Policy to reapply policy even
// if no policy change was detected.
func RefreshMachinePolicy(force bool) error {
	return refreshPolicyEx(true, toRefreshPolicyFlags(force))
}
|
||||
|
||||
// RefreshUserPolicy triggers a user policy refresh, but does not wait for it to complete.
|
||||
// When the force parameter is true, it causes the Group Policy to reapply policy even
|
||||
// if no policy change was detected.
|
||||
//
|
||||
// The token indicates user whose policy should be refreshed.
|
||||
// If specified, the token must be either a primary token with TOKEN_QUERY and TOKEN_DUPLICATE
|
||||
// access, or an impersonation token with TOKEN_QUERY and TOKEN_IMPERSONATE access,
|
||||
// and the specified user must be logged in interactively.
|
||||
//
|
||||
// Otherwise, a zero token value indicates the current user. It should not
|
||||
// be used by services or other applications running under system identities.
|
||||
//
|
||||
// The function fails with windows.ERROR_ACCESS_DENIED if the user represented by the token
|
||||
// is not logged in interactively at the time of the call.
|
||||
func RefreshUserPolicy(token windows.Token, force bool) error {
|
||||
if token != 0 {
|
||||
// Impersonate the user whose policy we need to refresh.
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
if err := impersonateLoggedOnUser(token); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := windows.RevertToSelf(); err != nil {
|
||||
// RevertToSelf errors are non-recoverable.
|
||||
panic(fmt.Errorf("could not revert impersonation: %w", err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return refreshPolicyEx(true, toRefreshPolicyFlags(force))
|
||||
}
|
||||
|
||||
func toRefreshPolicyFlags(force bool) uint32 {
|
||||
if force {
|
||||
return _RP_FORCE
|
||||
}
|
||||
return 0
|
||||
}
|
@ -0,0 +1,197 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package gp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"tailscale.com/util/cibuild"
|
||||
)
|
||||
|
||||
// TestWatchForPolicyChange verifies that a machine-scope ChangeWatcher's handler
// is invoked after RefreshMachinePolicy triggers a policy refresh.
func TestWatchForPolicyChange(t *testing.T) {
	if cibuild.On() {
		// Unlike tests that also use the GP API in net\dns\manager_windows_test.go,
		// this one does not require elevation. However, a Group Policy change notification
		// never arrives when this test runs on a GitHub-hosted runner.
		t.Skipf("test requires running on a real Windows environment")
	}

	done, close := setupMachinePolicyChangeNotifier(t)
	defer close()

	// RefreshMachinePolicy is a non-blocking call.
	if err := RefreshMachinePolicy(true); err != nil {
		t.Fatalf("RefreshMachinePolicy failed: %v", err)
	}

	// We should receive a policy change notification when
	// the Group Policy service completes policy processing.
	// Otherwise, the test will eventually time out.
	<-done
}
|
||||
|
||||
// TestGroupPolicyReadLock verifies that holding the machine PolicyLock
// prevents the Group Policy service from completing a policy refresh,
// and that the refresh proceeds once the lock is released.
func TestGroupPolicyReadLock(t *testing.T) {
	if cibuild.On() {
		// Unlike tests that also use the GP API in net\dns\manager_windows_test.go,
		// this one does not require elevation. However, a Group Policy change notification
		// never arrives when this test runs on a GitHub-hosted runner.
		t.Skipf("test requires running on a real Windows environment")
	}

	done, close := setupMachinePolicyChangeNotifier(t)
	defer close()

	doWithMachinePolicyLocked(t, func() {
		// RefreshMachinePolicy is a non-blocking call.
		if err := RefreshMachinePolicy(true); err != nil {
			t.Fatalf("RefreshMachinePolicy failed: %v", err)
		}

		// Give the Group Policy service a few seconds to attempt to refresh the policy.
		// It shouldn't be able to do so while the lock is held, and the below should time out.
		timeout := time.NewTimer(5 * time.Second)
		defer timeout.Stop()
		select {
		case <-timeout.C:
		case <-done:
			t.Fatal("Policy refresh occurred while the policy lock was held")
		}
	})

	// We should receive a policy change notification once the lock is released
	// and GP can refresh the policy.
	// Otherwise, the test will eventually time out.
	<-done
}
|
||||
|
||||
// TestHammerGroupPolicyReadLock acquires and releases a PolicyLock from N
// concurrent goroutines, with no-op enter/leave hooks substituted for the
// real Windows API, to stress the lockCnt reference counting for races.
func TestHammerGroupPolicyReadLock(t *testing.T) {
	const N = 10_000

	// Fake enter/leave functions: handle 1 stands in for a real critical
	// policy section handle; no Windows API is touched.
	enter := func(bool) (policyLockHandle, error) { return 1, nil }
	leave := func(policyLockHandle) error { return nil }

	doWithCustomEnterLeaveFuncs(t, func(gpLock *PolicyLock) {
		var wg sync.WaitGroup
		wg.Add(N)
		for range N {
			go func() {
				defer wg.Done()
				if err := gpLock.Lock(); err != nil {
					t.Errorf("(*PolicyLock).Lock failed: %v", err)
					return
				}
				defer gpLock.Unlock()
				// While locked, the fake handle must be visible.
				if gpLock.handle == 0 {
					t.Error("(*PolicyLock).handle is 0")
					return
				}
			}()
		}
		wg.Wait()
	}, enter, leave)
}
|
||||
|
||||
// TestGroupPolicyReadLockClose verifies that closing a PolicyLock while a
// Lock call is still waiting inside the enter function cancels that Lock
// with ErrInvalidLockState and leaves lockCnt fully released.
func TestGroupPolicyReadLockClose(t *testing.T) {
	// init is closed by the fake enter function so the test knows
	// the Lock call is in flight before it invokes Close.
	init := make(chan struct{})
	enter := func(bool) (policyLockHandle, error) {
		close(init)
		time.Sleep(500 * time.Millisecond)
		return 1, nil
	}
	leave := func(policyLockHandle) error { return nil }

	doWithCustomEnterLeaveFuncs(t, func(gpLock *PolicyLock) {
		done := make(chan struct{})
		go func() {
			defer close(done)

			err := gpLock.Lock()
			if err == nil {
				defer gpLock.Unlock()
			}

			// We closed gpLock before the enter function returned.
			// (*PolicyLock).Lock is expected to fail.
			if err == nil || !errors.Is(err, ErrInvalidLockState) {
				t.Errorf("(*PolicyLock).Lock: got %v; want %v", err, ErrInvalidLockState)
			}
			// gpLock must not be held as Lock() failed.
			if lockCnt := gpLock.lockCnt.Load(); lockCnt != 0 {
				t.Errorf("lockCnt: got %v; want 0", lockCnt)
			}
		}()

		<-init
		// Close gpLock right before the enter function returns.
		if err := gpLock.Close(); err != nil {
			t.Fatalf("(*PolicyLock).Close failed: %v", err)
		}
		<-done
	}, enter, leave)
}
|
||||
|
||||
// TestGroupPolicyReadLockErr verifies that when the enter function fails,
// (*PolicyLock).Lock surfaces that error, the leave function is never
// invoked, and the lock is left unheld (but still open).
func TestGroupPolicyReadLockErr(t *testing.T) {
	wantErr := errors.New("failed to acquire the lock")

	enter := func(bool) (policyLockHandle, error) { return 0, wantErr }
	leave := func(policyLockHandle) error { t.Error("leaveCriticalPolicySection must not be called"); return nil }

	doWithCustomEnterLeaveFuncs(t, func(gpLock *PolicyLock) {
		err := gpLock.Lock()
		if err == nil {
			defer gpLock.Unlock()
		}
		if err != wantErr {
			t.Errorf("(*PolicyLock).Lock: got %v; want %v", err, wantErr)
		}
		// gpLock must not be held when Lock() fails.
		// The LSB indicates that the lock has not been closed.
		if lockCnt := gpLock.lockCnt.Load(); lockCnt&^(1) != 0 {
			t.Errorf("lockCnt: got %v; want 0", lockCnt)
		}
	}, enter, leave)
}
|
||||
|
||||
func setupMachinePolicyChangeNotifier(t *testing.T) (chan struct{}, func()) {
|
||||
done := make(chan struct{})
|
||||
var watcher *ChangeWatcher
|
||||
watcher, err := NewChangeWatcher(MachinePolicy, func() {
|
||||
close(done)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("NewChangeWatcher failed: %v", err)
|
||||
}
|
||||
return done, func() {
|
||||
if err := watcher.Close(); err != nil {
|
||||
t.Errorf("(*ChangeWatcher).Close failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// doWithMachinePolicyLocked acquires the real machine policy read lock,
// runs f while it is held, and releases the lock (and the PolicyLock itself)
// afterwards.
func doWithMachinePolicyLocked(t *testing.T, f func()) {
	gpLock := NewMachinePolicyLock()
	defer gpLock.Close()
	if err := gpLock.Lock(); err != nil {
		t.Fatalf("(*PolicyLock).Lock failed: %v", err)
	}
	defer gpLock.Unlock()
	f()
}
|
||||
|
||||
// doWithCustomEnterLeaveFuncs creates a machine PolicyLock with its
// enter/leave functions replaced by the provided test hooks, runs f with it,
// and closes the lock via t.Cleanup when the test completes.
func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) {
	t.Helper()

	l := NewMachinePolicyLock()
	l.enterFn, l.leaveFn = enter, leave
	t.Cleanup(func() {
		if err := l.Close(); err != nil {
			t.Fatalf("(*PolicyLock).Close failed: %v", err)
		}
	})

	f(l)
}
|
@ -0,0 +1,13 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package gp
|
||||
|
||||
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go mksyscall.go
|
||||
|
||||
//sys enterCriticalPolicySection(machine bool) (handle policyLockHandle, err error) [int32(failretval)==0] = userenv.EnterCriticalPolicySection
|
||||
//sys impersonateLoggedOnUser(token windows.Token) (err error) [int32(failretval)==0] = advapi32.ImpersonateLoggedOnUser
|
||||
//sys leaveCriticalPolicySection(handle policyLockHandle) (err error) [int32(failretval)==0] = userenv.LeaveCriticalPolicySection
|
||||
//sys registerGPNotification(event windows.Handle, machine bool) (err error) [int32(failretval)==0] = userenv.RegisterGPNotification
|
||||
//sys refreshPolicyEx(machine bool, flags uint32) (err error) [int32(failretval)==0] = userenv.RefreshPolicyEx
|
||||
//sys unregisterGPNotification(event windows.Handle) (err error) [int32(failretval)==0] = userenv.UnregisterGPNotification
|
@ -0,0 +1,291 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package gp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// PolicyLock allows pausing the application of policy to safely read Group Policy
// settings. A PolicyLock is an R-lock that can be held by multiple readers simultaneously,
// preventing the Group Policy Client service (which maintains its W-counterpart) from
// modifying policies while they are being read.
//
// It is not possible to pause group policy processing for longer than 10 minutes.
// If the system needs to apply policies and the lock is being held for more than that,
// the Group Policy Client service will release the lock and continue policy processing.
//
// To avoid deadlocks when acquiring both machine and user locks, acquire the
// user lock before the machine lock.
type PolicyLock struct {
	scope Scope         // MachinePolicy or UserPolicy
	token windows.Token // for UserPolicy: duplicated token owned by the lock, closed in closeInternal; 0 for the current user

	// hooks for testing
	enterFn func(bool) (policyLockHandle, error)
	leaveFn func(policyLockHandle) error

	closing chan struct{} // closing is closed when the Close method is called.

	mu     sync.Mutex       // mu guards handle and serializes Lock/Unlock
	handle policyLockHandle // non-zero while the underlying critical policy section is held
	// lockCnt holds 2*(number of holders) plus an LSB flag.
	// A non-zero LSB indicates that the lock can be acquired.
	lockCnt atomic.Int32
}
|
||||
|
||||
// policyLockHandle is the underlying lock handle returned by enterCriticalPolicySection.
type policyLockHandle uintptr

// policyLockResult carries the outcome of an enterCriticalPolicySection call
// from the goroutine spawned by lockSlow back to the waiting Lock caller.
type policyLockResult struct {
	handle policyLockHandle
	err    error
}

var (
	// ErrInvalidLockState is returned by (*PolicyLock).Lock if the lock has a zero value or has already been closed.
	ErrInvalidLockState = errors.New("the lock has not been created or has already been closed")
)
|
||||
|
||||
// NewMachinePolicyLock creates a PolicyLock that facilitates pausing the
|
||||
// application of computer policy. To avoid deadlocks when acquiring both
|
||||
// machine and user locks, acquire the user lock before the machine lock.
|
||||
func NewMachinePolicyLock() *PolicyLock {
|
||||
lock := &PolicyLock{
|
||||
scope: MachinePolicy,
|
||||
closing: make(chan struct{}),
|
||||
enterFn: enterCriticalPolicySection,
|
||||
leaveFn: leaveCriticalPolicySection,
|
||||
}
|
||||
lock.lockCnt.Store(1) // mark as initialized
|
||||
return lock
|
||||
}
|
||||
|
||||
// NewUserPolicyLock creates a PolicyLock that facilitates pausing the
// application of the user policy for the specified user. To avoid deadlocks
// when acquiring both machine and user locks, acquire the user lock before the
// machine lock.
//
// The token indicates which user's policy should be locked for reading.
// If specified, the token must have TOKEN_DUPLICATE access,
// the specified user must be logged in interactively,
// and the caller retains ownership of the token.
//
// Otherwise, a zero token value indicates the current user. It should not
// be used by services or other applications running under system identities.
func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) {
	lock := &PolicyLock{
		scope:   UserPolicy,
		closing: make(chan struct{}),
		enterFn: enterCriticalPolicySection,
		leaveFn: leaveCriticalPolicySection,
	}
	if token != 0 {
		// Duplicate the token so the lock owns its own handle with the access
		// rights required for impersonation; the caller keeps the original.
		err := windows.DuplicateHandle(
			windows.CurrentProcess(),
			windows.Handle(token),
			windows.CurrentProcess(),
			(*windows.Handle)(&lock.token),
			windows.TOKEN_QUERY|windows.TOKEN_DUPLICATE|windows.TOKEN_IMPERSONATE,
			false,
			0)
		if err != nil {
			return nil, err
		}
	}
	lock.lockCnt.Store(1) // mark as initialized
	return lock, nil
}
|
||||
|
||||
// Lock locks l.
// It returns ErrInvalidLockState if l has a zero value or has already been closed,
// or an Errno if the underlying Group Policy lock cannot be acquired.
//
// As a special case, it fails with windows.ERROR_ACCESS_DENIED
// if l is a user policy lock, and the corresponding user is not logged in
// interactively at the time of the call.
func (l *PolicyLock) Lock() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	// Add 2 to register this reader; an even result means the LSB was clear,
	// i.e. the lock is uninitialized or closing.
	if l.lockCnt.Add(2)&1 == 0 {
		// The lock cannot be acquired because it has either never been properly
		// created or its Close method has already been called. However, we need
		// to call Unlock to both decrement lockCnt and leave the underlying
		// CriticalPolicySection if we won the race with another goroutine and
		// now own the lock.
		l.Unlock()
		return ErrInvalidLockState
	}

	if l.handle != 0 {
		// The underlying CriticalPolicySection is already acquired.
		// It is an R-Lock (with the W-counterpart owned by the Group Policy service),
		// meaning that it can be acquired by multiple readers simultaneously.
		// So we can just return.
		return nil
	}

	return l.lockSlow()
}
|
||||
|
||||
// lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock.
|
||||
// It waits for either the lock to be acquired, or for the Close method to be called.
|
||||
//
|
||||
// l.mu must be held.
|
||||
func (l *PolicyLock) lockSlow() (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// Decrement the counter if the lock cannot be acquired,
|
||||
// and complete the pending close request if we're the last owner.
|
||||
if l.lockCnt.Add(-2) == 0 {
|
||||
l.closeInternal()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// In some cases in production environments, the Group Policy service may
|
||||
// hold the corresponding W-Lock for extended periods of time (minutes
|
||||
// rather than seconds or milliseconds). We need to make our wait operation
|
||||
// cancellable. So, if one goroutine invokes (*PolicyLock).Close while another
|
||||
// initiates (*PolicyLock).Lock and waits for the underlying R-lock to be
|
||||
// acquired by enterCriticalPolicySection, the Close method should cancel
|
||||
// the wait.
|
||||
|
||||
initCh := make(chan error)
|
||||
resultCh := make(chan policyLockResult)
|
||||
|
||||
go func() {
|
||||
closing := l.closing
|
||||
if l.scope == UserPolicy && l.token != 0 {
|
||||
// Impersonate the user whose critical policy section we want to acquire.
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
if err := impersonateLoggedOnUser(l.token); err != nil {
|
||||
initCh <- err
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err := windows.RevertToSelf(); err != nil {
|
||||
// RevertToSelf errors are non-recoverable.
|
||||
panic(fmt.Errorf("could not revert impersonation: %w", err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
close(initCh)
|
||||
|
||||
var machine bool
|
||||
if l.scope == MachinePolicy {
|
||||
machine = true
|
||||
}
|
||||
handle, err := l.enterFn(machine)
|
||||
|
||||
send_result:
|
||||
for {
|
||||
select {
|
||||
case resultCh <- policyLockResult{handle, err}:
|
||||
// lockSlow has received the result.
|
||||
default:
|
||||
select {
|
||||
case <-closing:
|
||||
// The lock is being closed, and we lost the race to l.closing
|
||||
// it the calling goroutine.
|
||||
if err == nil {
|
||||
l.leaveFn(handle)
|
||||
}
|
||||
break send_result
|
||||
default:
|
||||
// The calling goroutine did not enter the select block yet.
|
||||
runtime.Gosched() // allow other routines to run
|
||||
continue send_result
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// lockSlow should not return until the goroutine above has been fully initialized,
|
||||
// even if the lock is being closed.
|
||||
if err = <-initCh; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case result := <-resultCh:
|
||||
if result.err == nil {
|
||||
l.handle = result.handle
|
||||
}
|
||||
return result.err
|
||||
case <-l.closing:
|
||||
return ErrInvalidLockState
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock unlocks l.
|
||||
// It panics if l is not locked on entry to Unlock.
|
||||
func (l *PolicyLock) Unlock() {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
lockCnt := l.lockCnt.Add(-2)
|
||||
if lockCnt < 0 {
|
||||
panic("negative lockCnt")
|
||||
}
|
||||
if lockCnt > 1 {
|
||||
// The lock is still being used by other readers.
|
||||
// We compare against 1 rather than 0 because the least significant bit
|
||||
// of lockCnt indicates that l has been initialized and a close
|
||||
// has not been requested yet.
|
||||
return
|
||||
}
|
||||
|
||||
if l.handle != 0 {
|
||||
// Impersonation is not required to unlock a critical policy section.
|
||||
// The handle we pass determines which mutex will be unlocked.
|
||||
leaveCriticalPolicySection(l.handle)
|
||||
l.handle = 0
|
||||
}
|
||||
|
||||
if lockCnt == 0 {
|
||||
// Complete the pending close request if there's no more readers.
|
||||
l.closeInternal()
|
||||
}
|
||||
}
|
||||
|
||||
// Close releases resources associated with l.
|
||||
// It is a no-op for the machine policy lock.
|
||||
func (l *PolicyLock) Close() error {
|
||||
lockCnt := l.lockCnt.Load()
|
||||
if lockCnt&1 == 0 {
|
||||
// The lock has never been initialized, or close has already been called.
|
||||
return nil
|
||||
}
|
||||
|
||||
close(l.closing)
|
||||
|
||||
// Unset the LSB to indicate a pending close request.
|
||||
for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) {
|
||||
lockCnt = l.lockCnt.Load()
|
||||
}
|
||||
|
||||
if lockCnt != 0 {
|
||||
// The lock is still being used and will be closed upon the final Unlock call.
|
||||
return nil
|
||||
}
|
||||
|
||||
return l.closeInternal()
|
||||
}
|
||||
|
||||
func (l *PolicyLock) closeInternal() error {
|
||||
if l.token != 0 {
|
||||
if err := l.token.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
l.token = 0
|
||||
}
|
||||
l.closing = nil
|
||||
return nil
|
||||
}
|
@ -0,0 +1,107 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package gp
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// ChangeWatcher calls the handler whenever a policy in the specified scope changes.
type ChangeWatcher struct {
	// gpWaitEvents[0] is signaled by Close to stop watching;
	// gpWaitEvents[1] is signaled by Windows on each policy refresh.
	// The ordering matters; see the comment in NewChangeWatcher.
	gpWaitEvents [2]windows.Handle
	handler      func()        // invoked on the doWatch goroutine for each notification
	done         chan struct{} // closed when the doWatch goroutine exits
}
|
||||
|
||||
// NewChangeWatcher creates an instance of ChangeWatcher that invokes handler
|
||||
// every time Windows notifies it of a group policy change in the specified scope.
|
||||
func NewChangeWatcher(scope Scope, handler func()) (*ChangeWatcher, error) {
|
||||
var err error
|
||||
|
||||
// evtDone is signaled by (*gpNotificationWatcher).Close() to indicate that
|
||||
// the doWatch goroutine should exit.
|
||||
evtDone, err := windows.CreateEvent(nil, 0, 0, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
windows.CloseHandle(evtDone)
|
||||
}
|
||||
}()
|
||||
|
||||
// evtChanged is registered with the Windows policy engine to become
|
||||
// signalled any time group policy has been refreshed.
|
||||
evtChanged, err := windows.CreateEvent(nil, 0, 0, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
windows.CloseHandle(evtChanged)
|
||||
}
|
||||
}()
|
||||
|
||||
// Tell Windows to signal evtChanged whenever group policies are refreshed.
|
||||
if err := registerGPNotification(evtChanged, scope == MachinePolicy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &ChangeWatcher{
|
||||
// Ordering of the event handles in gpWaitEvents is important:
|
||||
// When calling windows.WaitForMultipleObjects and multiple objects are
|
||||
// signalled simultaneously, it always returns the wait code for the
|
||||
// lowest-indexed handle in its input array. evtDone is higher priority for
|
||||
// us than evtChanged, so the former must be placed into the array ahead of
|
||||
// the latter.
|
||||
gpWaitEvents: [2]windows.Handle{
|
||||
evtDone,
|
||||
evtChanged,
|
||||
},
|
||||
handler: handler,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go result.doWatch()
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (w *ChangeWatcher) doWatch() {
|
||||
// The wait code corresponding to the event that is signalled when a group
|
||||
// policy change occurs. That is, w.gpWaitEvents[1] aka evtChanged.
|
||||
const expectedWaitCode = windows.WAIT_OBJECT_0 + 1
|
||||
for {
|
||||
if waitCode, _ := windows.WaitForMultipleObjects(w.gpWaitEvents[:], false, windows.INFINITE); waitCode != expectedWaitCode {
|
||||
break
|
||||
}
|
||||
w.handler()
|
||||
}
|
||||
close(w.done)
|
||||
}
|
||||
|
||||
// Close unsubscribes from further Group Policy notifications,
// waits for any running handlers to complete, and releases any remaining resources
// associated with w.
func (w *ChangeWatcher) Close() error {
	// Notify doWatch that we're done and it should exit.
	if err := windows.SetEvent(w.gpWaitEvents[0]); err != nil {
		return err
	}

	// Best-effort: stop Windows from signaling evtChanged on future refreshes.
	// The error is intentionally ignored since we are tearing down regardless.
	unregisterGPNotification(w.gpWaitEvents[1])

	// Wait for doWatch to complete.
	<-w.done

	// Now we may safely clean up all the things.
	for i, evt := range w.gpWaitEvents {
		windows.CloseHandle(evt)
		w.gpWaitEvents[i] = 0
	}

	w.handler = nil

	return nil
}
|
@ -0,0 +1,111 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package gp
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// NOTE(review): this file is generated by mkwinsyscall (see the header);
// prefer editing mksyscall.go and re-running `go generate` over editing here.
var _ unsafe.Pointer // keeps the unsafe import referenced even when unused by the wrappers

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}
|
||||
|
||||
// Lazily loaded system DLLs and the procedures used by the generated wrappers below.
var (
	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
	moduserenv  = windows.NewLazySystemDLL("userenv.dll")

	procImpersonateLoggedOnUser    = modadvapi32.NewProc("ImpersonateLoggedOnUser")
	procEnterCriticalPolicySection = moduserenv.NewProc("EnterCriticalPolicySection")
	procLeaveCriticalPolicySection = moduserenv.NewProc("LeaveCriticalPolicySection")
	procRefreshPolicyEx            = moduserenv.NewProc("RefreshPolicyEx")
	procRegisterGPNotification     = moduserenv.NewProc("RegisterGPNotification")
	procUnregisterGPNotification   = moduserenv.NewProc("UnregisterGPNotification")
)
|
||||
|
||||
// impersonateLoggedOnUser wraps advapi32!ImpersonateLoggedOnUser
// (mkwinsyscall-generated; a zero return from the API indicates failure).
func impersonateLoggedOnUser(token windows.Token) (err error) {
	r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0)
	if int32(r1) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
||||
|
||||
// enterCriticalPolicySection wraps userenv!EnterCriticalPolicySection
// (mkwinsyscall-generated; a zero handle from the API indicates failure).
func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err error) {
	var _p0 uint32
	if machine {
		_p0 = 1
	}
	r0, _, e1 := syscall.Syscall(procEnterCriticalPolicySection.Addr(), 1, uintptr(_p0), 0, 0)
	handle = policyLockHandle(r0)
	if int32(handle) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
||||
|
||||
// leaveCriticalPolicySection wraps userenv!LeaveCriticalPolicySection
// (mkwinsyscall-generated; a zero return from the API indicates failure).
func leaveCriticalPolicySection(handle policyLockHandle) (err error) {
	r1, _, e1 := syscall.Syscall(procLeaveCriticalPolicySection.Addr(), 1, uintptr(handle), 0, 0)
	if int32(r1) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
||||
|
||||
// refreshPolicyEx wraps userenv!RefreshPolicyEx
// (mkwinsyscall-generated; a zero return from the API indicates failure).
func refreshPolicyEx(machine bool, flags uint32) (err error) {
	var _p0 uint32
	if machine {
		_p0 = 1
	}
	r1, _, e1 := syscall.Syscall(procRefreshPolicyEx.Addr(), 2, uintptr(_p0), uintptr(flags), 0)
	if int32(r1) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
||||
|
||||
// registerGPNotification wraps userenv!RegisterGPNotification
// (mkwinsyscall-generated; a zero return from the API indicates failure).
func registerGPNotification(event windows.Handle, machine bool) (err error) {
	var _p0 uint32
	if machine {
		_p0 = 1
	}
	r1, _, e1 := syscall.Syscall(procRegisterGPNotification.Addr(), 2, uintptr(event), uintptr(_p0), 0)
	if int32(r1) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
||||
|
||||
// unregisterGPNotification wraps userenv!UnregisterGPNotification
// (mkwinsyscall-generated; a zero return from the API indicates failure).
func unregisterGPNotification(event windows.Handle) (err error) {
	r1, _, e1 := syscall.Syscall(procUnregisterGPNotification.Addr(), 1, uintptr(event), 0, 0)
	if int32(r1) == 0 {
		err = errnoErr(e1)
	}
	return
}
|
Loading…
Reference in New Issue